Commit f3fb15b9 authored by David S. Miller

Merge branch 'ionic-memory-usage-rework'

Shannon Nelson says:

====================
ionic memory usage rework

Previous review comments [1],[2] have suggested that this driver
needs to rework how queue resources are managed and reconfigured,
so that we don't do a full driver reset and so that potential
allocation failures are handled better.  This patchset is intended
to address those comments.

The first few patches clean up some general issues and
simplify some of the memory structures.  The last four patches
specifically address queue parameter changes without a full
ionic_stop()/ionic_open() cycle.

[1] https://lore.kernel.org/netdev/20200706103305.182bd727@kicinski-fedora-pc1c0hjn.dhcp.thefacebook.com/
[2] https://lore.kernel.org/netdev/20200724.194417.2151242753657227232.davem@davemloft.net/

v3: use PTR_ALIGN without typecast
    fix up Neel's attribution

v2: use PTR_ALIGN
    recovery if netif_set_real_num_tx/rx_queues fails
    less racy queue bring up after reconfig
    common-ize the reconfig queue stop and start
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 161e3106 6f7d6f0f
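For context on the PTR_ALIGN items in the v2/v3 notes: instead of one over-sized DMA block carved up for all the rings, each ring now gets its own allocation, padded by a page and then aligned before use. A minimal userspace sketch of that pattern, with plain malloc() standing in for dma_alloc_coherent() and local macros modeled on the kernel's ALIGN/PTR_ALIGN helpers (names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
/* modeled on the kernel's ALIGN/PTR_ALIGN helpers */
#define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))
#define PTR_ALIGN(p, a) ((void *)ALIGN_UP((uintptr_t)(p), (a)))

struct ring {
	void *raw;       /* what was actually allocated; this is what gets freed */
	size_t raw_size;
	void *base;      /* page-aligned view handed to the queue code */
};

static int ring_alloc(struct ring *r, size_t num_descs, size_t desc_size)
{
	/* pad by one page so the aligned pointer still has room for all descriptors */
	r->raw_size = PAGE_SIZE + num_descs * desc_size;
	r->raw = malloc(r->raw_size);        /* dma_alloc_coherent() in the driver */
	if (!r->raw)
		return -1;
	r->base = PTR_ALIGN(r->raw, PAGE_SIZE);
	return 0;
}

static void ring_free(struct ring *r)
{
	free(r->raw);                        /* dma_free_coherent() in the driver */
	r->raw = NULL;
	r->base = NULL;
}

int main(void)
{
	struct ring r;

	if (ring_alloc(&r, 1024, 16))
		return 1;
	printf("raw=%p aligned=%p\n", r.raw, r.base);
	ring_free(&r);
	return 0;
}
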
......@@ -42,13 +42,11 @@ struct ionic {
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
struct ionic_identity ident;
struct list_head lifs;
struct ionic_lif *master_lif;
struct ionic_lif *lif;
unsigned int nnqs_per_lif;
unsigned int neqs_per_lif;
unsigned int ntxqs_per_lif;
unsigned int nrxqs_per_lif;
DECLARE_BITMAP(lifbits, IONIC_LIFS_MAX);
unsigned int nintrs;
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
......
......@@ -294,21 +294,21 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_port_reset;
}
err = ionic_lifs_size(ionic);
err = ionic_lif_size(ionic);
if (err) {
dev_err(dev, "Cannot size LIFs: %d, aborting\n", err);
dev_err(dev, "Cannot size LIF: %d, aborting\n", err);
goto err_out_port_reset;
}
err = ionic_lifs_alloc(ionic);
err = ionic_lif_alloc(ionic);
if (err) {
dev_err(dev, "Cannot allocate LIFs: %d, aborting\n", err);
dev_err(dev, "Cannot allocate LIF: %d, aborting\n", err);
goto err_out_free_irqs;
}
err = ionic_lifs_init(ionic);
err = ionic_lif_init(ionic->lif);
if (err) {
dev_err(dev, "Cannot init LIFs: %d, aborting\n", err);
dev_err(dev, "Cannot init LIF: %d, aborting\n", err);
goto err_out_free_lifs;
}
......@@ -321,9 +321,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot enable existing VFs: %d\n", err);
}
err = ionic_lifs_register(ionic);
err = ionic_lif_register(ionic->lif);
if (err) {
dev_err(dev, "Cannot register LIFs: %d, aborting\n", err);
dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
goto err_out_deinit_lifs;
}
......@@ -336,12 +336,13 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_deregister_lifs:
ionic_lifs_unregister(ionic);
ionic_lif_unregister(ionic->lif);
err_out_deinit_lifs:
ionic_vf_dealloc(ionic);
ionic_lifs_deinit(ionic);
ionic_lif_deinit(ionic->lif);
err_out_free_lifs:
ionic_lifs_free(ionic);
ionic_lif_free(ionic->lif);
ionic->lif = NULL;
err_out_free_irqs:
ionic_bus_free_irq_vectors(ionic);
err_out_port_reset:
......@@ -377,11 +378,12 @@ static void ionic_remove(struct pci_dev *pdev)
if (!ionic)
return;
if (ionic->master_lif) {
if (ionic->lif) {
ionic_devlink_unregister(ionic);
ionic_lifs_unregister(ionic);
ionic_lifs_deinit(ionic);
ionic_lifs_free(ionic);
ionic_lif_unregister(ionic->lif);
ionic_lif_deinit(ionic->lif);
ionic_lif_free(ionic->lif);
ionic->lif = NULL;
ionic_bus_free_irq_vectors(ionic);
}
......
......@@ -76,7 +76,7 @@ static int q_tail_show(struct seq_file *seq, void *v)
{
struct ionic_queue *q = seq->private;
seq_printf(seq, "%d\n", q->tail->index);
seq_printf(seq, "%d\n", q->tail_idx);
return 0;
}
......@@ -86,7 +86,7 @@ static int q_head_show(struct seq_file *seq, void *v)
{
struct ionic_queue *q = seq->private;
seq_printf(seq, "%d\n", q->head->index);
seq_printf(seq, "%d\n", q->head_idx);
return 0;
}
......@@ -96,7 +96,7 @@ static int cq_tail_show(struct seq_file *seq, void *v)
{
struct ionic_cq *cq = seq->private;
seq_printf(seq, "%d\n", cq->tail->index);
seq_printf(seq, "%d\n", cq->tail_idx);
return 0;
}
......@@ -112,7 +112,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct dentry *q_dentry, *cq_dentry, *intr_dentry, *stats_dentry;
struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
struct dentry *intr_dentry, *stats_dentry;
struct ionic_dev *idev = &lif->ionic->idev;
struct debugfs_regset32 *intr_ctrl_regset;
struct ionic_intr_info *intr = &qcq->intr;
......@@ -121,21 +122,21 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
struct ionic_queue *q = &qcq->q;
struct ionic_cq *cq = &qcq->cq;
qcq->dentry = debugfs_create_dir(q->name, lif->dentry);
qcq_dentry = debugfs_create_dir(q->name, lif->dentry);
if (IS_ERR_OR_NULL(qcq_dentry))
return;
qcq->dentry = qcq_dentry;
debugfs_create_x32("total_size", 0400, qcq->dentry, &qcq->total_size);
debugfs_create_x64("base_pa", 0400, qcq->dentry, &qcq->base_pa);
debugfs_create_x64("q_base_pa", 0400, qcq_dentry, &qcq->q_base_pa);
debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);
debugfs_create_x64("cq_base_pa", 0400, qcq_dentry, &qcq->cq_base_pa);
debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);
debugfs_create_x64("sg_base_pa", 0400, qcq_dentry, &qcq->sg_base_pa);
debugfs_create_x32("sg_size", 0400, qcq_dentry, &qcq->sg_size);
q_dentry = debugfs_create_dir("q", qcq->dentry);
debugfs_create_u32("index", 0400, q_dentry, &q->index);
debugfs_create_x64("base_pa", 0400, q_dentry, &q->base_pa);
if (qcq->flags & IONIC_QCQ_F_SG) {
debugfs_create_x64("sg_base_pa", 0400, q_dentry,
&q->sg_base_pa);
debugfs_create_u32("sg_desc_size", 0400, q_dentry,
&q->sg_desc_size);
}
debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
......
......@@ -21,8 +21,8 @@ static void ionic_watchdog_cb(struct timer_list *t)
hb = ionic_heartbeat_check(ionic);
if (hb >= 0 && ionic->master_lif)
ionic_link_status_check_request(ionic->master_lif);
if (hb >= 0 && ionic->lif)
ionic_link_status_check_request(ionic->lif);
}
void ionic_init_devinfo(struct ionic *ionic)
......@@ -126,7 +126,7 @@ int ionic_heartbeat_check(struct ionic *ionic)
/* is this a transition? */
if (fw_status != idev->last_fw_status &&
idev->last_fw_status != 0xff) {
struct ionic_lif *lif = ionic->master_lif;
struct ionic_lif *lif = ionic->lif;
bool trigger = false;
if (!fw_status || fw_status == 0xff) {
......@@ -482,7 +482,7 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
cq->bound_intr = intr;
cq->num_descs = num_descs;
cq->desc_size = desc_size;
cq->tail = cq->info;
cq->tail_idx = 0;
cq->done_color = 1;
cur = cq->info;
......@@ -522,15 +522,18 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg)
{
struct ionic_cq_info *cq_info;
unsigned int work_done = 0;
if (work_to_do == 0)
return 0;
while (cb(cq, cq->tail)) {
if (cq->tail->last)
cq_info = &cq->info[cq->tail_idx];
while (cb(cq, cq_info)) {
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
cq->tail = cq->tail->next;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq_info = &cq->info[cq->tail_idx];
DEBUG_STATS_CQE_CNT(cq);
if (++work_done >= work_to_do)
......@@ -565,8 +568,8 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
q->num_descs = num_descs;
q->desc_size = desc_size;
q->sg_desc_size = sg_desc_size;
q->tail = q->info;
q->head = q->tail;
q->tail_idx = 0;
q->head_idx = 0;
q->pid = pid;
snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);
......@@ -614,19 +617,22 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg)
{
struct device *dev = q->lif->ionic->dev;
struct ionic_desc_info *desc_info;
struct ionic_lif *lif = q->lif;
q->head->cb = cb;
q->head->cb_arg = cb_arg;
q->head = q->head->next;
desc_info = &q->info[q->head_idx];
desc_info->cb = cb;
desc_info->cb_arg = cb_arg;
q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
q->lif->index, q->name, q->hw_type, q->hw_index,
q->head->index, ring_doorbell);
q->head_idx, ring_doorbell);
if (ring_doorbell)
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
q->dbval | q->head->index);
q->dbval | q->head_idx);
}
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
......@@ -634,8 +640,8 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
unsigned int mask, tail, head;
mask = q->num_descs - 1;
tail = q->tail->index;
head = q->head->index;
tail = q->tail_idx;
head = q->head_idx;
return ((pos - tail) & mask) < ((head - tail) & mask);
}
......@@ -648,18 +654,18 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
void *cb_arg;
/* check for empty queue */
if (q->tail->index == q->head->index)
if (q->tail_idx == q->head_idx)
return;
/* stop index must be for a descriptor that is not yet completed */
if (unlikely(!ionic_q_is_posted(q, stop_index)))
dev_err(q->lif->ionic->dev,
"ionic stop is not posted %s stop %u tail %u head %u\n",
q->name, stop_index, q->tail->index, q->head->index);
q->name, stop_index, q->tail_idx, q->head_idx);
do {
desc_info = q->tail;
q->tail = desc_info->next;
desc_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
cb = desc_info->cb;
cb_arg = desc_info->cb_arg;
......
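The ionic_dev.c changes above drop the linked desc_info/cq_info pointers in favor of plain head/tail indices. Because num_descs is a power of two, wrapping is a simple mask, and the completion color bit flips each time the tail wraps past the last descriptor. A standalone sketch of that bookkeeping (simplified field and function names, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring_idx {
	uint16_t head_idx;   /* next slot to post */
	uint16_t tail_idx;   /* next slot to service/complete */
	uint16_t num_descs;  /* must be a power of two */
	bool done_color;     /* expected color of valid completions */
};

static void ring_post(struct ring_idx *r)
{
	r->head_idx = (r->head_idx + 1) & (r->num_descs - 1);
}

static void ring_service_one(struct ring_idx *r)
{
	/* color flips every time the tail wraps past the last descriptor */
	if (r->tail_idx == r->num_descs - 1)
		r->done_color = !r->done_color;
	r->tail_idx = (r->tail_idx + 1) & (r->num_descs - 1);
}

static bool ring_empty(const struct ring_idx *r)
{
	return r->tail_idx == r->head_idx;
}

/* is descriptor 'pos' between tail (inclusive) and head (exclusive)? */
static bool ring_is_posted(const struct ring_idx *r, uint16_t pos)
{
	uint16_t mask = r->num_descs - 1;

	return ((pos - r->tail_idx) & mask) < ((r->head_idx - r->tail_idx) & mask);
}

int main(void)
{
	struct ring_idx r = { .num_descs = 8, .done_color = true };

	ring_post(&r);
	ring_post(&r);
	printf("empty=%d posted(0)=%d posted(5)=%d\n",
	       ring_empty(&r), ring_is_posted(&r, 0), ring_is_posted(&r, 5));
	ring_service_one(&r);
	printf("tail=%u head=%u\n", (unsigned)r.tail_idx, (unsigned)r.head_idx);
	return 0;
}
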
......@@ -149,7 +149,13 @@ struct ionic_dev {
};
struct ionic_cq_info {
void *cq_desc;
union {
void *cq_desc;
struct ionic_txq_comp *txcq;
struct ionic_rxq_comp *rxcq;
struct ionic_admin_comp *admincq;
struct ionic_notifyq_event *notifyq;
};
struct ionic_cq_info *next;
unsigned int index;
bool last;
......@@ -169,8 +175,17 @@ struct ionic_page_info {
};
struct ionic_desc_info {
void *desc;
void *sg_desc;
union {
void *desc;
struct ionic_txq_desc *txq_desc;
struct ionic_rxq_desc *rxq_desc;
struct ionic_admin_cmd *adminq_desc;
};
union {
void *sg_desc;
struct ionic_txq_sg_desc *txq_sg_desc;
struct ionic_rxq_sg_desc *rxq_sgl_desc;
};
struct ionic_desc_info *next;
unsigned int index;
unsigned int left;
......@@ -183,22 +198,32 @@ struct ionic_desc_info {
#define IONIC_QUEUE_NAME_MAX_SZ 32
struct ionic_queue {
struct device *dev;
u64 dbell_count;
u64 drop;
u64 stop;
u64 wake;
struct ionic_lif *lif;
struct ionic_desc_info *info;
struct ionic_desc_info *tail;
struct ionic_desc_info *head;
struct ionic_dev *idev;
u16 head_idx;
u16 tail_idx;
unsigned int index;
unsigned int type;
unsigned int hw_index;
unsigned int hw_type;
u64 dbval;
void *base;
void *sg_base;
union {
void *base;
struct ionic_txq_desc *txq;
struct ionic_rxq_desc *rxq;
struct ionic_admin_cmd *adminq;
};
union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_rxq_sg_desc *rxq_sgl;
};
dma_addr_t base_pa;
dma_addr_t sg_base_pa;
unsigned int num_descs;
......@@ -225,9 +250,9 @@ struct ionic_cq {
dma_addr_t base_pa;
struct ionic_lif *lif;
struct ionic_cq_info *info;
struct ionic_cq_info *tail;
struct ionic_queue *bound_q;
struct ionic_intr_info *bound_intr;
u16 tail_idx;
bool done_color;
unsigned int num_descs;
u64 compl_count;
......@@ -246,12 +271,12 @@ static inline void ionic_intr_init(struct ionic_dev *idev,
static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
{
unsigned int avail = q->tail->index;
unsigned int avail = q->tail_idx;
if (q->head->index >= avail)
avail += q->head->left - 1;
if (q->head_idx >= avail)
avail += q->num_descs - q->head_idx - 1;
else
avail -= q->head->index + 1;
avail -= q->head_idx + 1;
return avail;
}
......
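The struct changes above add anonymous unions over the base and info pointers, so callers can index a ring with the proper descriptor type instead of casting from void *. A tiny illustration of the idea, using a made-up descriptor layout rather than the real ionic ones:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in descriptor; the real ionic_txq_desc layout is defined by the device ABI */
struct txq_desc {
	uint64_t cmd;
	uint64_t addr;
};

struct queue {
	uint16_t head_idx;
	uint16_t num_descs;
	union {                  /* one allocation, several typed views */
		void *base;
		struct txq_desc *txq;
	};
};

int main(void)
{
	struct queue q = { .num_descs = 4 };

	q.base = calloc(q.num_descs, sizeof(struct txq_desc));
	if (!q.base)
		return 1;

	/* old style needed a cast: ((struct txq_desc *)q.base)[q.head_idx] */
	q.txq[q.head_idx].cmd = 0x1;

	printf("cmd=%llu\n", (unsigned long long)q.txq[q.head_idx].cmd);
	free(q.base);
	return 0;
}
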
......@@ -85,7 +85,7 @@ int ionic_devlink_register(struct ionic *ionic)
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
else
devlink_port_type_eth_set(&ionic->dl_port,
ionic->master_lif->netdev);
ionic->lif->netdev);
return err;
}
......
......@@ -454,7 +454,7 @@ static int ionic_set_coalesce(struct net_device *netdev,
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
qcq = lif->rxqcqs[i].qcq;
qcq = lif->rxqcqs[i];
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
qcq->intr.index,
lif->rx_coalesce_hw);
......@@ -471,7 +471,7 @@ static int ionic_set_coalesce(struct net_device *netdev,
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
qcq = lif->txqcqs[i].qcq;
qcq = lif->txqcqs[i];
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
qcq->intr.index,
lif->tx_coalesce_hw);
......@@ -493,18 +493,14 @@ static void ionic_get_ringparam(struct net_device *netdev,
ring->rx_pending = lif->nrxq_descs;
}
static void ionic_set_ringsize(struct ionic_lif *lif, void *arg)
{
struct ethtool_ringparam *ring = arg;
lif->ntxq_descs = ring->tx_pending;
lif->nrxq_descs = ring->rx_pending;
}
static int ionic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_queue_params qparam;
int err;
ionic_init_queue_params(lif, &qparam);
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
......@@ -522,7 +518,28 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
return ionic_reset_queues(lif, ionic_set_ringsize, ring);
if (ring->tx_pending != lif->ntxq_descs)
netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
lif->ntxq_descs, ring->tx_pending);
if (ring->rx_pending != lif->nrxq_descs)
netdev_info(netdev, "Changing Rx ring size from %d to %d\n",
lif->nrxq_descs, ring->rx_pending);
/* if we're not running, just set the values and return */
if (!netif_running(lif->netdev)) {
lif->ntxq_descs = ring->tx_pending;
lif->nrxq_descs = ring->rx_pending;
return 0;
}
qparam.ntxq_descs = ring->tx_pending;
qparam.nrxq_descs = ring->rx_pending;
err = ionic_reconfigure_queues(lif, &qparam);
if (err)
netdev_info(netdev, "Ring reconfiguration failed, changes canceled: %d\n", err);
return err;
}
static void ionic_get_channels(struct net_device *netdev,
......@@ -544,32 +561,15 @@ static void ionic_get_channels(struct net_device *netdev,
}
}
static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
{
struct ethtool_channels *ch = arg;
if (ch->combined_count) {
lif->nxqs = ch->combined_count;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
netdev_info(lif->netdev, "Sharing queue interrupts\n");
}
} else {
lif->nxqs = ch->rx_count;
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
netdev_info(lif->netdev, "Splitting queue interrupts\n");
}
}
}
static int ionic_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct ionic_lif *lif = netdev_priv(netdev);
int new_cnt;
struct ionic_queue_params qparam;
int max_cnt;
int err;
ionic_init_queue_params(lif, &qparam);
if (ch->rx_count != ch->tx_count) {
netdev_info(netdev, "The rx and tx count must be equal\n");
......@@ -577,20 +577,63 @@ static int ionic_set_channels(struct net_device *netdev,
}
if (ch->combined_count && ch->rx_count) {
netdev_info(netdev, "Use either combined_count or rx/tx_count, not both\n");
netdev_info(netdev, "Use either combined or rx and tx, not both\n");
return -EINVAL;
}
if (ch->combined_count)
new_cnt = ch->combined_count;
else
new_cnt = ch->rx_count;
max_cnt = lif->ionic->ntxqs_per_lif;
if (ch->combined_count) {
if (ch->combined_count > max_cnt)
return -EINVAL;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netdev_info(lif->netdev, "Sharing queue interrupts\n");
else if (ch->combined_count == lif->nxqs)
return 0;
if (lif->nxqs != ch->combined_count)
netdev_info(netdev, "Changing queue count from %d to %d\n",
lif->nxqs, ch->combined_count);
if (lif->nxqs != new_cnt)
netdev_info(netdev, "Changing queue count from %d to %d\n",
lif->nxqs, new_cnt);
qparam.nxqs = ch->combined_count;
qparam.intr_split = 0;
} else {
max_cnt /= 2;
if (ch->rx_count > max_cnt)
return -EINVAL;
return ionic_reset_queues(lif, ionic_set_queuecount, ch);
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netdev_info(lif->netdev, "Splitting queue interrupts\n");
else if (ch->rx_count == lif->nxqs)
return 0;
if (lif->nxqs != ch->rx_count)
netdev_info(netdev, "Changing queue count from %d to %d\n",
lif->nxqs, ch->rx_count);
qparam.nxqs = ch->rx_count;
qparam.intr_split = 1;
}
/* if we're not running, just set the values and return */
if (!netif_running(lif->netdev)) {
lif->nxqs = qparam.nxqs;
if (qparam.intr_split) {
set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
} else {
clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
}
return 0;
}
err = ionic_reconfigure_queues(lif, &qparam);
if (err)
netdev_info(netdev, "Queue reconfiguration failed, changes canceled: %d\n", err);
return err;
}
static u32 ionic_get_priv_flags(struct net_device *netdev)
......
......@@ -36,6 +36,8 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
......@@ -297,6 +299,18 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->flags &= ~IONIC_QCQ_F_INITED;
}
static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
return;
irq_set_affinity_hint(qcq->intr.vector, NULL);
devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
qcq->intr.vector = 0;
ionic_intr_free(lif->ionic, qcq->intr.index);
qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct device *dev = lif->ionic->dev;
......@@ -306,51 +320,62 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
ionic_debugfs_del_qcq(qcq);
dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
qcq->base = NULL;
qcq->base_pa = 0;
if (qcq->q_base) {
dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
qcq->q_base = NULL;
qcq->q_base_pa = 0;
}
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector, NULL);
devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
qcq->intr.vector = 0;
ionic_intr_free(lif->ionic, qcq->intr.index);
if (qcq->cq_base) {
dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
qcq->cq_base = NULL;
qcq->cq_base_pa = 0;
}
if (qcq->sg_base) {
dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
qcq->sg_base = NULL;
qcq->sg_base_pa = 0;
}
devm_kfree(dev, qcq->cq.info);
qcq->cq.info = NULL;
devm_kfree(dev, qcq->q.info);
qcq->q.info = NULL;
devm_kfree(dev, qcq);
ionic_qcq_intr_free(lif, qcq);
if (qcq->cq.info) {
devm_kfree(dev, qcq->cq.info);
qcq->cq.info = NULL;
}
if (qcq->q.info) {
devm_kfree(dev, qcq->q.info);
qcq->q.info = NULL;
}
}
static void ionic_qcqs_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
unsigned int i;
if (lif->notifyqcq) {
ionic_qcq_free(lif, lif->notifyqcq);
devm_kfree(dev, lif->notifyqcq);
lif->notifyqcq = NULL;
}
if (lif->adminqcq) {
ionic_qcq_free(lif, lif->adminqcq);
devm_kfree(dev, lif->adminqcq);
lif->adminqcq = NULL;
}
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs; i++)
if (lif->rxqcqs[i].stats)
devm_kfree(dev, lif->rxqcqs[i].stats);
devm_kfree(dev, lif->rxqstats);
lif->rxqstats = NULL;
devm_kfree(dev, lif->rxqcqs);
lif->rxqcqs = NULL;
}
if (lif->txqcqs) {
for (i = 0; i < lif->nxqs; i++)
if (lif->txqcqs[i].stats)
devm_kfree(dev, lif->txqcqs[i].stats);
devm_kfree(dev, lif->txqstats);
lif->txqstats = NULL;
devm_kfree(dev, lif->txqcqs);
lif->txqcqs = NULL;
}
......@@ -368,6 +393,53 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
n_qcq->intr.index = src_qcq->intr.index;
}
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
int err;
if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
return 0;
}
err = ionic_intr_alloc(lif, &qcq->intr);
if (err) {
netdev_warn(lif->netdev, "no intr for %s: %d\n",
qcq->q.name, err);
goto err_out;
}
err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
if (err < 0) {
netdev_warn(lif->netdev, "no vector for %s: %d\n",
qcq->q.name, err);
goto err_out_free_intr;
}
qcq->intr.vector = err;
ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
err = ionic_request_irq(lif, qcq);
if (err) {
netdev_warn(lif->netdev, "irq request failed %d\n", err);
goto err_out_free_intr;
}
/* try to get the irq on the local numa node first */
qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
dev_to_node(lif->ionic->dev));
if (qcq->intr.cpu != -1)
cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
return 0;
err_out_free_intr:
ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
return err;
}
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int index,
const char *name, unsigned int flags,
......@@ -377,7 +449,6 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int pid, struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
u32 q_size, cq_size, sg_size, total_size;
struct device *dev = lif->ionic->dev;
void *q_base, *cq_base, *sg_base;
dma_addr_t cq_base_pa = 0;
......@@ -388,21 +459,6 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
*qcq = NULL;
q_size = num_descs * desc_size;
cq_size = num_descs * cq_desc_size;
sg_size = num_descs * sg_desc_size;
total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
/* Note: aligning q_size/cq_size is not enough due to cq_base
* address aligning as q_base could be not aligned to the page.
* Adding PAGE_SIZE.
*/
total_size += PAGE_SIZE;
if (flags & IONIC_QCQ_F_SG) {
total_size += ALIGN(sg_size, PAGE_SIZE);
total_size += PAGE_SIZE;
}
new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
if (!new) {
netdev_err(lif->netdev, "Cannot allocate queue structure\n");
......@@ -417,7 +473,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM;
goto err_out;
goto err_out_free_qcq;
}
new->q.type = type;
......@@ -426,41 +482,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
desc_size, sg_desc_size, pid);
if (err) {
netdev_err(lif->netdev, "Cannot initialize queue\n");
goto err_out;
goto err_out_free_q_info;
}
if (flags & IONIC_QCQ_F_INTR) {
err = ionic_intr_alloc(lif, &new->intr);
if (err) {
netdev_warn(lif->netdev, "no intr for %s: %d\n",
name, err);
goto err_out;
}
err = ionic_bus_get_irq(lif->ionic, new->intr.index);
if (err < 0) {
netdev_warn(lif->netdev, "no vector for %s: %d\n",
name, err);
goto err_out_free_intr;
}
new->intr.vector = err;
ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
IONIC_INTR_MASK_SET);
err = ionic_request_irq(lif, new);
if (err) {
netdev_warn(lif->netdev, "irq request failed %d\n", err);
goto err_out_free_intr;
}
new->intr.cpu = cpumask_local_spread(new->intr.index,
dev_to_node(dev));
if (new->intr.cpu != -1)
cpumask_set_cpu(new->intr.cpu,
&new->intr.affinity_mask);
} else {
new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
err = ionic_alloc_qcq_interrupt(lif, new);
if (err)
goto err_out;
new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
GFP_KERNEL);
......@@ -473,46 +500,67 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
goto err_out_free_irq;
goto err_out_free_cq_info;
}
new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
GFP_KERNEL);
if (!new->base) {
new->q_size = PAGE_SIZE + (num_descs * desc_size);
new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
GFP_KERNEL);
if (!new->q_base) {
netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
err = -ENOMEM;
goto err_out_free_irq;
goto err_out_free_cq_info;
}
q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
ionic_q_map(&new->q, q_base, q_base_pa);
new->total_size = total_size;
q_base = new->base;
q_base_pa = new->base_pa;
cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
GFP_KERNEL);
if (!new->cq_base) {
netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
err = -ENOMEM;
goto err_out_free_q;
}
cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
ionic_cq_map(&new->cq, cq_base, cq_base_pa);
ionic_cq_bind(&new->cq, &new->q);
if (flags & IONIC_QCQ_F_SG) {
sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
PAGE_SIZE);
sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
GFP_KERNEL);
if (!new->sg_base) {
netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
err = -ENOMEM;
goto err_out_free_cq;
}
sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
}
ionic_q_map(&new->q, q_base, q_base_pa);
ionic_cq_map(&new->cq, cq_base, cq_base_pa);
ionic_cq_bind(&new->cq, &new->q);
*qcq = new;
return 0;
err_out_free_cq:
dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
devm_kfree(dev, new->cq.info);
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR)
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
err_out_free_intr:
if (flags & IONIC_QCQ_F_INTR)
ionic_intr_free(lif->ionic, new->intr.index);
}
err_out_free_q_info:
devm_kfree(dev, new->q.info);
err_out_free_qcq:
devm_kfree(dev, new);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
......@@ -521,10 +569,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
unsigned int q_list_size;
unsigned int flags;
int err;
int i;
flags = IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
......@@ -544,63 +590,50 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(union ionic_notifyq_comp),
0, lif->kern_pid, &lif->notifyqcq);
if (err)
goto err_out_free_adminqcq;
goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
/* Let the notifyq ride on the adminq interrupt */
ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
}
q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
err = -ENOMEM;
lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!lif->txqcqs)
goto err_out_free_notifyqcq;
for (i = 0; i < lif->nxqs; i++) {
lif->txqcqs[i].stats = devm_kzalloc(dev,
sizeof(struct ionic_q_stats),
GFP_KERNEL);
if (!lif->txqcqs[i].stats)
goto err_out_free_tx_stats;
}
lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
goto err_out;
lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!lif->rxqcqs)
goto err_out_free_tx_stats;
for (i = 0; i < lif->nxqs; i++) {
lif->rxqcqs[i].stats = devm_kzalloc(dev,
sizeof(struct ionic_q_stats),
GFP_KERNEL);
if (!lif->rxqcqs[i].stats)
goto err_out_free_rx_stats;
}
goto err_out;
return 0;
lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
sizeof(struct ionic_tx_stats), GFP_KERNEL);
if (!lif->txqstats)
goto err_out;
lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
sizeof(struct ionic_rx_stats), GFP_KERNEL);
if (!lif->rxqstats)
goto err_out;
err_out_free_rx_stats:
for (i = 0; i < lif->nxqs; i++)
if (lif->rxqcqs[i].stats)
devm_kfree(dev, lif->rxqcqs[i].stats);
devm_kfree(dev, lif->rxqcqs);
lif->rxqcqs = NULL;
err_out_free_tx_stats:
for (i = 0; i < lif->nxqs; i++)
if (lif->txqcqs[i].stats)
devm_kfree(dev, lif->txqcqs[i].stats);
devm_kfree(dev, lif->txqcqs);
lif->txqcqs = NULL;
err_out_free_notifyqcq:
if (lif->notifyqcq) {
ionic_qcq_free(lif, lif->notifyqcq);
lif->notifyqcq = NULL;
}
err_out_free_adminqcq:
ionic_qcq_free(lif, lif->adminqcq);
lif->adminqcq = NULL;
return 0;
err_out:
ionic_qcqs_free(lif);
return err;
}
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
qcq->q.tail_idx = 0;
qcq->q.head_idx = 0;
qcq->cq.tail_idx = 0;
qcq->cq.done_color = 1;
memset(qcq->q_base, 0, qcq->q_size);
memset(qcq->cq_base, 0, qcq->cq_size);
memset(qcq->sg_base, 0, qcq->sg_size);
}
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct device *dev = lif->ionic->dev;
......@@ -626,10 +659,10 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
unsigned int intr_index;
int err;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
if (qcq->flags & IONIC_QCQ_F_INTR)
intr_index = qcq->intr.index;
else
intr_index = lif->rxqcqs[q->index].qcq->intr.index;
intr_index = lif->rxqcqs[q->index]->intr.index;
ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
......@@ -640,9 +673,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
q->tail = q->info;
q->head = q->tail;
cq->tail = cq->info;
ionic_qcq_sanitize(qcq);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
......@@ -697,9 +728,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
q->tail = q->info;
q->head = q->tail;
cq->tail = cq->info;
ionic_qcq_sanitize(qcq);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
......@@ -1315,6 +1344,35 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
return ionic_addr_add(netdev, mac);
}
static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
/* Stop and clean the queues before reconfiguration */
mutex_lock(&lif->queue_lock);
netif_device_detach(lif->netdev);
ionic_stop_queues(lif);
ionic_txrx_deinit(lif);
}
static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
int err;
/* Re-init the queues after reconfiguration */
/* The only way txrx_init can fail here is if communication
* with FW is suddenly broken. There's not much we can do
* at this point - error messages have already been printed,
* so we can continue on and the user can eventually do a
* DOWN and UP to try to reset and clear the issue.
*/
err = ionic_txrx_init(lif);
mutex_unlock(&lif->queue_lock);
ionic_link_status_check_request(lif);
netif_device_attach(lif->netdev);
return err;
}
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ionic_lif *lif = netdev_priv(netdev);
......@@ -1334,9 +1392,12 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
return err;
netdev->mtu = new_mtu;
err = ionic_reset_queues(lif, NULL, NULL);
/* if we're not running, nothing more to do */
if (!netif_running(netdev))
return 0;
return err;
ionic_stop_queues_reconfig(lif);
return ionic_start_queues_reconfig(lif);
}
static void ionic_tx_timeout_work(struct work_struct *ws)
......@@ -1345,9 +1406,14 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
netdev_info(lif->netdev, "Tx Timeout recovery\n");
rtnl_lock();
ionic_reset_queues(lif, NULL, NULL);
rtnl_unlock();
/* if we were stopped before this scheduled job was launched,
* don't bother the queues as they are already stopped.
*/
if (!netif_running(lif->netdev))
return;
ionic_stop_queues_reconfig(lif);
ionic_start_queues_reconfig(lif);
}
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
......@@ -1482,7 +1548,7 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
if (lif->txqcqs) {
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_disable(lif->txqcqs[i].qcq);
err = ionic_qcq_disable(lif->txqcqs[i]);
if (err == -ETIMEDOUT)
break;
}
......@@ -1490,7 +1556,7 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
err = ionic_qcq_disable(lif->rxqcqs[i]);
if (err == -ETIMEDOUT)
break;
}
......@@ -1502,18 +1568,18 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
unsigned int i;
if (lif->txqcqs) {
for (i = 0; i < lif->nxqs; i++) {
ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
ionic_tx_empty(&lif->txqcqs[i].qcq->q);
for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
ionic_tx_flush(&lif->txqcqs[i]->cq);
ionic_tx_empty(&lif->txqcqs[i]->q);
}
}
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs; i++) {
ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
ionic_rx_flush(&lif->rxqcqs[i]->cq);
ionic_rx_empty(&lif->rxqcqs[i]->q);
}
}
lif->rx_mode = 0;
......@@ -1524,16 +1590,18 @@ static void ionic_txrx_free(struct ionic_lif *lif)
unsigned int i;
if (lif->txqcqs) {
for (i = 0; i < lif->nxqs; i++) {
ionic_qcq_free(lif, lif->txqcqs[i].qcq);
lif->txqcqs[i].qcq = NULL;
for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
ionic_qcq_free(lif, lif->txqcqs[i]);
devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
lif->txqcqs[i] = NULL;
}
}
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs; i++) {
ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
lif->rxqcqs[i].qcq = NULL;
for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
ionic_qcq_free(lif, lif->rxqcqs[i]);
devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
lif->rxqcqs[i] = NULL;
}
}
}
......@@ -1561,17 +1629,16 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sizeof(struct ionic_txq_desc),
sizeof(struct ionic_txq_comp),
sg_desc_sz,
lif->kern_pid, &lif->txqcqs[i].qcq);
lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
if (flags & IONIC_QCQ_F_INTR)
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
lif->txqcqs[i].qcq->intr.index,
lif->txqcqs[i]->intr.index,
lif->tx_coalesce_hw);
lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
}
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
......@@ -1581,20 +1648,19 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sizeof(struct ionic_rxq_desc),
sizeof(struct ionic_rxq_comp),
sizeof(struct ionic_rxq_sg_desc),
lif->kern_pid, &lif->rxqcqs[i].qcq);
lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
lif->rxqcqs[i].qcq->intr.index,
lif->rxqcqs[i]->intr.index,
lif->rx_coalesce_hw);
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
lif->txqcqs[i].qcq);
ionic_link_qcq_interrupts(lif->rxqcqs[i],
lif->txqcqs[i]);
lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
}
return 0;
......@@ -1611,13 +1677,13 @@ static int ionic_txrx_init(struct ionic_lif *lif)
int err;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
if (err)
goto err_out;
err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
if (err) {
ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
goto err_out;
}
}
......@@ -1631,8 +1697,8 @@ static int ionic_txrx_init(struct ionic_lif *lif)
err_out:
while (i--) {
ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
}
return err;
......@@ -1643,15 +1709,15 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int i, err;
for (i = 0; i < lif->nxqs; i++) {
ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
ionic_rx_fill(&lif->rxqcqs[i]->q);
err = ionic_qcq_enable(lif->rxqcqs[i]);
if (err)
goto err_out;
err = ionic_qcq_enable(lif->txqcqs[i].qcq);
err = ionic_qcq_enable(lif->txqcqs[i]);
if (err) {
if (err != -ETIMEDOUT)
ionic_qcq_disable(lif->rxqcqs[i].qcq);
ionic_qcq_disable(lif->rxqcqs[i]);
goto err_out;
}
}
......@@ -1660,10 +1726,10 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out:
while (i--) {
err = ionic_qcq_disable(lif->txqcqs[i].qcq);
err = ionic_qcq_disable(lif->txqcqs[i]);
if (err == -ETIMEDOUT)
break;
err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
err = ionic_qcq_disable(lif->rxqcqs[i]);
if (err == -ETIMEDOUT)
break;
}
......@@ -1688,7 +1754,7 @@ static int ionic_start_queues(struct ionic_lif *lif)
return 0;
}
int ionic_open(struct net_device *netdev)
static int ionic_open(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
int err;
......@@ -1734,7 +1800,7 @@ static void ionic_stop_queues(struct ionic_lif *lif)
ionic_txrx_disable(lif);
}
int ionic_stop(struct net_device *netdev)
static int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
......@@ -2016,35 +2082,210 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_get_vf_stats = ionic_get_vf_stats,
};
int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
{
bool running;
int err = 0;
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
/* only swapping the queues, not the napi, flags, or other stuff */
swap(a->q.num_descs, b->q.num_descs);
swap(a->q.base, b->q.base);
swap(a->q.base_pa, b->q.base_pa);
swap(a->q.info, b->q.info);
swap(a->q_base, b->q_base);
swap(a->q_base_pa, b->q_base_pa);
swap(a->q_size, b->q_size);
swap(a->q.sg_base, b->q.sg_base);
swap(a->q.sg_base_pa, b->q.sg_base_pa);
swap(a->sg_base, b->sg_base);
swap(a->sg_base_pa, b->sg_base_pa);
swap(a->sg_size, b->sg_size);
swap(a->cq.num_descs, b->cq.num_descs);
swap(a->cq.base, b->cq.base);
swap(a->cq.base_pa, b->cq.base_pa);
swap(a->cq.info, b->cq.info);
swap(a->cq_base, b->cq_base);
swap(a->cq_base_pa, b->cq_base_pa);
swap(a->cq_size, b->cq_size);
}
int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_queue_params *qparam)
{
struct ionic_qcq **tx_qcqs = NULL;
struct ionic_qcq **rx_qcqs = NULL;
unsigned int sg_desc_sz;
unsigned int flags;
int err = -ENOMEM;
unsigned int i;
mutex_lock(&lif->queue_lock);
running = netif_running(lif->netdev);
if (running) {
netif_device_detach(lif->netdev);
err = ionic_stop(lif->netdev);
/* allocate temporary qcq arrays to hold new queue structs */
if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!tx_qcqs)
goto err_out;
}
if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!rx_qcqs)
goto err_out;
}
/* allocate new desc_info and rings, but leave the interrupt setup
* until later so as to not mess with the still-running queues
*/
if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
sizeof(struct ionic_txq_sg_desc_v1))
sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
else
sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
if (tx_qcqs) {
for (i = 0; i < qparam->nxqs; i++) {
flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
qparam->ntxq_descs,
sizeof(struct ionic_txq_desc),
sizeof(struct ionic_txq_comp),
sg_desc_sz,
lif->kern_pid, &tx_qcqs[i]);
if (err)
goto err_out;
}
}
if (rx_qcqs) {
for (i = 0; i < qparam->nxqs; i++) {
flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
qparam->nrxq_descs,
sizeof(struct ionic_rxq_desc),
sizeof(struct ionic_rxq_comp),
sizeof(struct ionic_rxq_sg_desc),
lif->kern_pid, &rx_qcqs[i]);
if (err)
goto err_out;
}
}
/* stop and clean the queues */
ionic_stop_queues_reconfig(lif);
if (qparam->nxqs != lif->nxqs) {
err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
if (err)
goto reset_out;
goto err_out_reinit_unlock;
err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
if (err) {
netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
goto err_out_reinit_unlock;
}
}
if (cb)
cb(lif, arg);
/* swap new desc_info and rings, keeping existing interrupt config */
if (tx_qcqs) {
lif->ntxq_descs = qparam->ntxq_descs;
for (i = 0; i < qparam->nxqs; i++)
ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
}
if (running) {
err = ionic_open(lif->netdev);
netif_device_attach(lif->netdev);
if (rx_qcqs) {
lif->nrxq_descs = qparam->nrxq_descs;
for (i = 0; i < qparam->nxqs; i++)
ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
}
reset_out:
mutex_unlock(&lif->queue_lock);
/* if we need to change the interrupt layout, this is the time */
if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
qparam->nxqs != lif->nxqs) {
if (qparam->intr_split) {
set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
} else {
clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
}
/* clear existing interrupt assignments */
for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
ionic_qcq_intr_free(lif, lif->txqcqs[i]);
ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
}
/* re-assign the interrupts */
for (i = 0; i < qparam->nxqs; i++) {
lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
lif->rxqcqs[i]->intr.index,
lif->rx_coalesce_hw);
if (qparam->intr_split) {
lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
lif->txqcqs[i]->intr.index,
lif->tx_coalesce_hw);
} else {
lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
}
}
}
swap(lif->nxqs, qparam->nxqs);
err_out_reinit_unlock:
/* re-init the queues, but don't lose an error code */
if (err)
ionic_start_queues_reconfig(lif);
else
err = ionic_start_queues_reconfig(lif);
err_out:
/* free old allocs without cleaning intr */
for (i = 0; i < qparam->nxqs; i++) {
if (tx_qcqs && tx_qcqs[i]) {
tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
ionic_qcq_free(lif, tx_qcqs[i]);
devm_kfree(lif->ionic->dev, tx_qcqs[i]);
tx_qcqs[i] = NULL;
}
if (rx_qcqs && rx_qcqs[i]) {
rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
ionic_qcq_free(lif, rx_qcqs[i]);
devm_kfree(lif->ionic->dev, rx_qcqs[i]);
rx_qcqs[i] = NULL;
}
}
/* free q array */
if (rx_qcqs) {
devm_kfree(lif->ionic->dev, rx_qcqs);
rx_qcqs = NULL;
}
if (tx_qcqs) {
devm_kfree(lif->ionic->dev, tx_qcqs);
tx_qcqs = NULL;
}
/* clean the unused dma and info allocations when new set is smaller
* than the full array, but leave the qcq shells in place
*/
for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
ionic_qcq_free(lif, lif->txqcqs[i]);
lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
ionic_qcq_free(lif, lif->rxqcqs[i]);
}
return err;
}
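ionic_reconfigure_queues() above follows an allocate-then-swap approach: the replacement rings are allocated while traffic is still flowing, the queues are stopped only briefly, the new ring memory and sizes are swapped into the existing qcq shells (NAPI, flags and interrupt wiring stay in place unless the interrupt layout changes), and the old memory is freed at the end whether or not the swap happened. A stripped-down sketch of that shape, with illustrative types and names rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

struct qcq {
	void *q_base;          /* ring memory */
	size_t q_size;
	unsigned int num_descs;
	int irq;               /* interrupt/NAPI state: deliberately NOT swapped */
};

/* swap only the ring resources, keeping the shell's interrupt setup */
static void swap_rings(struct qcq *a, struct qcq *b)
{
	struct qcq tmp = *a;

	a->q_base = b->q_base;  a->q_size = b->q_size;  a->num_descs = b->num_descs;
	b->q_base = tmp.q_base; b->q_size = tmp.q_size; b->num_descs = tmp.num_descs;
}

static int resize_ring(struct qcq *live, unsigned int new_descs, size_t desc_size)
{
	struct qcq shadow = { .num_descs = new_descs };

	/* 1) allocate the replacement ring while the old one is still live */
	shadow.q_size = new_descs * desc_size;
	shadow.q_base = calloc(1, shadow.q_size);
	if (!shadow.q_base)
		return -1;             /* nothing about 'live' was touched */

	/* 2) (driver would stop/quiesce the queue here) */
	/* 3) swap ring memory into the live shell, keeping irq/NAPI as-is */
	swap_rings(live, &shadow);
	/* 4) (driver would re-init and restart the queue here) */

	/* 5) free the old ring, now sitting in the shadow struct */
	free(shadow.q_base);
	return 0;
}

int main(void)
{
	struct qcq q = { .num_descs = 64, .irq = 42 };

	q.q_size = q.num_descs * 16;
	q.q_base = calloc(1, q.q_size);
	if (!q.q_base)
		return 1;
	if (resize_ring(&q, 256, 16)) {
		free(q.q_base);
		return 1;
	}
	printf("descs=%u irq=%d\n", q.num_descs, q.irq);
	free(q.q_base);
	return 0;
}
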
static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
int ionic_lif_alloc(struct ionic *ionic)
{
struct device *dev = ionic->dev;
union ionic_lif_identity *lid;
......@@ -2055,7 +2296,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lid = kzalloc(sizeof(*lid), GFP_KERNEL);
if (!lid)
return ERR_PTR(-ENOMEM);
return -ENOMEM;
netdev = alloc_etherdev_mqs(sizeof(*lif),
ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
......@@ -2069,7 +2310,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif = netdev_priv(netdev);
lif->netdev = netdev;
ionic->master_lif = lif;
ionic->lif = lif;
netdev->netdev_ops = &ionic_netdev_ops;
ionic_ethtool_set_ops(netdev);
......@@ -2079,7 +2320,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->identity = lid;
lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
ionic_lif_identify(ionic, lif->lif_type, lif->identity);
lif->netdev->min_mtu = le32_to_cpu(lif->identity->eth.min_frame_size);
lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
le32_to_cpu(lif->identity->eth.min_frame_size));
lif->netdev->max_mtu =
le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
......@@ -2087,7 +2329,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->nxqs = ionic->ntxqs_per_lif;
lif->ionic = ionic;
lif->index = index;
lif->index = 0;
lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;
......@@ -2099,7 +2341,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
snprintf(lif->name, sizeof(lif->name), "lif%u", index);
snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
spin_lock_init(&lif->adminq_lock);
......@@ -2119,7 +2361,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
ionic_debugfs_add_lif(lif);
/* allocate queues */
/* allocate control queues and txrx queue arrays */
ionic_lif_queue_identify(lif);
err = ionic_qcqs_alloc(lif);
if (err)
goto err_out_free_lif_info;
......@@ -2138,9 +2381,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
}
netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
list_add_tail(&lif->list, &ionic->lifs);
return lif;
return 0;
err_out_free_qcqs:
ionic_qcqs_free(lif);
......@@ -2154,27 +2395,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
err_out_free_lid:
kfree(lid);
return ERR_PTR(err);
}
int ionic_lifs_alloc(struct ionic *ionic)
{
struct ionic_lif *lif;
INIT_LIST_HEAD(&ionic->lifs);
/* only build the first lif, others are for later features */
set_bit(0, ionic->lifbits);
lif = ionic_lif_alloc(ionic, 0);
if (IS_ERR_OR_NULL(lif)) {
clear_bit(0, ionic->lifbits);
return -ENOMEM;
}
ionic_lif_queue_identify(lif);
return 0;
return err;
}
static void ionic_lif_reset(struct ionic_lif *lif)
......@@ -2209,7 +2430,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
ionic_txrx_deinit(lif);
ionic_txrx_free(lif);
}
ionic_lifs_deinit(ionic);
ionic_lif_deinit(lif);
ionic_reset(ionic);
ionic_qcqs_free(lif);
......@@ -2232,7 +2453,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
if (err)
goto err_out;
err = ionic_lifs_init(ionic);
err = ionic_lif_init(lif);
if (err)
goto err_qcqs_free;
......@@ -2261,14 +2482,14 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
err_txrx_free:
ionic_txrx_free(lif);
err_lifs_deinit:
ionic_lifs_deinit(ionic);
ionic_lif_deinit(lif);
err_qcqs_free:
ionic_qcqs_free(lif);
err_out:
dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
static void ionic_lif_free(struct ionic_lif *lif)
void ionic_lif_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
......@@ -2297,23 +2518,10 @@ static void ionic_lif_free(struct ionic_lif *lif)
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
list_del(&lif->list);
free_netdev(lif->netdev);
}
void ionic_lifs_free(struct ionic *ionic)
{
struct list_head *cur, *tmp;
struct ionic_lif *lif;
list_for_each_safe(cur, tmp, &ionic->lifs) {
lif = list_entry(cur, struct ionic_lif, list);
ionic_lif_free(lif);
}
}
static void ionic_lif_deinit(struct ionic_lif *lif)
void ionic_lif_deinit(struct ionic_lif *lif)
{
if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
return;
......@@ -2334,17 +2542,6 @@ static void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_reset(lif);
}
void ionic_lifs_deinit(struct ionic *ionic)
{
struct list_head *cur, *tmp;
struct ionic_lif *lif;
list_for_each_safe(cur, tmp, &ionic->lifs) {
lif = list_entry(cur, struct ionic_lif, list);
ionic_lif_deinit(lif);
}
}
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
......@@ -2490,7 +2687,7 @@ static int ionic_station_set(struct ionic_lif *lif)
return 0;
}
static int ionic_lif_init(struct ionic_lif *lif)
int ionic_lif_init(struct ionic_lif *lif)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
......@@ -2580,22 +2777,6 @@ static int ionic_lif_init(struct ionic_lif *lif)
return err;
}
int ionic_lifs_init(struct ionic *ionic)
{
struct list_head *cur, *tmp;
struct ionic_lif *lif;
int err;
list_for_each_safe(cur, tmp, &ionic->lifs) {
lif = list_entry(cur, struct ionic_lif, list);
err = ionic_lif_init(lif);
if (err)
return err;
}
return 0;
}
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
......@@ -2644,45 +2825,41 @@ static int ionic_lif_notify(struct notifier_block *nb,
return NOTIFY_DONE;
}
int ionic_lifs_register(struct ionic *ionic)
int ionic_lif_register(struct ionic_lif *lif)
{
int err;
INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
ionic->nb.notifier_call = ionic_lif_notify;
lif->ionic->nb.notifier_call = ionic_lif_notify;
err = register_netdevice_notifier(&ionic->nb);
err = register_netdevice_notifier(&lif->ionic->nb);
if (err)
ionic->nb.notifier_call = NULL;
lif->ionic->nb.notifier_call = NULL;
/* only register LIF0 for now */
err = register_netdev(ionic->master_lif->netdev);
err = register_netdev(lif->netdev);
if (err) {
dev_err(ionic->dev, "Cannot register net device, aborting\n");
dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
return err;
}
ionic->master_lif->registered = true;
ionic_lif_set_netdev_info(ionic->master_lif);
lif->registered = true;
ionic_lif_set_netdev_info(lif);
return 0;
}
void ionic_lifs_unregister(struct ionic *ionic)
void ionic_lif_unregister(struct ionic_lif *lif)
{
if (ionic->nb.notifier_call) {
unregister_netdevice_notifier(&ionic->nb);
cancel_work_sync(&ionic->nb_work);
ionic->nb.notifier_call = NULL;
if (lif->ionic->nb.notifier_call) {
unregister_netdevice_notifier(&lif->ionic->nb);
cancel_work_sync(&lif->ionic->nb_work);
lif->ionic->nb.notifier_call = NULL;
}
/* There is only one lif ever registered in the
* current model, so don't bother searching the
* ionic->lif for candidates to unregister
*/
if (ionic->master_lif &&
ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(ionic->master_lif->netdev);
if (lif->netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(lif->netdev);
lif->registered = false;
}
static void ionic_lif_queue_identify(struct ionic_lif *lif)
......@@ -2801,7 +2978,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
return 0;
}
int ionic_lifs_size(struct ionic *ionic)
int ionic_lif_size(struct ionic *ionic)
{
struct ionic_identity *ident = &ionic->ident;
unsigned int nintrs, dev_nintrs;
......
......@@ -56,35 +56,28 @@ struct ionic_napi_stats {
u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
};
struct ionic_q_stats {
union {
struct ionic_tx_stats tx;
struct ionic_rx_stats rx;
};
};
struct ionic_qcq {
void *base;
dma_addr_t base_pa;
unsigned int total_size;
void *q_base;
dma_addr_t q_base_pa;
u32 q_size;
void *cq_base;
dma_addr_t cq_base_pa;
u32 cq_size;
void *sg_base;
dma_addr_t sg_base_pa;
u32 sg_size;
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
struct napi_struct napi;
struct ionic_napi_stats napi_stats;
struct ionic_q_stats *stats;
unsigned int flags;
struct dentry *dentry;
};
struct ionic_qcqst {
struct ionic_qcq *qcq;
struct ionic_q_stats *stats;
};
#define q_to_qcq(q) container_of(q, struct ionic_qcq, q)
#define q_to_tx_stats(q) (&q_to_qcq(q)->stats->tx)
#define q_to_rx_stats(q) (&q_to_qcq(q)->stats->rx)
#define q_to_tx_stats(q) (&(q)->lif->txqstats[(q)->index])
#define q_to_rx_stats(q) (&(q)->lif->rxqstats[(q)->index])
#define napi_to_qcq(napi) container_of(napi, struct ionic_qcq, napi)
#define napi_to_cq(napi) (&napi_to_qcq(napi)->cq)
......@@ -170,8 +163,10 @@ struct ionic_lif {
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
struct ionic_qcqst *txqcqs;
struct ionic_qcqst *rxqcqs;
struct ionic_qcq **txqcqs;
struct ionic_tx_stats *txqstats;
struct ionic_qcq **rxqcqs;
struct ionic_rx_stats *rxqstats;
u64 last_eid;
unsigned int neqs;
unsigned int nxqs;
......@@ -212,12 +207,21 @@ struct ionic_lif {
struct work_struct tx_timeout_work;
};
#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq)
#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq)
#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
struct ionic_queue_params {
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
unsigned int intr_split;
};
static inline void ionic_init_queue_params(struct ionic_lif *lif,
struct ionic_queue_params *qparam)
{
qparam->nxqs = lif->nxqs;
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
}
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
{
......@@ -242,34 +246,33 @@ void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
struct ionic_deferred_work *work);
int ionic_lifs_alloc(struct ionic *ionic);
void ionic_lifs_free(struct ionic *ionic);
void ionic_lifs_deinit(struct ionic *ionic);
int ionic_lifs_init(struct ionic *ionic);
int ionic_lifs_register(struct ionic *ionic);
void ionic_lifs_unregister(struct ionic *ionic);
int ionic_lif_alloc(struct ionic *ionic);
int ionic_lif_init(struct ionic_lif *lif);
void ionic_lif_free(struct ionic_lif *lif);
void ionic_lif_deinit(struct ionic_lif *lif);
int ionic_lif_register(struct ionic_lif *lif);
void ionic_lif_unregister(struct ionic_lif *lif);
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lif_ident);
int ionic_lifs_size(struct ionic *ionic);
int ionic_lif_size(struct ionic *ionic);
int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
const u8 *key, const u32 *indir);
int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_queue_params *qparam);
int ionic_open(struct net_device *netdev);
int ionic_stop(struct net_device *netdev);
int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg);
static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
struct ionic_txq_desc *desc, bool dbell)
static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
{
u8 num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
& IONIC_TXQ_DESC_NSGE_MASK);
struct ionic_txq_desc *desc = &q->txq[q->head_idx];
u8 num_sg_elems;
qcq->q.dbell_count += dbell;
q->dbell_count += dbell;
num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
& IONIC_TXQ_DESC_NSGE_MASK);
if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;
qcq->stats->tx.sg_cntr[num_sg_elems]++;
q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++;
}
static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
......@@ -284,10 +287,9 @@ static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
}
#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
#define DEBUG_STATS_RX_BUFF_CNT(qcq) ((qcq)->stats->rx.buffers_posted++)
#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
#define DEBUG_STATS_INTR_REARM(intr) ((intr)->rearm_count++)
#define DEBUG_STATS_TXQ_POST(qcq, txdesc, dbell) \
debug_stats_txq_post(qcq, txdesc, dbell)
#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
debug_stats_napi_poll(qcq, work_done)
......
......@@ -181,15 +181,17 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
static void ionic_adminq_flush(struct ionic_lif *lif)
{
struct ionic_queue *adminq = &lif->adminqcq->q;
struct ionic_queue *q = &lif->adminqcq->q;
struct ionic_desc_info *desc_info;
spin_lock(&lif->adminq_lock);
while (adminq->tail != adminq->head) {
memset(adminq->tail->desc, 0, sizeof(union ionic_adminq_cmd));
adminq->tail->cb = NULL;
adminq->tail->cb_arg = NULL;
adminq->tail = adminq->tail->next;
while (q->tail_idx != q->head_idx) {
desc_info = &q->info[q->tail_idx];
memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd));
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
}
spin_unlock(&lif->adminq_lock);
}
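The index-based bookkeeping above (and in the tx/rx paths below) replaces the old linked tail/head descriptor pointers with plain indices, wrapping by masking with num_descs - 1. A stand-alone illustration of the wrap arithmetic, assuming num_descs is a power of two (which is what makes the mask equivalent to a modulo):

/* Illustrative only: next ring index, assuming power-of-two ring size. */
static u16 example_ring_next(u16 idx, u16 num_descs)
{
	/* e.g. num_descs = 64: (63 + 1) & 63 == 0 */
	return (idx + 1) & (num_descs - 1);
}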
......@@ -245,7 +247,8 @@ static void ionic_adminq_cb(struct ionic_queue *q,
static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
struct ionic_queue *adminq;
struct ionic_desc_info *desc_info;
struct ionic_queue *q;
int err = 0;
WARN_ON(in_interrupt());
......@@ -253,10 +256,10 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (!lif->adminqcq)
return -EIO;
adminq = &lif->adminqcq->q;
q = &lif->adminqcq->q;
spin_lock(&lif->adminq_lock);
if (!ionic_q_has_space(adminq, 1)) {
if (!ionic_q_has_space(q, 1)) {
err = -ENOSPC;
goto err_out;
}
......@@ -265,13 +268,14 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (err)
goto err_out;
memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd));
desc_info = &q->info[q->head_idx];
memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->cmd, sizeof(ctx->cmd), true);
ionic_q_post(adminq, true, ionic_adminq_cb, ctx);
ionic_q_post(q, true, ionic_adminq_cb, ctx);
err_out:
spin_unlock(&lif->adminq_lock);
......
......@@ -179,36 +179,28 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
static void ionic_get_lif_stats(struct ionic_lif *lif,
struct ionic_lif_sw_stats *stats)
{
struct ionic_tx_stats *tstats;
struct ionic_rx_stats *rstats;
struct ionic_tx_stats *txstats;
struct ionic_rx_stats *rxstats;
struct rtnl_link_stats64 ns;
struct ionic_qcq *txqcq;
struct ionic_qcq *rxqcq;
int q_num;
memset(stats, 0, sizeof(*stats));
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
txqcq = lif_to_txqcq(lif, q_num);
if (txqcq && txqcq->stats) {
tstats = &txqcq->stats->tx;
stats->tx_packets += tstats->pkts;
stats->tx_bytes += tstats->bytes;
stats->tx_tso += tstats->tso;
stats->tx_tso_bytes += tstats->tso_bytes;
stats->tx_csum_none += tstats->csum_none;
stats->tx_csum += tstats->csum;
}
rxqcq = lif_to_rxqcq(lif, q_num);
if (rxqcq && rxqcq->stats) {
rstats = &rxqcq->stats->rx;
stats->rx_packets += rstats->pkts;
stats->rx_bytes += rstats->bytes;
stats->rx_csum_none += rstats->csum_none;
stats->rx_csum_complete += rstats->csum_complete;
stats->rx_csum_error += rstats->csum_error;
}
txstats = &lif->txqstats[q_num];
stats->tx_packets += txstats->pkts;
stats->tx_bytes += txstats->bytes;
stats->tx_tso += txstats->tso;
stats->tx_tso_bytes += txstats->tso_bytes;
stats->tx_csum_none += txstats->csum_none;
stats->tx_csum += txstats->csum;
rxstats = &lif->rxqstats[q_num];
stats->rx_packets += rxstats->pkts;
stats->rx_bytes += rxstats->bytes;
stats->rx_csum_none += rxstats->csum_none;
stats->rx_csum_complete += rxstats->csum_complete;
stats->rx_csum_error += rxstats->csum_error;
}
ionic_get_stats64(lif->netdev, &ns);
......@@ -371,7 +363,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
txstats = &lif_to_txstats(lif, q_num);
txstats = &lif->txqstats[q_num];
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
**buf = IONIC_READ_STAT64(txstats,
......@@ -381,7 +373,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
txqcq = lif_to_txqcq(lif, q_num);
txqcq = lif->txqcqs[q_num];
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
&ionic_txq_stats_desc[i]);
......@@ -405,7 +397,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
rxstats = &lif_to_rxstats(lif, q_num);
rxstats = &lif->rxqstats[q_num];
for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
**buf = IONIC_READ_STAT64(rxstats,
......@@ -415,7 +407,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
rxqcq = lif_to_rxqcq(lif, q_num);
rxqcq = lif->rxqcqs[q_num];
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
&ionic_dbg_cq_stats_desc[i]);
......
......@@ -22,7 +22,7 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);
DEBUG_STATS_TXQ_POST(q, ring_dbell);
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
......@@ -32,7 +32,7 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
{
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
DEBUG_STATS_RX_BUFF_CNT(q);
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
......@@ -49,7 +49,7 @@ static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
struct sk_buff *skb;
netdev = lif->netdev;
stats = q_to_rx_stats(q);
stats = &q->lif->rxqstats[q->index];
if (frags)
skb = napi_get_frags(&q_to_qcq(q)->napi);
......@@ -235,14 +235,14 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return false;
/* check for empty queue */
if (q->tail->index == q->head->index)
if (q->tail_idx == q->head_idx)
return false;
desc_info = q->tail;
desc_info = &q->info[q->tail_idx];
if (desc_info->index != le16_to_cpu(comp->comp_index))
return false;
q->tail = desc_info->next;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */
ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
......@@ -338,7 +338,7 @@ void ionic_rx_fill(struct ionic_queue *q)
for (i = ionic_q_space_avail(q); i; i--) {
remain_len = len;
desc_info = q->head;
desc_info = &q->info[q->head_idx];
desc = desc_info->desc;
sg_desc = desc_info->sg_desc;
page_info = &desc_info->pages[0];
......@@ -387,7 +387,7 @@ void ionic_rx_fill(struct ionic_queue *q)
}
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
q->dbval | q->head->index);
q->dbval | q->head_idx);
}
static void ionic_rx_fill_cb(void *arg)
......@@ -397,25 +397,29 @@ static void ionic_rx_fill_cb(void *arg)
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *cur;
struct ionic_desc_info *desc_info;
struct ionic_rxq_desc *desc;
unsigned int i;
u16 idx;
for (cur = q->tail; cur != q->head; cur = cur->next) {
desc = cur->desc;
idx = q->tail_idx;
while (idx != q->head_idx) {
desc_info = &q->info[idx];
desc = desc_info->desc;
desc->addr = 0;
desc->len = 0;
for (i = 0; i < cur->npages; i++) {
if (likely(cur->pages[i].page)) {
ionic_rx_page_free(q, cur->pages[i].page,
cur->pages[i].dma_addr);
cur->pages[i].page = NULL;
cur->pages[i].dma_addr = 0;
for (i = 0; i < desc_info->npages; i++) {
if (likely(desc_info->pages[i].page)) {
ionic_rx_page_free(q, desc_info->pages[i].page,
desc_info->pages[i].dma_addr);
desc_info->pages[i].page = NULL;
desc_info->pages[i].dma_addr = 0;
}
}
cur->cb_arg = NULL;
desc_info->cb_arg = NULL;
idx = (idx + 1) & (q->num_descs - 1);
}
}
......@@ -502,7 +506,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
txcq = &lif->txqcqs[qi].qcq->cq;
txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
ionic_tx_service, NULL, NULL);
......@@ -630,9 +634,9 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
* several q entries completed for each cq completion
*/
do {
desc_info = q->tail;
q->tail = desc_info->next;
ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg);
desc_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
} while (desc_info->index != le16_to_cpu(comp->comp_index));
......@@ -658,9 +662,9 @@ void ionic_tx_empty(struct ionic_queue *q)
int done = 0;
/* walk the not completed tx entries, if any */
while (q->head != q->tail) {
desc_info = q->tail;
q->tail = desc_info->next;
while (q->head_idx != q->tail_idx) {
desc_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
......@@ -748,8 +752,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
struct ionic_txq_sg_elem **elem)
{
struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
struct ionic_txq_desc *desc = q->head->desc;
struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
*elem = sg_desc->elems;
return desc;
......@@ -758,13 +762,13 @@ static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_desc_info *abort = q->head;
struct ionic_desc_info *rewind_desc_info;
struct device *dev = q->lif->ionic->dev;
struct ionic_desc_info *rewind = abort;
struct ionic_txq_sg_elem *elem;
struct ionic_txq_desc *desc;
unsigned int frag_left = 0;
unsigned int offset = 0;
u16 abort = q->head_idx;
unsigned int len_left;
dma_addr_t desc_addr;
unsigned int hdrlen;
......@@ -772,6 +776,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
unsigned int seglen;
u64 total_bytes = 0;
u64 total_pkts = 0;
u16 rewind = abort;
unsigned int left;
unsigned int len;
unsigned int mss;
......@@ -916,19 +921,20 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
return 0;
err_out_abort:
while (rewind->desc != q->head->desc) {
ionic_tx_clean(q, rewind, NULL, NULL);
rewind = rewind->next;
while (rewind != q->head_idx) {
rewind_desc_info = &q->info[rewind];
ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
rewind = (rewind + 1) & (q->num_descs - 1);
}
q->head = abort;
q->head_idx = abort;
return -ENOMEM;
}
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_txq_desc *desc = q->head->desc;
struct device *dev = q->lif->ionic->dev;
dma_addr_t dma_addr;
bool has_vlan;
......@@ -967,8 +973,8 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_txq_desc *desc = q->head->desc;
struct device *dev = q->lif->ionic->dev;
dma_addr_t dma_addr;
bool has_vlan;
......@@ -1002,7 +1008,7 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
unsigned int len_left = skb->len - skb_headlen(skb);
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
......@@ -1111,9 +1117,9 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
if (unlikely(!lif_to_txqcq(lif, queue_index)))
if (unlikely(queue_index >= lif->nxqs))
queue_index = 0;
q = lif_to_txq(lif, queue_index);
q = &lif->txqcqs[queue_index]->q;
ndescs = ionic_tx_descs_needed(q, skb);
if (ndescs < 0)
......