Commit f3fb15b9 authored by David S. Miller

Merge branch 'ionic-memory-usage-rework'

Shannon Nelson says:

====================
ionic memory usage rework

Previous review comments [1],[2] have suggested that this driver
needs to rework how queue resources are managed and reconfigured
so that we don't do a full driver reset and can better handle
potential allocation failures.  This patchset is intended to
address those comments.

The first few patches clean up some general issues and
simplify some of the memory structures.  The last four patches
specifically address queue parameter changes without a full
ionic_stop()/ionic_open() cycle.

[1] https://lore.kernel.org/netdev/20200706103305.182bd727@kicinski-fedora-pc1c0hjn.dhcp.thefacebook.com/
[2] https://lore.kernel.org/netdev/20200724.194417.2151242753657227232.davem@davemloft.net/

v3: use PTR_ALIGN without typecast
    fix up Neel's attribution

v2: use PTR_ALIGN
    recovery if netif_set_real_num_tx/rx_queues fails
    less racy queue bring-up after reconfig
    common-ize the reconfig queue stop and start
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 161e3106 6f7d6f0f
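
For orientation before the diff: the ethtool handlers reworked below now
collect the requested settings into a struct ionic_queue_params and pass
the whole set to ionic_reconfigure_queues(), rather than tearing the
interface down and back up.  A condensed sketch of that flow, distilled
from the ionic_set_ringparam() hunk further down (the helper name here is
invented; the not-running shortcut and log messages are elided):

	/* Condensed, illustrative sketch of the new reconfigure flow; the
	 * helper name is invented, the called functions come from the diff.
	 */
	static int ionic_set_ringparam_sketch(struct ionic_lif *lif,
					      struct ethtool_ringparam *ring)
	{
		struct ionic_queue_params qparam;

		/* start from the currently active queue settings */
		ionic_init_queue_params(lif, &qparam);

		/* apply only the requested descriptor counts */
		qparam.ntxq_descs = ring->tx_pending;
		qparam.nrxq_descs = ring->rx_pending;

		/* swap in the new queues; on error the change is canceled */
		return ionic_reconfigure_queues(lif, &qparam);
	}
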
......@@ -42,13 +42,11 @@ struct ionic {
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
struct ionic_identity ident;
struct list_head lifs;
struct ionic_lif *master_lif;
struct ionic_lif *lif;
unsigned int nnqs_per_lif;
unsigned int neqs_per_lif;
unsigned int ntxqs_per_lif;
unsigned int nrxqs_per_lif;
DECLARE_BITMAP(lifbits, IONIC_LIFS_MAX);
unsigned int nintrs;
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
......
......@@ -294,21 +294,21 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_port_reset;
}
err = ionic_lifs_size(ionic);
err = ionic_lif_size(ionic);
if (err) {
dev_err(dev, "Cannot size LIFs: %d, aborting\n", err);
dev_err(dev, "Cannot size LIF: %d, aborting\n", err);
goto err_out_port_reset;
}
err = ionic_lifs_alloc(ionic);
err = ionic_lif_alloc(ionic);
if (err) {
dev_err(dev, "Cannot allocate LIFs: %d, aborting\n", err);
dev_err(dev, "Cannot allocate LIF: %d, aborting\n", err);
goto err_out_free_irqs;
}
err = ionic_lifs_init(ionic);
err = ionic_lif_init(ionic->lif);
if (err) {
dev_err(dev, "Cannot init LIFs: %d, aborting\n", err);
dev_err(dev, "Cannot init LIF: %d, aborting\n", err);
goto err_out_free_lifs;
}
......@@ -321,9 +321,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot enable existing VFs: %d\n", err);
}
err = ionic_lifs_register(ionic);
err = ionic_lif_register(ionic->lif);
if (err) {
dev_err(dev, "Cannot register LIFs: %d, aborting\n", err);
dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
goto err_out_deinit_lifs;
}
......@@ -336,12 +336,13 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_deregister_lifs:
ionic_lifs_unregister(ionic);
ionic_lif_unregister(ionic->lif);
err_out_deinit_lifs:
ionic_vf_dealloc(ionic);
ionic_lifs_deinit(ionic);
ionic_lif_deinit(ionic->lif);
err_out_free_lifs:
ionic_lifs_free(ionic);
ionic_lif_free(ionic->lif);
ionic->lif = NULL;
err_out_free_irqs:
ionic_bus_free_irq_vectors(ionic);
err_out_port_reset:
......@@ -377,11 +378,12 @@ static void ionic_remove(struct pci_dev *pdev)
if (!ionic)
return;
if (ionic->master_lif) {
if (ionic->lif) {
ionic_devlink_unregister(ionic);
ionic_lifs_unregister(ionic);
ionic_lifs_deinit(ionic);
ionic_lifs_free(ionic);
ionic_lif_unregister(ionic->lif);
ionic_lif_deinit(ionic->lif);
ionic_lif_free(ionic->lif);
ionic->lif = NULL;
ionic_bus_free_irq_vectors(ionic);
}
......
......@@ -76,7 +76,7 @@ static int q_tail_show(struct seq_file *seq, void *v)
{
struct ionic_queue *q = seq->private;
seq_printf(seq, "%d\n", q->tail->index);
seq_printf(seq, "%d\n", q->tail_idx);
return 0;
}
......@@ -86,7 +86,7 @@ static int q_head_show(struct seq_file *seq, void *v)
{
struct ionic_queue *q = seq->private;
seq_printf(seq, "%d\n", q->head->index);
seq_printf(seq, "%d\n", q->head_idx);
return 0;
}
......@@ -96,7 +96,7 @@ static int cq_tail_show(struct seq_file *seq, void *v)
{
struct ionic_cq *cq = seq->private;
seq_printf(seq, "%d\n", cq->tail->index);
seq_printf(seq, "%d\n", cq->tail_idx);
return 0;
}
......@@ -112,7 +112,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct dentry *q_dentry, *cq_dentry, *intr_dentry, *stats_dentry;
struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
struct dentry *intr_dentry, *stats_dentry;
struct ionic_dev *idev = &lif->ionic->idev;
struct debugfs_regset32 *intr_ctrl_regset;
struct ionic_intr_info *intr = &qcq->intr;
......@@ -121,21 +122,21 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
struct ionic_queue *q = &qcq->q;
struct ionic_cq *cq = &qcq->cq;
qcq->dentry = debugfs_create_dir(q->name, lif->dentry);
qcq_dentry = debugfs_create_dir(q->name, lif->dentry);
if (IS_ERR_OR_NULL(qcq_dentry))
return;
qcq->dentry = qcq_dentry;
debugfs_create_x32("total_size", 0400, qcq->dentry, &qcq->total_size);
debugfs_create_x64("base_pa", 0400, qcq->dentry, &qcq->base_pa);
debugfs_create_x64("q_base_pa", 0400, qcq_dentry, &qcq->q_base_pa);
debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);
debugfs_create_x64("cq_base_pa", 0400, qcq_dentry, &qcq->cq_base_pa);
debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);
debugfs_create_x64("sg_base_pa", 0400, qcq_dentry, &qcq->sg_base_pa);
debugfs_create_x32("sg_size", 0400, qcq_dentry, &qcq->sg_size);
q_dentry = debugfs_create_dir("q", qcq->dentry);
debugfs_create_u32("index", 0400, q_dentry, &q->index);
debugfs_create_x64("base_pa", 0400, q_dentry, &q->base_pa);
if (qcq->flags & IONIC_QCQ_F_SG) {
debugfs_create_x64("sg_base_pa", 0400, q_dentry,
&q->sg_base_pa);
debugfs_create_u32("sg_desc_size", 0400, q_dentry,
&q->sg_desc_size);
}
debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
......
......@@ -21,8 +21,8 @@ static void ionic_watchdog_cb(struct timer_list *t)
hb = ionic_heartbeat_check(ionic);
if (hb >= 0 && ionic->master_lif)
ionic_link_status_check_request(ionic->master_lif);
if (hb >= 0 && ionic->lif)
ionic_link_status_check_request(ionic->lif);
}
void ionic_init_devinfo(struct ionic *ionic)
......@@ -126,7 +126,7 @@ int ionic_heartbeat_check(struct ionic *ionic)
/* is this a transition? */
if (fw_status != idev->last_fw_status &&
idev->last_fw_status != 0xff) {
struct ionic_lif *lif = ionic->master_lif;
struct ionic_lif *lif = ionic->lif;
bool trigger = false;
if (!fw_status || fw_status == 0xff) {
......@@ -482,7 +482,7 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
cq->bound_intr = intr;
cq->num_descs = num_descs;
cq->desc_size = desc_size;
cq->tail = cq->info;
cq->tail_idx = 0;
cq->done_color = 1;
cur = cq->info;
......@@ -522,15 +522,18 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg)
{
struct ionic_cq_info *cq_info;
unsigned int work_done = 0;
if (work_to_do == 0)
return 0;
while (cb(cq, cq->tail)) {
if (cq->tail->last)
cq_info = &cq->info[cq->tail_idx];
while (cb(cq, cq_info)) {
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
cq->tail = cq->tail->next;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq_info = &cq->info[cq->tail_idx];
DEBUG_STATS_CQE_CNT(cq);
if (++work_done >= work_to_do)
......@@ -565,8 +568,8 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
q->num_descs = num_descs;
q->desc_size = desc_size;
q->sg_desc_size = sg_desc_size;
q->tail = q->info;
q->head = q->tail;
q->tail_idx = 0;
q->head_idx = 0;
q->pid = pid;
snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);
......@@ -614,19 +617,22 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg)
{
struct device *dev = q->lif->ionic->dev;
struct ionic_desc_info *desc_info;
struct ionic_lif *lif = q->lif;
q->head->cb = cb;
q->head->cb_arg = cb_arg;
q->head = q->head->next;
desc_info = &q->info[q->head_idx];
desc_info->cb = cb;
desc_info->cb_arg = cb_arg;
q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
q->lif->index, q->name, q->hw_type, q->hw_index,
q->head->index, ring_doorbell);
q->head_idx, ring_doorbell);
if (ring_doorbell)
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
q->dbval | q->head->index);
q->dbval | q->head_idx);
}
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
......@@ -634,8 +640,8 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
unsigned int mask, tail, head;
mask = q->num_descs - 1;
tail = q->tail->index;
head = q->head->index;
tail = q->tail_idx;
head = q->head_idx;
return ((pos - tail) & mask) < ((head - tail) & mask);
}
......@@ -648,18 +654,18 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
void *cb_arg;
/* check for empty queue */
if (q->tail->index == q->head->index)
if (q->tail_idx == q->head_idx)
return;
/* stop index must be for a descriptor that is not yet completed */
if (unlikely(!ionic_q_is_posted(q, stop_index)))
dev_err(q->lif->ionic->dev,
"ionic stop is not posted %s stop %u tail %u head %u\n",
q->name, stop_index, q->tail->index, q->head->index);
q->name, stop_index, q->tail_idx, q->head_idx);
do {
desc_info = q->tail;
q->tail = desc_info->next;
desc_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
cb = desc_info->cb;
cb_arg = desc_info->cb_arg;
......
......@@ -149,7 +149,13 @@ struct ionic_dev {
};
struct ionic_cq_info {
void *cq_desc;
union {
void *cq_desc;
struct ionic_txq_comp *txcq;
struct ionic_rxq_comp *rxcq;
struct ionic_admin_comp *admincq;
struct ionic_notifyq_event *notifyq;
};
struct ionic_cq_info *next;
unsigned int index;
bool last;
......@@ -169,8 +175,17 @@ struct ionic_page_info {
};
struct ionic_desc_info {
void *desc;
void *sg_desc;
union {
void *desc;
struct ionic_txq_desc *txq_desc;
struct ionic_rxq_desc *rxq_desc;
struct ionic_admin_cmd *adminq_desc;
};
union {
void *sg_desc;
struct ionic_txq_sg_desc *txq_sg_desc;
struct ionic_rxq_sg_desc *rxq_sgl_desc;
};
struct ionic_desc_info *next;
unsigned int index;
unsigned int left;
......@@ -183,22 +198,32 @@ struct ionic_desc_info {
#define IONIC_QUEUE_NAME_MAX_SZ 32
struct ionic_queue {
struct device *dev;
u64 dbell_count;
u64 drop;
u64 stop;
u64 wake;
struct ionic_lif *lif;
struct ionic_desc_info *info;
struct ionic_desc_info *tail;
struct ionic_desc_info *head;
struct ionic_dev *idev;
u16 head_idx;
u16 tail_idx;
unsigned int index;
unsigned int type;
unsigned int hw_index;
unsigned int hw_type;
u64 dbval;
void *base;
void *sg_base;
union {
void *base;
struct ionic_txq_desc *txq;
struct ionic_rxq_desc *rxq;
struct ionic_admin_cmd *adminq;
};
union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_rxq_sg_desc *rxq_sgl;
};
dma_addr_t base_pa;
dma_addr_t sg_base_pa;
unsigned int num_descs;
......@@ -225,9 +250,9 @@ struct ionic_cq {
dma_addr_t base_pa;
struct ionic_lif *lif;
struct ionic_cq_info *info;
struct ionic_cq_info *tail;
struct ionic_queue *bound_q;
struct ionic_intr_info *bound_intr;
u16 tail_idx;
bool done_color;
unsigned int num_descs;
u64 compl_count;
......@@ -246,12 +271,12 @@ static inline void ionic_intr_init(struct ionic_dev *idev,
static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
{
unsigned int avail = q->tail->index;
unsigned int avail = q->tail_idx;
if (q->head->index >= avail)
avail += q->head->left - 1;
if (q->head_idx >= avail)
avail += q->num_descs - q->head_idx - 1;
else
avail -= q->head->index + 1;
avail -= q->head_idx + 1;
return avail;
}
......
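
An aside on the index rework in the ionic_dev.h hunks above: queue and
completion-queue positions are now plain u16 indices (head_idx, tail_idx)
into the info arrays instead of linked-list pointers, and the wrap-around
uses a mask, which assumes num_descs is a power of two.  A minimal
standalone sketch of that pattern (illustrative only, not part of the
patch):

	/* Illustrative only: advance a ring index the way the diff does.
	 * The (idx + 1) & (num_descs - 1) form assumes num_descs is a power
	 * of two; e.g. with num_descs = 16, (15 + 1) & 15 == 0, so the index
	 * wraps back to the start of the descriptor array.
	 */
	static inline u16 ring_index_next(u16 idx, u16 num_descs)
	{
		return (idx + 1) & (num_descs - 1);
	}
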
......@@ -85,7 +85,7 @@ int ionic_devlink_register(struct ionic *ionic)
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
else
devlink_port_type_eth_set(&ionic->dl_port,
ionic->master_lif->netdev);
ionic->lif->netdev);
return err;
}
......
......@@ -454,7 +454,7 @@ static int ionic_set_coalesce(struct net_device *netdev,
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
qcq = lif->rxqcqs[i].qcq;
qcq = lif->rxqcqs[i];
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
qcq->intr.index,
lif->rx_coalesce_hw);
......@@ -471,7 +471,7 @@ static int ionic_set_coalesce(struct net_device *netdev,
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
qcq = lif->txqcqs[i].qcq;
qcq = lif->txqcqs[i];
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
qcq->intr.index,
lif->tx_coalesce_hw);
......@@ -493,18 +493,14 @@ static void ionic_get_ringparam(struct net_device *netdev,
ring->rx_pending = lif->nrxq_descs;
}
static void ionic_set_ringsize(struct ionic_lif *lif, void *arg)
{
struct ethtool_ringparam *ring = arg;
lif->ntxq_descs = ring->tx_pending;
lif->nrxq_descs = ring->rx_pending;
}
static int ionic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_queue_params qparam;
int err;
ionic_init_queue_params(lif, &qparam);
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
......@@ -522,7 +518,28 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
return ionic_reset_queues(lif, ionic_set_ringsize, ring);
if (ring->tx_pending != lif->ntxq_descs)
netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
lif->ntxq_descs, ring->tx_pending);
if (ring->rx_pending != lif->nrxq_descs)
netdev_info(netdev, "Changing Rx ring size from %d to %d\n",
lif->nrxq_descs, ring->rx_pending);
/* if we're not running, just set the values and return */
if (!netif_running(lif->netdev)) {
lif->ntxq_descs = ring->tx_pending;
lif->nrxq_descs = ring->rx_pending;
return 0;
}
qparam.ntxq_descs = ring->tx_pending;
qparam.nrxq_descs = ring->rx_pending;
err = ionic_reconfigure_queues(lif, &qparam);
if (err)
netdev_info(netdev, "Ring reconfiguration failed, changes canceled: %d\n", err);
return err;
}
static void ionic_get_channels(struct net_device *netdev,
......@@ -544,32 +561,15 @@ static void ionic_get_channels(struct net_device *netdev,
}
}
static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
{
struct ethtool_channels *ch = arg;
if (ch->combined_count) {
lif->nxqs = ch->combined_count;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
netdev_info(lif->netdev, "Sharing queue interrupts\n");
}
} else {
lif->nxqs = ch->rx_count;
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
netdev_info(lif->netdev, "Splitting queue interrupts\n");
}
}
}
static int ionic_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct ionic_lif *lif = netdev_priv(netdev);
int new_cnt;
struct ionic_queue_params qparam;
int max_cnt;
int err;
ionic_init_queue_params(lif, &qparam);
if (ch->rx_count != ch->tx_count) {
netdev_info(netdev, "The rx and tx count must be equal\n");
......@@ -577,20 +577,63 @@ static int ionic_set_channels(struct net_device *netdev,
}
if (ch->combined_count && ch->rx_count) {
netdev_info(netdev, "Use either combined_count or rx/tx_count, not both\n");
netdev_info(netdev, "Use either combined or rx and tx, not both\n");
return -EINVAL;
}
if (ch->combined_count)
new_cnt = ch->combined_count;
else
new_cnt = ch->rx_count;
max_cnt = lif->ionic->ntxqs_per_lif;
if (ch->combined_count) {
if (ch->combined_count > max_cnt)
return -EINVAL;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netdev_info(lif->netdev, "Sharing queue interrupts\n");
else if (ch->combined_count == lif->nxqs)
return 0;
if (lif->nxqs != ch->combined_count)
netdev_info(netdev, "Changing queue count from %d to %d\n",
lif->nxqs, ch->combined_count);
if (lif->nxqs != new_cnt)
netdev_info(netdev, "Changing queue count from %d to %d\n",
lif->nxqs, new_cnt);
qparam.nxqs = ch->combined_count;
qparam.intr_split = 0;
} else {
max_cnt /= 2;
if (ch->rx_count > max_cnt)
return -EINVAL;
return ionic_reset_queues(lif, ionic_set_queuecount, ch);
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netdev_info(lif->netdev, "Splitting queue interrupts\n");
else if (ch->rx_count == lif->nxqs)
return 0;
if (lif->nxqs != ch->rx_count)
netdev_info(netdev, "Changing queue count from %d to %d\n",
lif->nxqs, ch->rx_count);
qparam.nxqs = ch->rx_count;
qparam.intr_split = 1;
}
/* if we're not running, just set the values and return */
if (!netif_running(lif->netdev)) {
lif->nxqs = qparam.nxqs;
if (qparam.intr_split) {
set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
} else {
clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
}
return 0;
}
err = ionic_reconfigure_queues(lif, &qparam);
if (err)
netdev_info(netdev, "Queue reconfiguration failed, changes canceled: %d\n", err);
return err;
}
static u32 ionic_get_priv_flags(struct net_device *netdev)
......
......@@ -56,35 +56,28 @@ struct ionic_napi_stats {
u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
};
struct ionic_q_stats {
union {
struct ionic_tx_stats tx;
struct ionic_rx_stats rx;
};
};
struct ionic_qcq {
void *base;
dma_addr_t base_pa;
unsigned int total_size;
void *q_base;
dma_addr_t q_base_pa;
u32 q_size;
void *cq_base;
dma_addr_t cq_base_pa;
u32 cq_size;
void *sg_base;
dma_addr_t sg_base_pa;
u32 sg_size;
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
struct napi_struct napi;
struct ionic_napi_stats napi_stats;
struct ionic_q_stats *stats;
unsigned int flags;
struct dentry *dentry;
};
struct ionic_qcqst {
struct ionic_qcq *qcq;
struct ionic_q_stats *stats;
};
#define q_to_qcq(q) container_of(q, struct ionic_qcq, q)
#define q_to_tx_stats(q) (&q_to_qcq(q)->stats->tx)
#define q_to_rx_stats(q) (&q_to_qcq(q)->stats->rx)
#define q_to_tx_stats(q) (&(q)->lif->txqstats[(q)->index])
#define q_to_rx_stats(q) (&(q)->lif->rxqstats[(q)->index])
#define napi_to_qcq(napi) container_of(napi, struct ionic_qcq, napi)
#define napi_to_cq(napi) (&napi_to_qcq(napi)->cq)
......@@ -170,8 +163,10 @@ struct ionic_lif {
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
struct ionic_qcqst *txqcqs;
struct ionic_qcqst *rxqcqs;
struct ionic_qcq **txqcqs;
struct ionic_tx_stats *txqstats;
struct ionic_qcq **rxqcqs;
struct ionic_rx_stats *rxqstats;
u64 last_eid;
unsigned int neqs;
unsigned int nxqs;
......@@ -212,12 +207,21 @@ struct ionic_lif {
struct work_struct tx_timeout_work;
};
#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq)
#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq)
#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
struct ionic_queue_params {
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
unsigned int intr_split;
};
static inline void ionic_init_queue_params(struct ionic_lif *lif,
struct ionic_queue_params *qparam)
{
qparam->nxqs = lif->nxqs;
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
}
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
{
......@@ -242,34 +246,33 @@ void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
struct ionic_deferred_work *work);
int ionic_lifs_alloc(struct ionic *ionic);
void ionic_lifs_free(struct ionic *ionic);
void ionic_lifs_deinit(struct ionic *ionic);
int ionic_lifs_init(struct ionic *ionic);
int ionic_lifs_register(struct ionic *ionic);
void ionic_lifs_unregister(struct ionic *ionic);
int ionic_lif_alloc(struct ionic *ionic);
int ionic_lif_init(struct ionic_lif *lif);
void ionic_lif_free(struct ionic_lif *lif);
void ionic_lif_deinit(struct ionic_lif *lif);
int ionic_lif_register(struct ionic_lif *lif);
void ionic_lif_unregister(struct ionic_lif *lif);
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lif_ident);
int ionic_lifs_size(struct ionic *ionic);
int ionic_lif_size(struct ionic *ionic);
int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
const u8 *key, const u32 *indir);
int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_queue_params *qparam);
int ionic_open(struct net_device *netdev);
int ionic_stop(struct net_device *netdev);
int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg);
static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
struct ionic_txq_desc *desc, bool dbell)
static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
{
u8 num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
& IONIC_TXQ_DESC_NSGE_MASK);
struct ionic_txq_desc *desc = &q->txq[q->head_idx];
u8 num_sg_elems;
qcq->q.dbell_count += dbell;
q->dbell_count += dbell;
num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
& IONIC_TXQ_DESC_NSGE_MASK);
if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;
qcq->stats->tx.sg_cntr[num_sg_elems]++;
q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++;
}
static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
......@@ -284,10 +287,9 @@ static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
}
#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
#define DEBUG_STATS_RX_BUFF_CNT(qcq) ((qcq)->stats->rx.buffers_posted++)
#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
#define DEBUG_STATS_INTR_REARM(intr) ((intr)->rearm_count++)
#define DEBUG_STATS_TXQ_POST(qcq, txdesc, dbell) \
debug_stats_txq_post(qcq, txdesc, dbell)
#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
debug_stats_napi_poll(qcq, work_done)
......
......@@ -181,15 +181,17 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
static void ionic_adminq_flush(struct ionic_lif *lif)
{
struct ionic_queue *adminq = &lif->adminqcq->q;
struct ionic_queue *q = &lif->adminqcq->q;
struct ionic_desc_info *desc_info;
spin_lock(&lif->adminq_lock);
while (adminq->tail != adminq->head) {
memset(adminq->tail->desc, 0, sizeof(union ionic_adminq_cmd));
adminq->tail->cb = NULL;
adminq->tail->cb_arg = NULL;
adminq->tail = adminq->tail->next;
while (q->tail_idx != q->head_idx) {
desc_info = &q->info[q->tail_idx];
memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd));
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
}
spin_unlock(&lif->adminq_lock);
}
......@@ -245,7 +247,8 @@ static void ionic_adminq_cb(struct ionic_queue *q,
static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
struct ionic_queue *adminq;
struct ionic_desc_info *desc_info;
struct ionic_queue *q;
int err = 0;
WARN_ON(in_interrupt());
......@@ -253,10 +256,10 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (!lif->adminqcq)
return -EIO;
adminq = &lif->adminqcq->q;
q = &lif->adminqcq->q;
spin_lock(&lif->adminq_lock);
if (!ionic_q_has_space(adminq, 1)) {
if (!ionic_q_has_space(q, 1)) {
err = -ENOSPC;
goto err_out;
}
......@@ -265,13 +268,14 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (err)
goto err_out;
memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd));
desc_info = &q->info[q->head_idx];
memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->cmd, sizeof(ctx->cmd), true);
ionic_q_post(adminq, true, ionic_adminq_cb, ctx);
ionic_q_post(q, true, ionic_adminq_cb, ctx);
err_out:
spin_unlock(&lif->adminq_lock);
......
......@@ -179,36 +179,28 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
static void ionic_get_lif_stats(struct ionic_lif *lif,
struct ionic_lif_sw_stats *stats)
{
struct ionic_tx_stats *tstats;
struct ionic_rx_stats *rstats;
struct ionic_tx_stats *txstats;
struct ionic_rx_stats *rxstats;
struct rtnl_link_stats64 ns;
struct ionic_qcq *txqcq;
struct ionic_qcq *rxqcq;
int q_num;
memset(stats, 0, sizeof(*stats));
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
txqcq = lif_to_txqcq(lif, q_num);
if (txqcq && txqcq->stats) {
tstats = &txqcq->stats->tx;
stats->tx_packets += tstats->pkts;
stats->tx_bytes += tstats->bytes;
stats->tx_tso += tstats->tso;
stats->tx_tso_bytes += tstats->tso_bytes;
stats->tx_csum_none += tstats->csum_none;
stats->tx_csum += tstats->csum;
}
rxqcq = lif_to_rxqcq(lif, q_num);
if (rxqcq && rxqcq->stats) {
rstats = &rxqcq->stats->rx;
stats->rx_packets += rstats->pkts;
stats->rx_bytes += rstats->bytes;
stats->rx_csum_none += rstats->csum_none;
stats->rx_csum_complete += rstats->csum_complete;
stats->rx_csum_error += rstats->csum_error;
}
txstats = &lif->txqstats[q_num];
stats->tx_packets += txstats->pkts;
stats->tx_bytes += txstats->bytes;
stats->tx_tso += txstats->tso;
stats->tx_tso_bytes += txstats->tso_bytes;
stats->tx_csum_none += txstats->csum_none;
stats->tx_csum += txstats->csum;
rxstats = &lif->rxqstats[q_num];
stats->rx_packets += rxstats->pkts;
stats->rx_bytes += rxstats->bytes;
stats->rx_csum_none += rxstats->csum_none;
stats->rx_csum_complete += rxstats->csum_complete;
stats->rx_csum_error += rxstats->csum_error;
}
ionic_get_stats64(lif->netdev, &ns);
......@@ -371,7 +363,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
txstats = &lif_to_txstats(lif, q_num);
txstats = &lif->txqstats[q_num];
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
**buf = IONIC_READ_STAT64(txstats,
......@@ -381,7 +373,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
txqcq = lif_to_txqcq(lif, q_num);
txqcq = lif->txqcqs[q_num];
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
&ionic_txq_stats_desc[i]);
......@@ -405,7 +397,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
rxstats = &lif_to_rxstats(lif, q_num);
rxstats = &lif->rxqstats[q_num];
for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
**buf = IONIC_READ_STAT64(rxstats,
......@@ -415,7 +407,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
rxqcq = lif_to_rxqcq(lif, q_num);
rxqcq = lif->rxqcqs[q_num];
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
&ionic_dbg_cq_stats_desc[i]);
......
......@@ -22,7 +22,7 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);
DEBUG_STATS_TXQ_POST(q, ring_dbell);
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
......@@ -32,7 +32,7 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
{
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
DEBUG_STATS_RX_BUFF_CNT(q);
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
......@@ -49,7 +49,7 @@ static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
struct sk_buff *skb;
netdev = lif->netdev;
stats = q_to_rx_stats(q);
stats = &q->lif->rxqstats[q->index];
if (frags)
skb = napi_get_frags(&q_to_qcq(q)->napi);
......@@ -235,14 +235,14 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return false;
/* check for empty queue */
if (q->tail->index == q->head->index)
if (q->tail_idx == q->head_idx)
return false;
desc_info = q->tail;
desc_info = &q->info[q->tail_idx];
if (desc_info->index != le16_to_cpu(comp->comp_index))
return false;
q->tail = desc_info->next;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */
ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
......@@ -338,7 +338,7 @@ void ionic_rx_fill(struct ionic_queue *q)
for (i = ionic_q_space_avail(q); i; i--) {
remain_len = len;
desc_info = q->head;
desc_info = &q->info[q->head_idx];
desc = desc_info->desc;
sg_desc = desc_info->sg_desc;
page_info = &desc_info->pages[0];
......@@ -387,7 +387,7 @@ void ionic_rx_fill(struct ionic_queue *q)
}
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
q->dbval | q->head->index);
q->dbval | q->head_idx);
}
static void ionic_rx_fill_cb(void *arg)
......@@ -397,25 +397,29 @@ static void ionic_rx_fill_cb(void *arg)
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *cur;
struct ionic_desc_info *desc_info;
struct ionic_rxq_desc *desc;
unsigned int i;
u16 idx;
for (cur = q->tail; cur != q->head; cur = cur->next) {
desc = cur->desc;
idx = q->tail_idx;
while (idx != q->head_idx) {
desc_info = &q->info[idx];
desc = desc_info->desc;
desc->addr = 0;
desc->len = 0;
for (i = 0; i < cur->npages; i++) {
if (likely(cur->pages[i].page)) {
ionic_rx_page_free(q, cur->pages[i].page,
cur->pages[i].dma_addr);
cur->pages[i].page = NULL;
cur->pages[i].dma_addr = 0;
for (i = 0; i < desc_info->npages; i++) {
if (likely(desc_info->pages[i].page)) {
ionic_rx_page_free(q, desc_info->pages[i].page,
desc_info->pages[i].dma_addr);
desc_info->pages[i].page = NULL;
desc_info->pages[i].dma_addr = 0;
}
}
cur->cb_arg = NULL;
desc_info->cb_arg = NULL;
idx = (idx + 1) & (q->num_descs - 1);
}
}
......@@ -502,7 +506,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
txcq = &lif->txqcqs[qi].qcq->cq;
txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
ionic_tx_service, NULL, NULL);
......@@ -630,9 +634,9 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
* several q entries completed for each cq completion
*/
do {
desc_info = q->tail;
q->tail = desc_info->next;
ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg);
desc_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
} while (desc_info->index != le16_to_cpu(comp->comp_index));
......@@ -658,9 +662,9 @@ void ionic_tx_empty(struct ionic_queue *q)
int done = 0;
/* walk the not completed tx entries, if any */
while (q->head != q->tail) {
desc_info = q->tail;
q->tail = desc_info->next;
while (q->head_idx != q->tail_idx) {
desc_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
......@@ -748,8 +752,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
struct ionic_txq_sg_elem **elem)
{
struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
struct ionic_txq_desc *desc = q->head->desc;
struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
*elem = sg_desc->elems;
return desc;
......@@ -758,13 +762,13 @@ static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_desc_info *abort = q->head;
struct ionic_desc_info *rewind_desc_info;
struct device *dev = q->lif->ionic->dev;
struct ionic_desc_info *rewind = abort;
struct ionic_txq_sg_elem *elem;
struct ionic_txq_desc *desc;
unsigned int frag_left = 0;
unsigned int offset = 0;
u16 abort = q->head_idx;
unsigned int len_left;
dma_addr_t desc_addr;
unsigned int hdrlen;
......@@ -772,6 +776,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
unsigned int seglen;
u64 total_bytes = 0;
u64 total_pkts = 0;
u16 rewind = abort;
unsigned int left;
unsigned int len;
unsigned int mss;
......@@ -916,19 +921,20 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
return 0;
err_out_abort:
while (rewind->desc != q->head->desc) {
ionic_tx_clean(q, rewind, NULL, NULL);
rewind = rewind->next;
while (rewind != q->head_idx) {
rewind_desc_info = &q->info[rewind];
ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
rewind = (rewind + 1) & (q->num_descs - 1);
}
q->head = abort;
q->head_idx = abort;
return -ENOMEM;
}
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_txq_desc *desc = q->head->desc;
struct device *dev = q->lif->ionic->dev;
dma_addr_t dma_addr;
bool has_vlan;
......@@ -967,8 +973,8 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_txq_desc *desc = q->head->desc;
struct device *dev = q->lif->ionic->dev;
dma_addr_t dma_addr;
bool has_vlan;
......@@ -1002,7 +1008,7 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
unsigned int len_left = skb->len - skb_headlen(skb);
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
......@@ -1111,9 +1117,9 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
if (unlikely(!lif_to_txqcq(lif, queue_index)))
if (unlikely(queue_index >= lif->nxqs))
queue_index = 0;
q = lif_to_txq(lif, queue_index);
q = &lif->txqcqs[queue_index]->q;
ndescs = ionic_tx_descs_needed(q, skb);
if (ndescs < 0)
......