Commit aec626d2 authored by Benjamin Poirier, committed by Greg Kroah-Hartman

staging: qlge: Update buffer queue prod index despite oom

Currently, if we repeatedly fail to allocate all of the buffers from the
desired batching budget, we will never update the prod_idx register.
Restructure the code to always update prod_idx whenever new buffers
could be allocated. This eliminates the current two-stage process
(clean_idx -> prod_idx) and some associated bookkeeping variables.
Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
Link: https://lore.kernel.org/r/20190927101210.23856-16-bpoirier@suse.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c8c1ff5c
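The patch replaces the prod_idx/curr_idx/clean_idx/free_cnt bookkeeping with a single pair of u16 indices, next_to_use and next_to_clean, and derives the free-slot count from their difference. As a standalone illustration of that arithmetic (a sketch, not driver code: BQ_LEN and BQ_WRAP() stand in for the driver's QLGE_BQ_LEN and QLGE_BQ_WRAP(), and the ring size and index values are made up):

/*
 * Sketch of the index arithmetic introduced by this patch; BQ_LEN and
 * BQ_WRAP() stand in for the driver's QLGE_BQ_LEN and QLGE_BQ_WRAP().
 */
#include <stdio.h>

#define BQ_LEN 256                              /* illustrative power-of-two ring size */
#define BQ_WRAP(idx) ((idx) & (BQ_LEN - 1))     /* assumed wrap helper */

struct bq_sketch {
        unsigned short next_to_use;     /* next slot sw refills for hw */
        unsigned short next_to_clean;   /* next slot where sw expects a buffer filled by hw */
};

/* Slots currently available for refilling, mirroring the new free_count. */
static int bq_free_count(const struct bq_sketch *bq)
{
        int free_count = bq->next_to_clean - bq->next_to_use;

        if (free_count <= 0)
                free_count += BQ_LEN;
        return free_count;
}

int main(void)
{
        struct bq_sketch bq = { .next_to_use = 250, .next_to_clean = 10 };
        int allocated = 5;      /* pretend the allocator failed after 5 buffers */

        printf("free slots before refill: %d\n", bq_free_count(&bq));   /* 16 */

        /*
         * With a single index pair there is no separate clean_idx stage:
         * next_to_use advances by however many buffers were actually
         * allocated, so even a partial (oom) refill becomes visible to the
         * hardware once the producer index register is written from it.
         */
        bq.next_to_use = BQ_WRAP(bq.next_to_use + allocated);
        printf("next_to_use after partial refill: %u\n", bq.next_to_use); /* 255 */
        printf("free slots after refill: %d\n", bq_free_count(&bq));      /* 11 */
        return 0;
}

Because qlge_refill_bq() always stops at least reserved_count slots short, refilling never makes next_to_use catch up to next_to_clean, so equal indices can safely be read as a completely free queue, and the doorbell write in the diff below happens whenever next_to_use moved at all, even after a partial out-of-memory refill.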
@@ -1424,10 +1424,10 @@ struct qlge_bq {
 	dma_addr_t base_indirect_dma;
 	struct qlge_bq_desc *queue;
 	void __iomem *prod_idx_db_reg;
-	u32 prod_idx;  /* current sw prod idx */
-	u32 curr_idx;  /* next entry we expect */
-	u32 clean_idx; /* beginning of new descs */
-	u32 free_cnt;  /* free buffer desc cnt */
+	/* next index where sw should refill a buffer for hw */
+	u16 next_to_use;
+	/* next index where sw expects to find a buffer filled by hw */
+	u16 next_to_clean;
 	enum {
 		QLGE_SB, /* small buffer */
 		QLGE_LB, /* large buffer */
@@ -1776,8 +1776,8 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
 	pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue);
 	pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n",
 	       rx_ring->lbq.prod_idx_db_reg);
-	pr_err("rx_ring->lbq.prod_idx = %d\n", rx_ring->lbq.prod_idx);
-	pr_err("rx_ring->lbq.curr_idx = %d\n", rx_ring->lbq.curr_idx);
+	pr_err("rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
+	pr_err("rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
 	pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
 	pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
@@ -1791,10 +1791,8 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
 	pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue);
 	pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n",
 	       rx_ring->sbq.prod_idx_db_reg);
-	pr_err("rx_ring->sbq.prod_idx = %d\n", rx_ring->sbq.prod_idx);
-	pr_err("rx_ring->sbq.curr_idx = %d\n", rx_ring->sbq.curr_idx);
-	pr_err("rx_ring->sbq.clean_idx = %d\n", rx_ring->sbq.clean_idx);
-	pr_err("rx_ring->sbq.free_cnt = %d\n", rx_ring->sbq.free_cnt);
+	pr_err("rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
+	pr_err("rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
 	pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
 	pr_err("rx_ring->irq = %d\n", rx_ring->irq);
 	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
@@ -982,9 +982,8 @@ static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
 {
 	struct qlge_bq_desc *bq_desc;
 
-	bq_desc = &bq->queue[bq->curr_idx];
-	bq->curr_idx = QLGE_BQ_WRAP(bq->curr_idx + 1);
-	bq->free_cnt++;
+	bq_desc = &bq->queue[bq->next_to_clean];
+	bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
 
 	return bq_desc;
 }
@@ -1114,9 +1113,9 @@ static void qlge_refill_bq(struct qlge_bq *bq)
 {
 	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
 	struct ql_adapter *qdev = rx_ring->qdev;
-	u32 clean_idx = bq->clean_idx;
+	struct qlge_bq_desc *bq_desc;
+	int free_count, refill_count;
 	unsigned int reserved_count;
-	u32 start_idx = clean_idx;
 	int i;
 
 	if (bq->type == QLGE_SB)
@@ -1124,44 +1123,52 @@ static void qlge_refill_bq(struct qlge_bq *bq)
 	else
 		reserved_count = 32;
 
-	while (bq->free_cnt > reserved_count) {
-		for (i = (bq->clean_idx % 16); i < 16; i++) {
-			struct qlge_bq_desc *bq_desc = &bq->queue[clean_idx];
-			int retval;
-
-			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-				     "ring %u %s: try cleaning clean_idx = %d.\n",
-				     rx_ring->cq_id, bq_type_name[bq->type],
-				     clean_idx);
-
-			if (bq->type == QLGE_SB)
-				retval = qlge_refill_sb(rx_ring, bq_desc);
-			else
-				retval = qlge_refill_lb(rx_ring, bq_desc);
-			if (retval < 0) {
-				bq->clean_idx = clean_idx;
-				netif_err(qdev, ifup, qdev->ndev,
-					  "ring %u %s: Could not get a page chunk, i=%d, clean_idx =%d .\n",
-					  rx_ring->cq_id,
-					  bq_type_name[bq->type], i,
-					  clean_idx);
-				return;
-			}
-
-			clean_idx = QLGE_BQ_WRAP(clean_idx + 1);
+	free_count = bq->next_to_clean - bq->next_to_use;
+	if (free_count <= 0)
+		free_count += QLGE_BQ_LEN;
+
+	refill_count = free_count - reserved_count;
+	/* refill batch size */
+	if (refill_count < 16)
+		return;
+
+	i = bq->next_to_use;
+	bq_desc = &bq->queue[i];
+	i -= QLGE_BQ_LEN;
+	do {
+		int retval;
+
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "ring %u %s: try cleaning idx %d\n",
+			     rx_ring->cq_id, bq_type_name[bq->type], i);
+
+		if (bq->type == QLGE_SB)
+			retval = qlge_refill_sb(rx_ring, bq_desc);
+		else
+			retval = qlge_refill_lb(rx_ring, bq_desc);
+		if (retval < 0) {
+			netif_err(qdev, ifup, qdev->ndev,
+				  "ring %u %s: Could not get a page chunk, idx %d\n",
+				  rx_ring->cq_id, bq_type_name[bq->type], i);
+			break;
 		}
 
-		bq->clean_idx = clean_idx;
-		bq->prod_idx = QLGE_BQ_WRAP(bq->prod_idx + 16);
-		bq->free_cnt -= 16;
-	}
+		bq_desc++;
+		i++;
+		if (unlikely(!i)) {
+			bq_desc = &bq->queue[0];
+			i -= QLGE_BQ_LEN;
+		}
+		refill_count--;
+	} while (refill_count);
+	i += QLGE_BQ_LEN;
 
-	if (start_idx != clean_idx) {
+	if (bq->next_to_use != i) {
 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 			     "ring %u %s: updating prod idx = %d.\n",
-			     rx_ring->cq_id, bq_type_name[bq->type],
-			     bq->prod_idx);
-		ql_write_db_reg(bq->prod_idx, bq->prod_idx_db_reg);
+			     rx_ring->cq_id, bq_type_name[bq->type], i);
+		bq->next_to_use = i;
+		ql_write_db_reg(bq->next_to_use, bq->prod_idx_db_reg);
 	}
 }
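The rewritten loop keeps its running index biased by -QLGE_BQ_LEN: i starts at next_to_use - QLGE_BQ_LEN, so the wrap-around check is a plain test against zero, and the bias is removed with i += QLGE_BQ_LEN once the loop exits (which is also why the "try cleaning idx %d" message prints a negative value during the loop). A minimal standalone sketch of that iteration pattern, with an illustrative RING_LEN and a plain int array standing in for the descriptor queue:

/*
 * Sketch of the biased-index iteration pattern used by the new refill loop;
 * RING_LEN and the array contents are made up for illustration.
 */
#include <stdio.h>

#define RING_LEN 8      /* illustrative power-of-two ring size */

int main(void)
{
        int ring[RING_LEN] = { 0 };
        int next_to_use = 6;    /* start near the end to force a wrap */
        int count = 5;          /* number of slots to touch */
        int *slot = &ring[next_to_use];
        int i = next_to_use - RING_LEN; /* biased index, in [-RING_LEN, -1] */

        do {
                *slot = 1;      /* stand-in for refilling one buffer */

                slot++;
                i++;
                if (!i) {       /* crossed the end of the ring: wrap */
                        slot = &ring[0];
                        i -= RING_LEN;
                }
                count--;
        } while (count);
        i += RING_LEN;          /* back to a normal 0-based index */

        printf("new next_to_use = %d\n", i);    /* (6 + 5) mod 8 = 3 */
        for (int j = 0; j < RING_LEN; j++)
                printf("ring[%d] = %d\n", j, ring[j]);
        return 0;
}

Running it marks slots 6, 7, 0, 1 and 2 and prints new next_to_use = 3, matching (6 + 5) mod 8.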
@@ -2709,25 +2716,21 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
+	struct qlge_bq *lbq = &rx_ring->lbq;
 	unsigned int last_offset;
-	uint32_t curr_idx, clean_idx;
 
 	last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
-	curr_idx = rx_ring->lbq.curr_idx;
-	clean_idx = rx_ring->lbq.clean_idx;
-	while (curr_idx != clean_idx) {
-		struct qlge_bq_desc *lbq_desc = &rx_ring->lbq.queue[curr_idx];
+	while (lbq->next_to_clean != lbq->next_to_use) {
+		struct qlge_bq_desc *lbq_desc =
+			&lbq->queue[lbq->next_to_clean];
 
 		if (lbq_desc->p.pg_chunk.offset == last_offset)
 			pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
 				       ql_lbq_block_size(qdev),
 				       PCI_DMA_FROMDEVICE);
 		put_page(lbq_desc->p.pg_chunk.page);
-		lbq_desc->p.pg_chunk.page = NULL;
 
-		curr_idx = QLGE_BQ_WRAP(curr_idx + 1);
+		lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
 	}
 
 	if (rx_ring->master_chunk.page) {
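During teardown, ql_free_lbq_buffers() now walks exactly the slots that were refilled but not yet consumed, i.e. from next_to_clean up to (but not including) next_to_use. A small standalone sketch of that walk (ring size and starting indices are illustrative, and BQ_LEN/BQ_WRAP() again stand in for the driver's macros):

/* Sketch of walking the outstanding slots between the two indices. */
#include <stdio.h>

#define BQ_LEN 8                        /* illustrative power-of-two ring size */
#define BQ_WRAP(idx) ((idx) & (BQ_LEN - 1))

int main(void)
{
        unsigned short next_to_clean = 6;       /* oldest buffer still owned by hw */
        unsigned short next_to_use = 3;         /* first slot not yet refilled */

        /* Releases slots 6, 7, 0, 1, 2 and stops; the rest were never refilled. */
        while (next_to_clean != next_to_use) {
                printf("releasing buffer in slot %u\n", next_to_clean);
                next_to_clean = BQ_WRAP(next_to_clean + 1);
        }
        return 0;
}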
@@ -3024,10 +3027,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 		cqicb->lbq_buf_size =
 			cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
 		cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
-		rx_ring->lbq.prod_idx = 0;
-		rx_ring->lbq.curr_idx = 0;
-		rx_ring->lbq.clean_idx = 0;
-		rx_ring->lbq.free_cnt = QLGE_BQ_LEN;
+		rx_ring->lbq.next_to_use = 0;
+		rx_ring->lbq.next_to_clean = 0;
 
 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
 		tmp = (u64)rx_ring->sbq.base_dma;
@@ -3043,10 +3044,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 			cpu_to_le64(rx_ring->sbq.base_indirect_dma);
 		cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
 		cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
-		rx_ring->sbq.prod_idx = 0;
-		rx_ring->sbq.curr_idx = 0;
-		rx_ring->sbq.clean_idx = 0;
-		rx_ring->sbq.free_cnt = QLGE_BQ_LEN;
+		rx_ring->sbq.next_to_use = 0;
+		rx_ring->sbq.next_to_clean = 0;
 	}
 	if (rx_ring->cq_id < qdev->rss_ring_count) {
 		/* Inbound completion handling rx_rings run in