Commit defd28aa authored by Xuan Zhuo, committed by Jakub Kicinski

virtio_net: rx remove premapped failover code

Now that the premapped mode can be enabled unconditionally, we can remove
the failover code for the mergeable and small modes.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
Link: https://lore.kernel.org/r/20240511031404.30903-4-xuanzhuo@linux.alibaba.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent a377ae54
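
For context: "premapped" means virtio_net owns the DMA mappings of its
receive buffers, rather than having the virtqueue core map each buffer when
it is added. Below is a minimal sketch of that flow using the virtqueue DMA
helpers this patch relies on; the ex_-prefixed names are illustrative, not
part of the driver.

#include <linux/virtio.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* Setup: mark the vq as premapped once, before any buffer is posted.
 * After this patch, virtio_net does this unconditionally and treats
 * failure as a bug.
 */
static void ex_enable_premapped(struct virtqueue *vq)
{
        BUG_ON(virtqueue_set_dma_premapped(vq));
}

/* Per buffer: the driver maps the buffer itself and passes the dma
 * address through sg->dma_address, which the core uses as-is in
 * premapped mode.
 */
static int ex_post_rx_buf(struct virtqueue *vq, struct scatterlist *sg,
                          void *buf, u32 len)
{
        dma_addr_t addr;

        addr = virtqueue_dma_map_single_attrs(vq, buf, len,
                                              DMA_FROM_DEVICE, 0);
        if (virtqueue_dma_mapping_error(vq, addr))
                return -ENOMEM;

        sg_init_table(sg, 1);
        sg->dma_address = addr;
        sg->length = len;

        return virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
}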
@@ -348,9 +348,6 @@ struct receive_queue {
 
 	/* Record the last dma info to free after new pages is allocated. */
 	struct virtnet_rq_dma *last_dma;
-
-	/* Do dma by self */
-	bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -850,7 +847,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
 	void *buf;
 
 	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
-	if (buf && rq->do_dma)
+	if (buf)
 		virtnet_rq_unmap(rq, buf, *len);
 
 	return buf;
@@ -863,11 +860,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 	u32 offset;
 	void *head;
 
-	if (!rq->do_dma) {
-		sg_init_one(rq->sg, buf, len);
-		return;
-	}
-
 	head = page_address(rq->alloc_frag.page);
 
 	offset = buf - head;
@@ -893,44 +885,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 
 	head = page_address(alloc_frag->page);
 
-	if (rq->do_dma) {
-		dma = head;
-
-		/* new pages */
-		if (!alloc_frag->offset) {
-			if (rq->last_dma) {
-				/* Now, the new page is allocated, the last dma
-				 * will not be used. So the dma can be unmapped
-				 * if the ref is 0.
-				 */
-				virtnet_rq_unmap(rq, rq->last_dma, 0);
-				rq->last_dma = NULL;
-			}
-
-			dma->len = alloc_frag->size - sizeof(*dma);
-
-			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
-							      dma->len, DMA_FROM_DEVICE, 0);
-			if (virtqueue_dma_mapping_error(rq->vq, addr))
-				return NULL;
-
-			dma->addr = addr;
-			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
-
-			/* Add a reference to dma to prevent the entire dma from
-			 * being released during error handling. This reference
-			 * will be freed after the pages are no longer used.
-			 */
-			get_page(alloc_frag->page);
-			dma->ref = 1;
-			alloc_frag->offset = sizeof(*dma);
-
-			rq->last_dma = dma;
-		}
-
-		++dma->ref;
-	}
+	dma = head;
+
+	/* new pages */
+	if (!alloc_frag->offset) {
+		if (rq->last_dma) {
+			/* Now, the new page is allocated, the last dma
+			 * will not be used. So the dma can be unmapped
+			 * if the ref is 0.
+			 */
+			virtnet_rq_unmap(rq, rq->last_dma, 0);
+			rq->last_dma = NULL;
+		}
+
+		dma->len = alloc_frag->size - sizeof(*dma);
+
+		addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+						      dma->len, DMA_FROM_DEVICE, 0);
+		if (virtqueue_dma_mapping_error(rq->vq, addr))
+			return NULL;
+
+		dma->addr = addr;
+		dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+
+		/* Add a reference to dma to prevent the entire dma from
+		 * being released during error handling. This reference
+		 * will be freed after the pages are no longer used.
+		 */
+		get_page(alloc_frag->page);
+		dma->ref = 1;
+		alloc_frag->offset = sizeof(*dma);
+
+		rq->last_dma = dma;
+	}
+
+	++dma->ref;
 
 	buf = head + alloc_frag->offset;
 
 	get_page(alloc_frag->page);
@@ -947,12 +937,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
 	if (!vi->mergeable_rx_bufs && vi->big_packets)
 		return;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
-			continue;
-
-		vi->rq[i].do_dma = true;
-	}
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		/* error should never happen */
+		BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
 }
 
 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
@@ -2030,8 +2017,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0) {
-		if (rq->do_dma)
-			virtnet_rq_unmap(rq, buf, 0);
+		virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
 	}
 
@@ -2145,8 +2131,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 	ctx = mergeable_len_to_ctx(len + room, headroom);
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0) {
-		if (rq->do_dma)
-			virtnet_rq_unmap(rq, buf, 0);
+		virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
 	}
 
@@ -5229,7 +5214,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
 	int i;
 	for (i = 0; i < vi->max_queue_pairs; i++)
 		if (vi->rq[i].alloc_frag.page) {
-			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+			if (vi->rq[i].last_dma)
 				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
 			put_page(vi->rq[i].alloc_frag.page);
 		}
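
A note on the reference counting the remaining code depends on:
virtnet_rq_alloc() places a struct virtnet_rq_dma header at the start of
each page fragment, maps the rest of the page once, and every buffer carved
from the page takes a reference; rq->last_dma defers the final unmap of the
previous page until its refcount drains. A sketch of the unmap side follows;
the field layout and ex_-prefixed names are reconstructed from what this
diff touches, not copied verbatim from upstream.

/* Per-page DMA bookkeeping stored at the head of each rx page frag;
 * the mapped region starts right after this header (dma + 1).
 */
struct ex_rq_dma {
        dma_addr_t addr;        /* dma address of (dma + 1) */
        u32 ref;                /* buffers still outstanding from this page */
        u16 len;                /* mapped length after the header */
        u16 need_sync;
};

/* Drop one reference for a completed buffer; only tear down the
 * mapping once no buffer from this page is outstanding.
 */
static void ex_rq_unmap(struct virtqueue *vq, struct ex_rq_dma *dma,
                        void *buf, u32 len)
{
        if (dma->need_sync && len) {
                u32 offset = buf - (void *)(dma + 1);

                virtqueue_dma_sync_single_range_for_cpu(vq, dma->addr, offset,
                                                        len, DMA_FROM_DEVICE);
        }

        if (--dma->ref == 0)
                virtqueue_dma_unmap_single_attrs(vq, dma->addr, dma->len,
                                                 DMA_FROM_DEVICE, 0);
}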