Commit 4c5fef77 authored by Stephen Hemminger, committed by Greg Kroah-Hartman

hv_netvsc: empty current transmit aggregation if flow blocked

[ Upstream commit cfd8afd9 ]

If the transmit queue is known to be full, then don't keep aggregating
data. The cp_partial flag, which indicates that the current aggregation
buffer is full, can also be folded in to avoid extra conditionals.
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 41f24dbe
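The net effect of the patch is that the three batching conditions are evaluated once in netvsc_send() and handed down as a single flag. A minimal sketch of the resulting decision, using the names from the diff below (section allocation and error handling elided):

	/* Keep aggregating only while the stack reports more data coming
	 * (skb->xmit_more), the send is not a mixed-mode partial copy
	 * (packet->cp_partial), and the transmit queue is not flow blocked.
	 */
	bool xmit_more = skb->xmit_more &&
			 !packet->cp_partial &&
			 !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	netvsc_copy_to_send_buf(net_device, section_index, msd_len,
				packet, rndis_msg, pb, xmit_more);

With the flag computed up front, netvsc_copy_to_send_buf() no longer needs the skb or its own cp_partial check, and its msg_size return value becomes unused, so the function can return void.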
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -192,7 +192,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 				const struct netvsc_device_info *info);
 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
 void netvsc_device_remove(struct hv_device *device);
-int netvsc_send(struct net_device_context *ndc,
+int netvsc_send(struct net_device *net,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *page_buffer,
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -700,13 +700,13 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 	return NETVSC_INVALID_INDEX;
 }
 
-static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
-				   unsigned int section_index,
-				   u32 pend_size,
-				   struct hv_netvsc_packet *packet,
-				   struct rndis_message *rndis_msg,
-				   struct hv_page_buffer *pb,
-				   struct sk_buff *skb)
+static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+				    unsigned int section_index,
+				    u32 pend_size,
+				    struct hv_netvsc_packet *packet,
+				    struct rndis_message *rndis_msg,
+				    struct hv_page_buffer *pb,
+				    bool xmit_more)
 {
 	char *start = net_device->send_buf;
 	char *dest = start + (section_index * net_device->send_section_size)
@@ -719,7 +719,8 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		packet->page_buf_cnt;
 
 	/* Add padding */
-	if (skb->xmit_more && remain && !packet->cp_partial) {
+	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
+	if (xmit_more && remain) {
 		padding = net_device->pkt_align - remain;
 		rndis_msg->msg_len += padding;
 		packet->total_data_buflen += padding;
@@ -739,8 +740,6 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		memset(dest, 0, padding);
 		msg_size += padding;
 	}
-
-	return msg_size;
 }
 
 static inline int netvsc_send_pkt(
@@ -828,12 +827,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
 }
 
 /* RCU already held by caller */
-int netvsc_send(struct net_device_context *ndev_ctx,
+int netvsc_send(struct net_device *ndev,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *pb,
 		struct sk_buff *skb)
 {
+	struct net_device_context *ndev_ctx = netdev_priv(ndev);
 	struct netvsc_device *net_device
 		= rcu_dereference_bh(ndev_ctx->nvdev);
 	struct hv_device *device = ndev_ctx->device_ctx;
@@ -844,8 +844,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 	struct multi_send_data *msdp;
 	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
 	struct sk_buff *msd_skb = NULL;
-	bool try_batch;
-	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
+	bool try_batch, xmit_more;
 
 	/* If device is rescinded, return error and packet will get dropped. */
 	if (unlikely(!net_device || net_device->destroy))
@@ -896,10 +895,17 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		}
 	}
 
+	/* Keep aggregating only if stack says more data is coming
+	 * and not doing mixed modes send and not flow blocked
+	 */
+	xmit_more = skb->xmit_more &&
+		!packet->cp_partial &&
+		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
 	if (section_index != NETVSC_INVALID_INDEX) {
 		netvsc_copy_to_send_buf(net_device,
 					section_index, msd_len,
-					packet, rndis_msg, pb, skb);
+					packet, rndis_msg, pb, xmit_more);
 
 		packet->send_buf_index = section_index;
 
@@ -919,7 +925,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		if (msdp->skb)
 			dev_consume_skb_any(msdp->skb);
 
-		if (xmit_more && !packet->cp_partial) {
+		if (xmit_more) {
 			msdp->skb = skb;
 			msdp->pkt = packet;
 			msdp->count++;
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -614,7 +614,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	/* timestamp packet in software */
 	skb_tx_timestamp(skb);
 
-	ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
+	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
 	if (likely(ret == 0))
 		return NETDEV_TX_OK;
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -217,7 +217,6 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	struct hv_netvsc_packet *packet;
 	struct hv_page_buffer page_buf[2];
 	struct hv_page_buffer *pb = page_buf;
-	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 	int ret;
 
 	/* Setup the packet to send it */
@@ -245,7 +244,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	}
 
 	rcu_read_lock_bh();
-	ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
+	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
 	rcu_read_unlock_bh();
 
 	return ret;