Commit 57bf6eef authored by Joe Perches's avatar Joe Perches Committed by David S. Miller

ixgb and e1000: Use new function for copybreak tests

There appears to be an off-by-1 defect in the maximum packet size
copied when copybreak is specified in these modules.

The copybreak module params are specified as:
"Maximum size of packet that is copied to a new buffer on receive"

The tests are changed from "< copybreak" to "<= copybreak"
and moved into new static functions for readability.
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 621b99b6
...@@ -3785,6 +3785,31 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, ...@@ -3785,6 +3785,31 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
return cleaned; return cleaned;
} }
/*
 * Copy small received frames into a freshly allocated skb so the
 * original DMA buffer can be recycled; this should improve performance
 * for small packets with large amounts of reassembly being done in the
 * stack.  Frames longer than copybreak are left in place.
 */
static void e1000_check_copybreak(struct net_device *netdev,
				 struct e1000_buffer *buffer_info,
				 u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb = NULL;

	if (length <= copybreak)
		new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (new_skb == NULL)
		return;	/* allocation failed or frame too big: keep old skb */

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}
/** /**
* e1000_clean_rx_irq - Send received data up the network stack; legacy * e1000_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure * @adapter: board private structure
...@@ -3883,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, ...@@ -3883,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
total_rx_bytes += length; total_rx_bytes += length;
total_rx_packets++; total_rx_packets++;
/* code added for copybreak, this should improve e1000_check_copybreak(netdev, buffer_info, length, &skb);
* performance for small packets with large amounts
* of reassembly being done in the stack */
if (length < copybreak) {
struct sk_buff *new_skb =
netdev_alloc_skb_ip_align(netdev, length);
if (new_skb) {
skb_copy_to_linear_data_offset(new_skb,
-NET_IP_ALIGN,
(skb->data -
NET_IP_ALIGN),
(length +
NET_IP_ALIGN));
/* save the skb in buffer_info as good */
buffer_info->skb = skb;
skb = new_skb;
}
/* else just continue with the old one */
}
/* end copybreak code */
skb_put(skb, length); skb_put(skb, length);
/* Receive Checksum Offload */ /* Receive Checksum Offload */
......
...@@ -1921,6 +1921,31 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter, ...@@ -1921,6 +1921,31 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
} }
} }
/*
 * Copy small received frames into a freshly allocated skb so the
 * original DMA buffer can be recycled; this should improve performance
 * for small packets with large amounts of reassembly being done in the
 * stack.  Frames longer than copybreak are left in place.
 */
static void ixgb_check_copybreak(struct net_device *netdev,
				 struct ixgb_buffer *buffer_info,
				 u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb = NULL;

	if (length <= copybreak)
		new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (new_skb == NULL)
		return;	/* allocation failed or frame too big: keep old skb */

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}
/** /**
* ixgb_clean_rx_irq - Send received data up the network stack, * ixgb_clean_rx_irq - Send received data up the network stack,
* @adapter: board private structure * @adapter: board private structure
...@@ -1957,11 +1982,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) ...@@ -1957,11 +1982,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
prefetch(skb->data - NET_IP_ALIGN); prefetch(skb->data - NET_IP_ALIGN);
if (++i == rx_ring->count) i = 0; if (++i == rx_ring->count)
i = 0;
next_rxd = IXGB_RX_DESC(*rx_ring, i); next_rxd = IXGB_RX_DESC(*rx_ring, i);
prefetch(next_rxd); prefetch(next_rxd);
if ((j = i + 1) == rx_ring->count) j = 0; j = i + 1;
if (j == rx_ring->count)
j = 0;
next2_buffer = &rx_ring->buffer_info[j]; next2_buffer = &rx_ring->buffer_info[j];
prefetch(next2_buffer); prefetch(next2_buffer);
...@@ -1997,25 +2025,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) ...@@ -1997,25 +2025,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
goto rxdesc_done; goto rxdesc_done;
} }
/* code added for copybreak, this should improve ixgb_check_copybreak(netdev, buffer_info, length, &skb);
* performance for small packets with large amounts
* of reassembly being done in the stack */
if (length < copybreak) {
struct sk_buff *new_skb =
netdev_alloc_skb_ip_align(netdev, length);
if (new_skb) {
skb_copy_to_linear_data_offset(new_skb,
-NET_IP_ALIGN,
(skb->data -
NET_IP_ALIGN),
(length +
NET_IP_ALIGN));
/* save the skb in buffer_info as good */
buffer_info->skb = skb;
skb = new_skb;
}
}
/* end copybreak code */
/* Good Receive */ /* Good Receive */
skb_put(skb, length); skb_put(skb, length);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment