Commit 6ed272b2 authored by David S. Miller

Merge branch 's390-qeth-next'

Julian Wiedmann says:

====================
s390/net: more updates for 4.14

please apply another batch of qeth patches for net-next.
This reworks the xmit path for L2 OSAs to use skb_cow_head() instead of
skb_realloc_headroom().
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ae2b27b8 0d6f02d3
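
For context, the heart of this rework is how the L2 OSA transmit path obtains headroom for the qeth_hdr: rather than cloning every skb into a fresh copy via skb_realloc_headroom(), the driver now calls skb_cow_head() so the existing headroom becomes writable, then pushes the header in place (qeth_push_hdr() below falls back to a cache-allocated header when the push would cross a page boundary). A minimal sketch of that general pattern follows; the struct and function names are made up for illustration and are not the real qeth helpers.

#include <linux/types.h>
#include <linux/skbuff.h>

/* Illustrative hardware header -- not the real struct qeth_hdr. */
struct demo_hw_hdr {
        u16 pkt_length;
        u16 flags;
};

/* Push a demo header onto the skb without allocating a second skb. */
static int demo_push_hw_hdr(struct sk_buff *skb)
{
        struct demo_hw_hdr *hdr;
        int rc;

        /* Ensure enough writable headroom; this reallocates the skb head
         * only if it is shared or too small, unlike skb_realloc_headroom()
         * which always returns a separate copy of the skb.
         */
        rc = skb_cow_head(skb, sizeof(*hdr));
        if (rc)
                return rc;      /* typically -ENOMEM */

        /* Prepend the header to the same skb and fill it in. */
        hdr = skb_push(skb, sizeof(*hdr));
        hdr->pkt_length = skb->len - sizeof(*hdr);
        hdr->flags = 0;
        return 0;
}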
@@ -949,9 +949,10 @@ int qeth_get_elements_for_frags(struct sk_buff *);
 int qeth_do_send_packet_fast(struct qeth_card *card,
                              struct qeth_qdio_out_q *queue, struct sk_buff *skb,
                              struct qeth_hdr *hdr, unsigned int offset,
-                             int hd_len);
-int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
-                        struct sk_buff *, struct qeth_hdr *, int);
+                             unsigned int hd_len);
+int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+                        struct sk_buff *skb, struct qeth_hdr *hdr,
+                        unsigned int hd_len, unsigned int offset, int elements);
 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 int qeth_core_get_sset_count(struct net_device *, int);
 void qeth_core_get_ethtool_stats(struct net_device *,
@@ -984,6 +985,7 @@ int qeth_set_features(struct net_device *, netdev_features_t);
 int qeth_recover_features(struct net_device *);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
 int qeth_vm_request_mac(struct qeth_card *card);
+int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
...
@@ -3890,6 +3890,34 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
 }
 EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
 
+/**
+ * qeth_push_hdr() - push a qeth_hdr onto an skb.
+ * @skb: skb that the qeth_hdr should be pushed onto.
+ * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
+ *       it contains a valid pointer to a qeth_hdr.
+ * @len: length of the hdr that needs to be pushed on.
+ *
+ * Returns the pushed length. If the header can't be pushed on
+ * (eg. because it would cross a page boundary), it is allocated from
+ * the cache instead and 0 is returned.
+ * Error to create the hdr is indicated by returning with < 0.
+ */
+int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len)
+{
+        if (skb_headroom(skb) >= len &&
+            qeth_get_elements_for_range((addr_t)skb->data - len,
+                                        (addr_t)skb->data) == 1) {
+                *hdr = skb_push(skb, len);
+                return len;
+        }
+        /* fall back */
+        *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+        if (!*hdr)
+                return -ENOMEM;
+        return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_push_hdr);
+
 static void __qeth_fill_buffer(struct sk_buff *skb,
                                struct qeth_qdio_out_buffer *buf,
                                bool is_first_elem, unsigned int offset)
@@ -3953,43 +3981,38 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
        buf->next_element_to_fill = element;
 }
 
+/**
+ * qeth_fill_buffer() - map skb into an output buffer
+ * @queue:     QDIO queue to submit the buffer on
+ * @buf:       buffer to transport the skb
+ * @skb:       skb to map into the buffer
+ * @hdr:       qeth_hdr for this skb. Either at skb->data, or allocated
+ *             from qeth_core_header_cache.
+ * @offset:    when mapping the skb, start at skb->data + offset
+ * @hd_len:    if > 0, build a dedicated header element of this size
+ */
 static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
                             struct qeth_qdio_out_buffer *buf,
                             struct sk_buff *skb, struct qeth_hdr *hdr,
-                            unsigned int offset, int hd_len)
+                            unsigned int offset, unsigned int hd_len)
 {
-        struct qdio_buffer *buffer;
-        int flush_cnt = 0, hdr_len;
+        struct qdio_buffer *buffer = buf->buffer;
         bool is_first_elem = true;
+        int flush_cnt = 0;
 
-        buffer = buf->buffer;
        refcount_inc(&skb->users);
        skb_queue_tail(&buf->skb_list, skb);
 
-        if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
-                int element = buf->next_element_to_fill;
-                is_first_elem = false;
-
-                hdr_len = sizeof(struct qeth_hdr_tso) +
-                        ((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
-                /*fill first buffer entry only with header information */
-                buffer->element[element].addr = skb->data;
-                buffer->element[element].length = hdr_len;
-                buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
-                buf->next_element_to_fill++;
-                skb_pull(skb, hdr_len);
-        }
-        /* IQD */
-        if (offset > 0) {
+        /* build dedicated header element */
+        if (hd_len) {
                int element = buf->next_element_to_fill;
                is_first_elem = false;
 
                buffer->element[element].addr = hdr;
-                buffer->element[element].length = sizeof(struct qeth_hdr) +
-                                                        hd_len;
+                buffer->element[element].length = hd_len;
                buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
-                buf->is_header[element] = 1;
+                /* remember to free cache-allocated qeth_hdr: */
+                buf->is_header[element] = ((void *)hdr != skb->data);
                buf->next_element_to_fill++;
        }
 
@@ -4020,7 +4043,7 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 int qeth_do_send_packet_fast(struct qeth_card *card,
                              struct qeth_qdio_out_q *queue, struct sk_buff *skb,
                              struct qeth_hdr *hdr, unsigned int offset,
-                             int hd_len)
+                             unsigned int hd_len)
 {
        struct qeth_qdio_out_buffer *buffer;
        int index;
@@ -4050,8 +4073,9 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
 EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
 
 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                         struct sk_buff *skb, struct qeth_hdr *hdr,
-                        int elements_needed)
+                        unsigned int offset, unsigned int hd_len,
+                        int elements_needed)
 {
        struct qeth_qdio_out_buffer *buffer;
        int start_index;
@@ -4100,7 +4124,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                        }
                }
        }
-        tmp = qeth_fill_buffer(queue, buffer, skb, hdr, 0, 0);
+        tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
        queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
                                  QDIO_MAX_BUFFERS_PER_Q;
        flush_count += tmp;
...
@@ -259,13 +259,14 @@ static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
        card->perf_stats.tx_csum++;
 }
 
-static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
-                                struct sk_buff *skb, int cast_type)
+static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
+                                int cast_type, unsigned int data_len)
 {
        struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
 
        memset(hdr, 0, sizeof(struct qeth_hdr));
        hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
+        hdr->hdr.l2.pkt_length = data_len;
 
        /* set byte byte 3 to casting flags */
        if (cast_type == RTN_MULTICAST)
@@ -275,7 +276,6 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
        else
                hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
 
-        hdr->hdr.l2.pkt_length = skb->len - sizeof(struct qeth_hdr);
        /* VSWITCH relies on the VLAN
         * information to be present in
         * the QDIO header */
@@ -676,143 +676,172 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
                qeth_promisc_to_bridge(card);
 }
 
-static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
-                                           struct net_device *dev)
-{
-        int rc;
-        struct qeth_hdr *hdr = NULL;
-        int elements = 0;
-        struct qeth_card *card = dev->ml_priv;
-        struct sk_buff *new_skb = skb;
-        int cast_type = qeth_l2_get_cast_type(card, skb);
-        struct qeth_qdio_out_q *queue;
-        int tx_bytes = skb->len;
-        int data_offset = -1;
-        int elements_needed = 0;
-        int hd_len = 0;
-        unsigned int nr_frags;
-
-        if (card->qdio.do_prio_queueing || (cast_type &&
-                                        card->info.is_multicast_different))
-                queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
-                                        qeth_get_ip_version(skb), cast_type)];
-        else
-                queue = card->qdio.out_qs[card->qdio.default_out_queue];
-
-        if ((card->state != CARD_STATE_UP) || !card->lan_online) {
-                card->stats.tx_carrier_errors++;
-                goto tx_drop;
-        }
-
-        if ((card->info.type == QETH_CARD_TYPE_OSN) &&
-            (skb->protocol == htons(ETH_P_IPV6)))
-                goto tx_drop;
-
-        if (card->options.performance_stats) {
-                card->perf_stats.outbound_cnt++;
-                card->perf_stats.outbound_start_time = qeth_get_micros();
-        }
-        netif_stop_queue(dev);
-
-        /* fix hardware limitation: as long as we do not have sbal
-         * chaining we can not send long frag lists
-         */
-        if ((card->info.type != QETH_CARD_TYPE_IQD) &&
-            !qeth_get_elements_no(card, new_skb, 0, 0)) {
-                int lin_rc = skb_linearize(new_skb);
-
-                if (card->options.performance_stats) {
-                        if (lin_rc)
-                                card->perf_stats.tx_linfail++;
-                        else
-                                card->perf_stats.tx_lin++;
-                }
-                if (lin_rc)
-                        goto tx_drop;
-        }
-        nr_frags = skb_shinfo(new_skb)->nr_frags;
-
-        if (card->info.type == QETH_CARD_TYPE_OSN)
-                hdr = (struct qeth_hdr *)skb->data;
-        else {
-                if (card->info.type == QETH_CARD_TYPE_IQD) {
-                        new_skb = skb;
-                        data_offset = ETH_HLEN;
-                        hd_len = ETH_HLEN;
-                        hdr = kmem_cache_alloc(qeth_core_header_cache,
-                                                GFP_ATOMIC);
-                        if (!hdr)
-                                goto tx_drop;
-                        elements_needed++;
-                        qeth_l2_fill_header(card, hdr, new_skb, cast_type);
-                        hdr->hdr.l2.pkt_length = new_skb->len;
-                        skb_copy_from_linear_data(new_skb,
-                                                ((char *)hdr) + sizeof(*hdr),
-                                                ETH_HLEN);
-                } else {
-                        /* create a clone with writeable headroom */
-                        new_skb = skb_realloc_headroom(skb,
-                                                sizeof(struct qeth_hdr));
-                        if (!new_skb)
-                                goto tx_drop;
-                        hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
-                        qeth_l2_fill_header(card, hdr, new_skb, cast_type);
-                        if (new_skb->ip_summed == CHECKSUM_PARTIAL)
-                                qeth_l2_hdr_csum(card, hdr, new_skb);
-                }
-        }
-
-        elements = qeth_get_elements_no(card, new_skb, elements_needed,
-                                        (data_offset > 0) ? data_offset : 0);
-        if (!elements) {
-                if (data_offset >= 0)
-                        kmem_cache_free(qeth_core_header_cache, hdr);
-                goto tx_drop;
-        }
-
-        if (card->info.type != QETH_CARD_TYPE_IQD) {
-                if (qeth_hdr_chk_and_bounce(new_skb, &hdr,
-                                    sizeof(struct qeth_hdr_layer2)))
-                        goto tx_drop;
-                rc = qeth_do_send_packet(card, queue, new_skb, hdr,
-                                         elements);
-        } else
-                rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
-                                        data_offset, hd_len);
-        if (!rc) {
-                card->stats.tx_packets++;
-                card->stats.tx_bytes += tx_bytes;
-                if (card->options.performance_stats && nr_frags) {
-                        card->perf_stats.sg_skbs_sent++;
-                        /* nr_frags + skb->data */
-                        card->perf_stats.sg_frags_sent += nr_frags + 1;
-                }
-                if (new_skb != skb)
-                        dev_kfree_skb_any(skb);
-                rc = NETDEV_TX_OK;
-        } else {
-                if (data_offset >= 0)
-                        kmem_cache_free(qeth_core_header_cache, hdr);
-
-                if (rc == -EBUSY) {
-                        if (new_skb != skb)
-                                dev_kfree_skb_any(new_skb);
-                        return NETDEV_TX_BUSY;
-                } else
-                        goto tx_drop;
-        }
-
-        netif_wake_queue(dev);
-        if (card->options.performance_stats)
-                card->perf_stats.outbound_time += qeth_get_micros() -
-                        card->perf_stats.outbound_start_time;
-        return rc;
+static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb,
+                            struct qeth_qdio_out_q *queue, int cast_type)
+{
+        unsigned int data_offset = ETH_HLEN;
+        struct qeth_hdr *hdr;
+        int rc;
+
+        hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+        if (!hdr)
+                return -ENOMEM;
+        qeth_l2_fill_header(hdr, skb, cast_type, skb->len);
+        skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr),
+                                  data_offset);
+
+        if (!qeth_get_elements_no(card, skb, 1, data_offset)) {
+                rc = -E2BIG;
+                goto out;
+        }
+        rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset,
+                                      sizeof(*hdr) + data_offset);
+out:
+        if (rc)
+                kmem_cache_free(qeth_core_header_cache, hdr);
+        return rc;
+}
+
+static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
+                            struct qeth_qdio_out_q *queue, int cast_type)
+{
+        int push_len = sizeof(struct qeth_hdr);
+        unsigned int elements, nr_frags;
+        unsigned int hdr_elements = 0;
+        struct qeth_hdr *hdr = NULL;
+        unsigned int hd_len = 0;
+        int rc;
+
+        /* fix hardware limitation: as long as we do not have sbal
+         * chaining we can not send long frag lists
+         */
+        if (!qeth_get_elements_no(card, skb, 0, 0)) {
+                rc = skb_linearize(skb);
+
+                if (card->options.performance_stats) {
+                        if (rc)
+                                card->perf_stats.tx_linfail++;
+                        else
+                                card->perf_stats.tx_lin++;
+                }
+                if (rc)
+                        return rc;
+        }
+        nr_frags = skb_shinfo(skb)->nr_frags;
+
+        rc = skb_cow_head(skb, push_len);
+        if (rc)
+                return rc;
+        push_len = qeth_push_hdr(skb, &hdr, push_len);
+        if (push_len < 0)
+                return push_len;
+        if (!push_len) {
+                /* hdr was allocated from cache */
+                hd_len = sizeof(*hdr);
+                hdr_elements = 1;
+        }
+        qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
+        if (skb->ip_summed == CHECKSUM_PARTIAL)
+                qeth_l2_hdr_csum(card, hdr, skb);
+
+        elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
+        if (!elements) {
+                rc = -E2BIG;
+                goto out;
+        }
+        elements += hdr_elements;
+
+        /* TODO: remove the skb_orphan() once TX completion is fast enough */
+        skb_orphan(skb);
+        rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements);
+out:
+        if (!rc) {
+                if (card->options.performance_stats && nr_frags) {
+                        card->perf_stats.sg_skbs_sent++;
+                        /* nr_frags + skb->data */
+                        card->perf_stats.sg_frags_sent += nr_frags + 1;
+                }
+        } else {
+                if (hd_len)
+                        kmem_cache_free(qeth_core_header_cache, hdr);
+                if (rc == -EBUSY)
+                        /* roll back to ETH header */
+                        skb_pull(skb, push_len);
+        }
+        return rc;
+}
+
+static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
+                            struct qeth_qdio_out_q *queue)
+{
+        unsigned int elements;
+        struct qeth_hdr *hdr;
+
+        if (skb->protocol == htons(ETH_P_IPV6))
+                return -EPROTONOSUPPORT;
+
+        hdr = (struct qeth_hdr *)skb->data;
+        elements = qeth_get_elements_no(card, skb, 0, 0);
+        if (!elements)
+                return -E2BIG;
+        if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr)))
+                return -EINVAL;
+        return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements);
+}
+
+static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
+                                           struct net_device *dev)
+{
+        struct qeth_card *card = dev->ml_priv;
+        int cast_type = qeth_l2_get_cast_type(card, skb);
+        struct qeth_qdio_out_q *queue;
+        int tx_bytes = skb->len;
+        int rc;
+
+        if (card->qdio.do_prio_queueing || (cast_type &&
+                                        card->info.is_multicast_different))
+                queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
+                                        qeth_get_ip_version(skb), cast_type)];
+        else
+                queue = card->qdio.out_qs[card->qdio.default_out_queue];
+
+        if ((card->state != CARD_STATE_UP) || !card->lan_online) {
+                card->stats.tx_carrier_errors++;
+                goto tx_drop;
+        }
+
+        if (card->options.performance_stats) {
+                card->perf_stats.outbound_cnt++;
+                card->perf_stats.outbound_start_time = qeth_get_micros();
+        }
+        netif_stop_queue(dev);
+
+        switch (card->info.type) {
+        case QETH_CARD_TYPE_OSN:
+                rc = qeth_l2_xmit_osn(card, skb, queue);
+                break;
+        case QETH_CARD_TYPE_IQD:
+                rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
+                break;
+        default:
+                rc = qeth_l2_xmit_osa(card, skb, queue, cast_type);
+        }
+
+        if (!rc) {
+                card->stats.tx_packets++;
+                card->stats.tx_bytes += tx_bytes;
+                if (card->options.performance_stats)
+                        card->perf_stats.outbound_time += qeth_get_micros() -
+                                card->perf_stats.outbound_start_time;
+                netif_wake_queue(dev);
+                return NETDEV_TX_OK;
+        } else if (rc == -EBUSY) {
+                return NETDEV_TX_BUSY;
+        } /* else fall through */
 
 tx_drop:
        card->stats.tx_dropped++;
        card->stats.tx_errors++;
-        if ((new_skb != skb) && new_skb)
-                dev_kfree_skb_any(new_skb);
        dev_kfree_skb_any(skb);
        netif_wake_queue(dev);
        return NETDEV_TX_OK;
@@ -990,6 +1019,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
                        card->dev->vlan_features |= NETIF_F_RXCSUM;
                }
        }
+        if (card->info.type != QETH_CARD_TYPE_OSN &&
+            card->info.type != QETH_CARD_TYPE_IQD) {
+                card->dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+                card->dev->needed_headroom = sizeof(struct qeth_hdr);
+        }
+
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
        card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
...
@@ -2637,6 +2637,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
                        qeth_get_priority_queue(card, skb, ipv, cast_type) :
                        card->qdio.default_out_queue];
        int tx_bytes = skb->len;
+        unsigned int hd_len = 0;
        bool use_tso;
        int data_offset = -1;
        unsigned int nr_frags;
@@ -2669,6 +2670,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
        if (card->info.type == QETH_CARD_TYPE_IQD) {
                new_skb = skb;
                data_offset = ETH_HLEN;
+                hd_len = sizeof(*hdr);
                hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
                if (!hdr)
                        goto tx_drop;
@@ -2756,19 +2758,21 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
        if (card->info.type != QETH_CARD_TYPE_IQD) {
                int len;
 
-                if (use_tso)
-                        len = ((unsigned long)tcp_hdr(new_skb) +
-                                        tcp_hdrlen(new_skb)) -
-                                        (unsigned long)new_skb->data;
-                else
+                if (use_tso) {
+                        hd_len = sizeof(struct qeth_hdr_tso) +
+                                 ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
+                        len = hd_len;
+                } else {
                        len = sizeof(struct qeth_hdr_layer3);
+                }
 
                if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
                        goto tx_drop;
-                rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements);
+                rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len,
+                                         hd_len, elements);
        } else
                rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
-                                              data_offset, 0);
+                                              data_offset, hd_len);
 
        if (!rc) {
                card->stats.tx_packets++;
...