Commit 5f89eca5 authored by Julian Wiedmann, committed by David S. Miller

s390/qeth: speed up L2 IQD xmit

Modify the L2 OSA xmit path so that it also supports L2 IQD devices
(in particular, their HW header requirements). This allows IQD devices
to advertise NETIF_F_SG support, and eliminates the allocation overhead
for the HW header.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a7c2f4a3
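
Before the diff, a short orientation: the dedicated IQD xmit helper is removed, and the common path now covers the IQD requirement that the Ethernet header sit contiguously with the HW header. For IQD the code therefore uses proto_len = ETH_HLEN, first tries to push the HW header directly in front of the frame data, and only falls back to a header element allocated from the cache when that is not possible. The stand-alone C sketch below models just this decision; struct fake_skb, try_push_hw_header() and QETH_HDR_LEN are hypothetical stand-ins rather than the driver's real API, and the real qeth_add_hw_header() performs additional element-boundary checks.

/*
 * Stand-alone model of the header-setup decision in the unified L2 xmit
 * path. All types, names and sizes are illustrative stand-ins; only the
 * control flow mirrors the diff below.  Build: cc -o xmit_model xmit_model.c
 */
#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header length */
#define QETH_HDR_LEN	32	/* stand-in for sizeof(struct qeth_hdr) */

struct fake_skb {
	unsigned int len;	/* frame length */
	unsigned int headroom;	/* free bytes in front of the frame data */
};

/*
 * Model of the push-vs-allocate choice: returns the number of bytes pushed
 * in front of the frame data, or 0 when a separate header element has to be
 * used instead.
 */
static int try_push_hw_header(struct fake_skb *skb, unsigned int hw_hdr_len,
			      unsigned int proto_len)
{
	(void)proto_len;	/* real driver: must fit into one element */
	if (skb->headroom >= hw_hdr_len)
		return hw_hdr_len;
	return 0;
}

static void plan_xmit(const struct fake_skb *skb_in, bool is_iqd)
{
	/* IQD needs the ETH header contiguous with the HW header */
	unsigned int proto_len = is_iqd ? ETH_HLEN : 0;
	unsigned int data_offset = 0, hd_len = 0;
	struct fake_skb skb = *skb_in;
	int push_len = try_push_hw_header(&skb, QETH_HDR_LEN, proto_len);

	if (!push_len) {
		/* separate element: HW header plus the leading frame bytes */
		hd_len = QETH_HDR_LEN + proto_len;
		/* skip the bytes that now live in the header element */
		data_offset = proto_len;
	}
	printf("iqd=%d push_len=%d hd_len=%u data_offset=%u\n",
	       is_iqd, push_len, hd_len, data_offset);
}

int main(void)
{
	struct fake_skb with_room = { .len = 1500, .headroom = 64 };
	struct fake_skb no_room   = { .len = 1500, .headroom = 0 };

	plan_xmit(&with_room, true);	/* IQD, header pushed inline */
	plan_xmit(&no_room, true);	/* IQD, falls back to a header element */
	plan_xmit(&no_room, false);	/* OSA, falls back to a header element */
	return 0;
}

In the fallback case, hd_len = hw_hdr_len + proto_len describes the separate header element and data_offset = proto_len skips the leading frame bytes carried in that element, which is exactly the bookkeeping added to qeth_l2_xmit() in the diff.
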
@@ -5731,6 +5731,13 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
         dev->mtu = 0;
         SET_NETDEV_DEV(dev, &card->gdev->dev);
         netif_carrier_off(dev);
+
+        if (!IS_OSN(card)) {
+                dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+                dev->hw_features |= NETIF_F_SG;
+                dev->vlan_features |= NETIF_F_SG;
+        }
+
         return dev;
 }
@@ -641,37 +641,13 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
                 qeth_promisc_to_bridge(card);
 }
 
-static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb,
-                            struct qeth_qdio_out_q *queue, int cast_type)
-{
-        unsigned int data_offset = ETH_HLEN;
-        struct qeth_hdr *hdr;
-        int rc;
-
-        hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
-        if (!hdr)
-                return -ENOMEM;
-        qeth_l2_fill_header(hdr, skb, cast_type, skb->len);
-        skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr),
-                                  data_offset);
-
-        if (!qeth_get_elements_no(card, skb, 1, data_offset)) {
-                rc = -E2BIG;
-                goto out;
-        }
-        rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
-                                      sizeof(*hdr) + data_offset);
-out:
-        if (rc)
-                kmem_cache_free(qeth_core_header_cache, hdr);
-        return rc;
-}
-
-static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
-                            struct qeth_qdio_out_q *queue, int cast_type,
-                            int ipv)
+static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb,
+                        struct qeth_qdio_out_q *queue, int cast_type, int ipv)
 {
+        const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
         const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
+        unsigned int frame_len = skb->len;
+        unsigned int data_offset = 0;
         struct qeth_hdr *hdr = NULL;
         unsigned int hd_len = 0;
         unsigned int elements;
@@ -682,15 +658,16 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
         if (rc)
                 return rc;
 
-        push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
+        push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
                                       &elements);
         if (push_len < 0)
                 return push_len;
         if (!push_len) {
-                /* hdr was allocated from cache */
-                hd_len = sizeof(*hdr);
+                /* HW header needs its own buffer element. */
+                hd_len = hw_hdr_len + proto_len;
+                data_offset = proto_len;
         }
-        qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
+        qeth_l2_fill_header(hdr, skb, cast_type, frame_len);
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
                 if (card->options.performance_stats)
@@ -698,9 +675,15 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
         }
 
         is_sg = skb_is_nonlinear(skb);
-        /* TODO: remove the skb_orphan() once TX completion is fast enough */
-        skb_orphan(skb);
-        rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements);
+        if (IS_IQD(card)) {
+                rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
+                                              hd_len);
+        } else {
+                /* TODO: drop skb_orphan() once TX completion is fast enough */
+                skb_orphan(skb);
+                rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
+                                         hd_len, elements);
+        }
         if (!rc) {
                 if (card->options.performance_stats) {
@@ -759,16 +742,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
         }
         netif_stop_queue(dev);
 
-        switch (card->info.type) {
-        case QETH_CARD_TYPE_OSN:
+        if (IS_OSN(card))
                 rc = qeth_l2_xmit_osn(card, skb, queue);
-                break;
-        case QETH_CARD_TYPE_IQD:
-                rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
-                break;
-        default:
-                rc = qeth_l2_xmit_osa(card, skb, queue, cast_type, ipv);
-        }
+        else
+                rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv);
 
         if (!rc) {
                 card->stats.tx_packets++;
@@ -927,6 +904,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
                 card->dev->flags |= IFF_NOARP;
         } else {
                 card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
+                card->dev->needed_headroom = sizeof(struct qeth_hdr);
         }
 
         if (card->info.type == QETH_CARD_TYPE_OSM)
@@ -934,14 +912,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
         else
                 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
-        if (card->info.type != QETH_CARD_TYPE_OSN &&
-            card->info.type != QETH_CARD_TYPE_IQD) {
-                card->dev->priv_flags &= ~IFF_TX_SKB_SHARING;
-                card->dev->needed_headroom = sizeof(struct qeth_hdr);
-                card->dev->hw_features |= NETIF_F_SG;
-                card->dev->vlan_features |= NETIF_F_SG;
-        }
-
         if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
                 card->dev->features |= NETIF_F_SG;
                 /* OSA 3S and earlier has no RX/TX support */
@@ -2556,13 +2556,10 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                         return -ENODEV;
 
                 card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
-                card->dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                 card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
                 card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
                                        NETIF_F_HW_VLAN_CTAG_RX |
                                        NETIF_F_HW_VLAN_CTAG_FILTER;
-                card->dev->hw_features |= NETIF_F_SG;
-                card->dev->vlan_features |= NETIF_F_SG;
                 netif_keep_dst(card->dev);
 
                 if (card->dev->hw_features & NETIF_F_TSO)