Commit ace4cede authored by David S. Miller

Merge branch 's390-qeth-next'

Julian Wiedmann says:

====================
s390/qeth: updates 2019-08-23

Please apply one more round of qeth patches. These implement support for
several TX-related features, namely TX NAPI, BQL and xmit_more.

Note that this includes two qdio patches which lay the necessary
groundwork, and have been acked by Vasily.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fbbdbc64 9549d70a
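For orientation: BQL brackets each TX queue with netdev_tx_sent_queue()/netdev_tx_completed_queue() so the stack can bound the bytes in flight, TX NAPI moves completion handling into a per-queue poll routine, and xmit_more lets the driver defer its doorbell (for qeth, the SIGA instruction) while the stack announces more pending skbs. The sketch below shows only the generic xmit_more pattern; it is not qeth code, and example_flush_queue() is a made-up stand-in for the driver's flush/doorbell path.

#include <linux/netdevice.h>

/* Generic illustration of the xmit_more pattern; not taken from this series. */
static void example_flush_queue(struct net_device *dev)
{
        /* hand all queued buffers to the device (for qeth: issue SIGA) */
}

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev,
                                                       skb_get_queue_mapping(skb));

        /* ... add skb to the currently open HW buffer ... */

        /* Defer the doorbell while the stack announces more pending skbs,
         * but never when the queue has just been stopped.
         */
        if (!netdev_xmit_more() || netif_tx_queue_stopped(txq))
                example_flush_queue(dev);

        return NETDEV_TX_OK;
}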
@@ -16,6 +16,7 @@
 #define QDIO_MAX_QUEUES_PER_IRQ        4
 #define QDIO_MAX_BUFFERS_PER_Q         128
 #define QDIO_MAX_BUFFERS_MASK          (QDIO_MAX_BUFFERS_PER_Q - 1)
+#define QDIO_BUFNR(num)                ((num) & QDIO_MAX_BUFFERS_MASK)
 #define QDIO_MAX_ELEMENTS_PER_BUFFER   16
 #define QDIO_SBAL_SIZE                 256
@@ -359,7 +360,7 @@ struct qdio_initialize {
         qdio_handler_t *output_handler;
         void (**queue_start_poll_array) (struct ccw_device *, int,
                                          unsigned long);
-        int scan_threshold;
+        unsigned int scan_threshold;
         unsigned long int_parm;
         struct qdio_buffer **input_sbal_addr_array;
         struct qdio_buffer **output_sbal_addr_array;
@@ -416,6 +417,9 @@ extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
 extern int qdio_start_irq(struct ccw_device *, int);
 extern int qdio_stop_irq(struct ccw_device *, int);
 extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
+extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
+                              bool is_input, unsigned int *bufnr,
+                              unsigned int *error);
 extern int qdio_shutdown(struct ccw_device *, int);
 extern int qdio_free(struct ccw_device *);
 extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
...
@@ -206,8 +206,6 @@ struct qdio_output_q {
         struct qdio_outbuf_state *sbal_state;
         /* timer to check for more outbound work */
         struct timer_list timer;
-        /* used SBALs before tasklet schedule */
-        int scan_threshold;
 };
 /*
@@ -295,6 +293,7 @@ struct qdio_irq {
         struct qdio_ssqd_desc ssqd_desc;
         void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+        unsigned int scan_threshold;    /* used SBALs before tasklet schedule */
         int perf_stat_enabled;
         struct qdr *qdr;
...
@@ -647,8 +647,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
                 qperf_inc(q, outbound_handler);
                 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
                               start, count);
-                if (q->u.out.use_cq)
-                        qdio_handle_aobs(q, start, count);
         }
         q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
@@ -774,8 +772,11 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
         count = get_outbound_buffer_frontier(q, start);
-        if (count)
+        if (count) {
                 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
+                if (q->u.out.use_cq)
+                        qdio_handle_aobs(q, start, count);
+        }
         return count;
 }
@@ -879,7 +880,7 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
         struct qdio_q *out;
         int i;
-        if (!pci_out_supported(irq))
+        if (!pci_out_supported(irq) || !irq->scan_threshold)
                 return;
         for_each_output_queue(irq, out, i)
@@ -972,7 +973,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
                 }
         }
-        if (!pci_out_supported(irq_ptr))
+        if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
                 return;
         for_each_output_queue(irq_ptr, q, i) {
@@ -1527,6 +1528,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                            int bufnr, int count)
 {
+        const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
         unsigned char state = 0;
         int used, rc = 0;
@@ -1565,8 +1567,12 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                 rc = qdio_kick_outbound_q(q, 0);
         }
+        /* Let drivers implement their own completion scanning: */
+        if (!scan_threshold)
+                return rc;
+
         /* in case of SIGA errors we must process the error immediately */
-        if (used >= q->u.out.scan_threshold || rc)
+        if (used >= scan_threshold || rc)
                 qdio_tasklet_schedule(q);
         else
                 /* free the SBALs in case of no further traffic */
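The early return above is what lets a driver opt out of qdio's tasklet-driven completion scanning altogether: registering with a scan_threshold of 0 means qdio neither schedules the outbound tasklet nor scans Output Queues behind the driver's back. A minimal sketch of such a registration, assuming the qdio_establish() entry point of this era; everything other than scan_threshold is elided:

        struct qdio_initialize init_data = {};
        int rc;

        /* ... handlers, queue counts, SBAL arrays, etc. ... */
        init_data.scan_threshold = 0;   /* driver scans TX completions itself,
                                         * e.g. via qdio_inspect_queue()
                                         */
        rc = qdio_establish(&init_data);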
@@ -1655,6 +1661,44 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 }
 EXPORT_SYMBOL(qdio_start_irq);
+static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
+                                unsigned int *error)
+{
+        unsigned int start = q->first_to_check;
+        int count;
+
+        count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
+                                qdio_outbound_q_moved(q, start);
+        if (count == 0)
+                return 0;
+
+        *bufnr = start;
+        *error = q->qdio_error;
+
+        /* for the next time */
+        q->first_to_check = add_buf(start, count);
+        q->qdio_error = 0;
+
+        return count;
+}
+
+int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
+                       unsigned int *bufnr, unsigned int *error)
+{
+        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+        struct qdio_q *q;
+
+        if (!irq_ptr)
+                return -ENODEV;
+        q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
+
+        if (need_siga_sync(q))
+                qdio_siga_sync_q(q);
+
+        return __qdio_inspect_queue(q, bufnr, error);
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_queue);
+
 /**
  * qdio_get_next_buffers - process input buffers
  * @cdev: associated ccw_device for the qdio subchannel
@@ -1672,13 +1716,10 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
 {
         struct qdio_q *q;
         struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-        unsigned int start;
-        int count;
         if (!irq_ptr)
                 return -ENODEV;
         q = irq_ptr->input_qs[nr];
-        start = q->first_to_check;
         /*
          * Cannot rely on automatic sync after interrupt since queues may
@@ -1689,25 +1730,11 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
         qdio_check_outbound_pci_queues(irq_ptr);
-        count = qdio_inbound_q_moved(q, start);
-        if (count == 0)
-                return 0;
-        start = add_buf(start, count);
-        q->first_to_check = start;
         /* Note: upper-layer MUST stop processing immediately here ... */
         if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                 return -EIO;
-        *bufnr = q->first_to_kick;
-        *error = q->qdio_error;
-        /* for the next time */
-        q->first_to_kick = add_buf(q->first_to_kick, count);
-        q->qdio_error = 0;
-        return count;
+        return __qdio_inspect_queue(q, bufnr, error);
 }
 EXPORT_SYMBOL(qdio_get_next_buffers);
...
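A driver-side completion scan built on the new interface might look roughly like the following sketch; example_handle_buffer() is a hypothetical per-buffer handler, and QDIO_BUFNR() (added above) wraps the ring index. The return value of qdio_inspect_queue() is the number of buffers that completed starting at *bufnr.

/* Sketch only: drain completed buffers from Output Queue 'nr'. */
static int example_scan_tx_completions(struct ccw_device *cdev, unsigned int nr)
{
        unsigned int bufnr, error;
        int i, completed;

        completed = qdio_inspect_queue(cdev, nr, false, &bufnr, &error);
        if (completed <= 0)
                return completed;

        for (i = 0; i < completed; i++)
                /* hypothetical: free skbs, update BQL/stats for this buffer */
                example_handle_buffer(QDIO_BUFNR(bufnr + i), error);

        return completed;
}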
@@ -248,7 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
                 output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
                 q->is_input_q = 0;
-                q->u.out.scan_threshold = qdio_init->scan_threshold;
                 setup_storage_lists(q, irq_ptr, output_sbal_array, i);
                 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
@@ -474,6 +473,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
         irq_ptr->nr_input_qs = init_data->no_input_qs;
         irq_ptr->nr_output_qs = init_data->no_output_qs;
         irq_ptr->cdev = init_data->cdev;
+        irq_ptr->scan_threshold = init_data->scan_threshold;
         ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
         setup_queues(irq_ptr, init_data);
...
@@ -22,6 +22,7 @@
 #include <linux/hashtable.h>
 #include <linux/ip.h>
 #include <linux/refcount.h>
+#include <linux/timer.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
@@ -30,6 +31,7 @@
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
+#include <net/sch_generic.h>
 #include <net/tcp.h>
 #include <asm/debug.h>
@@ -376,6 +378,28 @@ enum qeth_header_ids {
 #define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
 #define QETH_HDR_EXT_UDP              0x40 /*bit off for TCP*/
+static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1,
+                                     struct qeth_hdr_layer2 *h2)
+{
+        return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) &&
+               h1->vlan_id == h2->vlan_id;
+}
+
+static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1,
+                                         struct qeth_hdr_layer3 *h2)
+{
+        return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) &&
+               h1->vlan_id == h2->vlan_id;
+}
+
+static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
+                                         struct qeth_hdr_layer3 *h2)
+{
+        return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
+               ipv6_addr_equal(&h1->next_hop.ipv6_addr,
+                               &h2->next_hop.ipv6_addr);
+}
+
 enum qeth_qdio_info_states {
         QETH_QDIO_UNINITIALIZED,
         QETH_QDIO_ALLOCATED,
@@ -424,6 +448,7 @@ struct qeth_qdio_out_buffer {
         struct qdio_buffer *buffer;
         atomic_t state;
         int next_element_to_fill;
+        unsigned int bytes;
         struct sk_buff_head skb_list;
         int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
@@ -473,6 +498,8 @@ struct qeth_out_q_stats {
         u64 tso_bytes;
         u64 packing_mode_switch;
         u64 stopped;
+        u64 completion_yield;
+        u64 completion_timer;
         /* rtnl_link_stats64 */
         u64 tx_packets;
@@ -481,6 +508,8 @@ struct qeth_out_q_stats {
         u64 tx_dropped;
 };
+#define QETH_TX_TIMER_USECS            500
+
 struct qeth_qdio_out_q {
         struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
         struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
@@ -499,13 +528,36 @@ struct qeth_qdio_out_q {
         atomic_t used_buffers;
         /* indicates whether PCI flag must be set (or if one is outstanding) */
         atomic_t set_pci_flags_count;
+        struct napi_struct napi;
+        struct timer_list timer;
+        struct qeth_hdr *prev_hdr;
+        u8 bulk_start;
 };
+#define qeth_for_each_output_queue(card, q, i)          \
+        for (i = 0; i < card->qdio.no_out_queues &&     \
+                    (q = card->qdio.out_qs[i]); i++)
+
+#define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
+
+static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
+{
+        if (timer_pending(&queue->timer))
+                return;
+        mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
+                                 jiffies);
+}
+
 static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
 {
         return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
 }
+
+static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
+{
+        return atomic_read(&queue->used_buffers) == 0;
+}
+
 struct qeth_qdio_info {
         atomic_t state;
         /* input */
...
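The new prev_hdr/bulk_start fields and the qeth_l2_same_vlan()/qeth_l3_iqd_same_vlan()/qeth_l3_same_next_hop() helpers above exist so the xmit path can pack consecutive skbs into one qdio buffer when xmit_more is signalled; the decision logic itself sits in the collapsed core diff below. As a rough sketch of the kind of check these helpers enable (the function name and the exact rule are illustrative, not the series' actual code):

/* Illustrative only: may this L3 skb share the buffer opened for the
 * previous skb?  Requires matching VLAN handling and the same next hop.
 */
static bool example_l3_may_bulk(struct qeth_hdr_layer3 *prev,
                                struct qeth_hdr_layer3 *curr)
{
        return qeth_l3_iqd_same_vlan(prev, curr) &&
               qeth_l3_same_next_hop(prev, curr);
}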
This diff is collapsed.
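The collapsed diff (the qeth core transmit path) carries most of the actual wiring: one napi_struct and completion timer per Output Queue, a poll routine that drains completed buffers, and the BQL/xmit_more handling in qeth_xmit(). Purely as a hedged sketch of how the helpers declared above fit together; example_drain_completions() is a made-up stand-in for the real completion scan, and the logic is simplified:

/* Simplified sketch of a per-TX-queue NAPI poll; not the actual qeth code. */
static int example_tx_poll(struct napi_struct *napi, int budget)
{
        struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
        unsigned int work_done;

        /* hypothetical: reclaim up to 'budget' completed buffers,
         * e.g. via qdio_inspect_queue()
         */
        work_done = example_drain_completions(queue, budget);

        if (work_done < budget) {
                /* Nothing left to reclaim right now: stop polling.  If
                 * buffers are still in flight, arm the timer so they get
                 * reclaimed even without further TX activity.
                 */
                if (napi_complete_done(napi, 0) &&
                    !qeth_out_queue_is_empty(queue))
                        qeth_tx_arm_timer(queue);
        }
        return work_done;
}

/* The completion timer would then simply kick the poll routine again: */
static void example_tx_timer(struct timer_list *timer)
{
        struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

        napi_schedule(&queue->napi);
        QETH_TXQ_STAT_INC(queue, completion_timer);
}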
@@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats[] = {
         QETH_TXQ_STAT("TSO bytes", tso_bytes),
         QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
         QETH_TXQ_STAT("Queue stopped", stopped),
+        QETH_TXQ_STAT("Completion yield", completion_yield),
+        QETH_TXQ_STAT("Completion timer", completion_timer),
 };
 static const struct qeth_stats card_stats[] = {
...
@@ -175,10 +175,8 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
                 hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
         } else {
                 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
-                if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                if (skb->ip_summed == CHECKSUM_PARTIAL)
                         qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
-                        QETH_TXQ_STAT_INC(queue, skbs_csum);
-                }
         }
         /* set byte byte 3 to casting flags */
@@ -588,9 +586,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
         struct qeth_card *card = dev->ml_priv;
         u16 txq = skb_get_queue_mapping(skb);
         struct qeth_qdio_out_q *queue;
-        int tx_bytes = skb->len;
         int rc;
+        if (!skb_is_gso(skb))
+                qdisc_skb_cb(skb)->pkt_len = skb->len;
         if (IS_IQD(card))
                 txq = qeth_iqd_translate_txq(dev, txq);
         queue = card->qdio.out_qs[txq];
@@ -601,11 +600,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
         rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
                        qeth_l2_fill_header);
-        if (!rc) {
-                QETH_TXQ_STAT_INC(queue, tx_packets);
-                QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
+        if (!rc)
                 return NETDEV_TX_OK;
-        }
         QETH_TXQ_STAT_INC(queue, tx_dropped);
         kfree_skb(skb);
...
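Setting qdisc_skb_cb(skb)->pkt_len for non-GSO skbs makes qdisc_pkt_len() valid even when no qdisc filled it in (GSO skbs already carry their full length there), which is what the BQL accounting keys off. The fragments below sketch that accounting around the 'bytes' field added to struct qeth_qdio_out_buffer; the real fill/flush/completion code is in the collapsed core diff, and the function names here are illustrative:

/* Illustrative fragments; buf/txq/skb stand for the qdio buffer being
 * filled, its netdev TX queue and the skb currently being transmitted.
 */
static void example_account_skb(struct qeth_qdio_out_buffer *buf,
                                struct sk_buff *skb)
{
        /* fill time: accumulate the skb's length per qdio buffer */
        buf->bytes += qdisc_pkt_len(skb);
}

static void example_buffer_flushed(struct qeth_qdio_out_buffer *buf,
                                   struct netdev_queue *txq)
{
        /* buffer handed to the device: tell BQL how much is now in flight */
        netdev_tx_sent_queue(txq, buf->bytes);
}

static void example_buffer_completed(struct qeth_qdio_out_buffer *buf,
                                     struct netdev_queue *txq)
{
        /* completion: release the packets and bytes back to BQL */
        netdev_tx_completed_queue(txq, skb_queue_len(&buf->skb_list),
                                  buf->bytes);
        buf->bytes = 0;
}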
@@ -1957,7 +1957,6 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
                 /* some HW requires combined L3+L4 csum offload: */
                 if (ipv == 4)
                         hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
-                QETH_TXQ_STAT_INC(queue, skbs_csum);
         }
 }
@@ -2044,9 +2043,10 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
         u16 txq = skb_get_queue_mapping(skb);
         int ipv = qeth_get_ip_version(skb);
         struct qeth_qdio_out_q *queue;
-        int tx_bytes = skb->len;
         int rc;
+        if (!skb_is_gso(skb))
+                qdisc_skb_cb(skb)->pkt_len = skb->len;
         if (IS_IQD(card)) {
                 queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
@@ -2069,11 +2069,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
         else
                 rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
-        if (!rc) {
-                QETH_TXQ_STAT_INC(queue, tx_packets);
-                QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
+        if (!rc)
                 return NETDEV_TX_OK;
-        }
 tx_drop:
         QETH_TXQ_STAT_INC(queue, tx_dropped);
...