Commit cea0aa9c authored by David S. Miller

Merge branch 's390-next'

Julian Wiedmann says:

====================
s390/qeth: updates 2019-04-17

please apply some additional qeth patches to net-next. This patchset
converts the driver to use the kernel's multiqueue model.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3a6f7892 54a50941
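
The core of the conversion is per-TX-queue flow control: the xmit path stops its netdev TX queue when the underlying QDIO output queue fills up, and the TX completion handler wakes it again, with a re-check on both sides to close the race between them. A minimal sketch of that pattern is below, assuming a simplified fill-level counter; the demo_* names are illustrative stand-ins and not driver code — the real logic is in the qeth_do_send_packet() and qeth_qdio_output_handler() hunks further down.

#include <linux/atomic.h>
#include <linux/netdevice.h>

/* Illustrative stand-in for the driver's per-queue fill tracking. */
struct demo_txq_state {
	atomic_t used_buffers;
	int max_buffers;
};

static bool demo_queue_is_full(struct demo_txq_state *q)
{
	return atomic_read(&q->used_buffers) >= q->max_buffers;
}

/* xmit side: claim a buffer, stop the txq if that filled the queue. */
static void demo_xmit_path(struct net_device *dev, u16 txq_idx,
			   struct demo_txq_state *q)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txq_idx);

	if (atomic_inc_return(&q->used_buffers) >= q->max_buffers) {
		netif_tx_stop_queue(txq);
		/* A completion may have slipped in between the increment and
		 * the stop; re-check and wake so the queue can't stall.
		 */
		if (!demo_queue_is_full(q))
			netif_tx_start_queue(txq);
	}
}

/* completion side: release buffers, wake the txq if it was stopped. */
static void demo_tx_completion(struct net_device *dev, u16 txq_idx,
			       struct demo_txq_state *q, int completed)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txq_idx);

	atomic_sub(completed, &q->used_buffers);
	if (netif_tx_queue_stopped(txq) && !demo_queue_is_full(q))
		netif_tx_wake_queue(txq);
}
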
@@ -219,6 +219,9 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 /* QDIO queue and buffer handling */
 /*****************************************************************************/
 #define QETH_MAX_QUEUES 4
+#define QETH_IQD_MIN_TXQ	2	/* One for ucast, one for mcast. */
+#define QETH_IQD_MCAST_TXQ	0
+#define QETH_IQD_MIN_UCAST_TXQ	1
 #define QETH_IN_BUF_SIZE_DEFAULT 65536
 #define QETH_IN_BUF_COUNT_DEFAULT 64
 #define QETH_IN_BUF_COUNT_HSDEFAULT 128
@@ -464,7 +467,6 @@ struct qeth_card_stats {
 	u64 rx_errors;
 	u64 rx_dropped;
 	u64 rx_multicast;
-	u64 tx_errors;
 };

 struct qeth_out_q_stats {
@@ -479,6 +481,7 @@ struct qeth_out_q_stats {
 	u64 skbs_linearized_fail;
 	u64 tso_bytes;
 	u64 packing_mode_switch;
+	u64 stopped;

 	/* rtnl_link_stats64 */
 	u64 tx_packets;
@@ -509,6 +512,11 @@ struct qeth_qdio_out_q {
 	atomic_t set_pci_flags_count;
 };

+static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
+{
+	return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
+}
+
 struct qeth_qdio_info {
 	atomic_t state;
 	/* input */
@@ -836,6 +844,15 @@ static inline bool qeth_netdev_is_registered(struct net_device *dev)
 	return dev->netdev_ops != NULL;
 }

+static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
+{
+	if (txq == QETH_IQD_MCAST_TXQ)
+		return dev->num_tx_queues - 1;
+	if (txq == dev->num_tx_queues - 1)
+		return QETH_IQD_MCAST_TXQ;
+	return txq;
+}
+
 static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
 					  unsigned int elements)
 {
@@ -931,18 +948,7 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
 						 data, QETH_PROT_IPV6);
 }

-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
-			    int ipv);
-static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
-							 struct sk_buff *skb,
-							 int ipv, int cast_type)
-{
-	if (IS_IQD(card) && cast_type != RTN_UNICAST)
-		return card->qdio.out_qs[card->qdio.no_out_queues - 1];
-	if (!card->qdio.do_prio_queueing)
-		return card->qdio.out_qs[card->qdio.default_out_queue];
-	return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)];
-}
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);

 extern struct qeth_discipline qeth_l2_discipline;
 extern struct qeth_discipline qeth_l3_discipline;
@@ -988,7 +994,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *);
 int qeth_qdio_clear_card(struct qeth_card *, int);
 void qeth_clear_working_pool_list(struct qeth_card *);
 void qeth_clear_cmd_buffers(struct qeth_channel *);
-void qeth_clear_qdio_buffers(struct qeth_card *);
+void qeth_drain_output_queues(struct qeth_card *card);
 void qeth_setadp_promisc_mode(struct qeth_card *);
 int qeth_setadpparms_change_macaddr(struct qeth_card *);
 void qeth_tx_timeout(struct net_device *);
@@ -1023,6 +1029,8 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
 				      struct net_device *dev,
 				      netdev_features_t features);
 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+			  u8 cast_type, struct net_device *sb_dev);
 int qeth_open(struct net_device *dev);
 int qeth_stop(struct net_device *dev);
......
@@ -67,7 +67,7 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
 static void qeth_free_buffer_pool(struct qeth_card *);
 static int qeth_qdio_establish(struct qeth_card *);
-static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_free_qdio_queues(struct qeth_card *card);
 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
 		struct qeth_qdio_out_buffer *buf,
 		enum iucv_tx_notify notification);
@@ -1178,7 +1178,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }

-static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
 {
 	int j;
@@ -1194,19 +1194,18 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
 	}
 }

-void qeth_clear_qdio_buffers(struct qeth_card *card)
+void qeth_drain_output_queues(struct qeth_card *card)
 {
 	int i;

 	QETH_CARD_TEXT(card, 2, "clearqdbf");
 	/* clear outbound buffers to free skbs */
 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
-		if (card->qdio.out_qs[i]) {
-			qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
-		}
+		if (card->qdio.out_qs[i])
+			qeth_drain_output_queue(card->qdio.out_qs[i], false);
 	}
 }
-EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
+EXPORT_SYMBOL_GPL(qeth_drain_output_queues);

 static void qeth_free_buffer_pool(struct qeth_card *card)
 {
@@ -1276,30 +1275,28 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
 	return 0;
 }

-static void qeth_set_single_write_queues(struct qeth_card *card)
-{
-	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
-	    (card->qdio.no_out_queues == 4))
-		qeth_free_qdio_buffers(card);
-
-	card->qdio.no_out_queues = 1;
-	if (card->qdio.default_out_queue != 0)
-		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
-
-	card->qdio.default_out_queue = 0;
-}
-
-static void qeth_set_multiple_write_queues(struct qeth_card *card)
-{
-	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
-	    (card->qdio.no_out_queues == 1)) {
-		qeth_free_qdio_buffers(card);
-		card->qdio.default_out_queue = 2;
-	}
-	card->qdio.no_out_queues = 4;
-}
-
-static void qeth_update_from_chp_desc(struct qeth_card *card)
+static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
+{
+	unsigned int count = single ? 1 : card->dev->num_tx_queues;
+
+	rtnl_lock();
+	netif_set_real_num_tx_queues(card->dev, count);
+	rtnl_unlock();
+
+	if (card->qdio.no_out_queues == count)
+		return;
+
+	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
+		qeth_free_qdio_queues(card);
+
+	if (count == 1)
+		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+
+	card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
+	card->qdio.no_out_queues = count;
+}
+
+static int qeth_update_from_chp_desc(struct qeth_card *card)
 {
 	struct ccw_device *ccwdev;
 	struct channel_path_desc_fmt0 *chp_dsc;
@@ -1309,21 +1306,18 @@ static void qeth_update_from_chp_desc(struct qeth_card *card)
 	ccwdev = card->data.ccwdev;
 	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
 	if (!chp_dsc)
-		goto out;
+		return -ENOMEM;

 	card->info.func_level = 0x4100 + chp_dsc->desc;
-	if (card->info.type == QETH_CARD_TYPE_IQD)
-		goto out;

-	/* CHPP field bit 6 == 1 -> single queue */
-	if ((chp_dsc->chpp & 0x02) == 0x02)
-		qeth_set_single_write_queues(card);
-	else
-		qeth_set_multiple_write_queues(card);
-out:
+	if (IS_OSD(card) || IS_OSX(card))
+		/* CHPP field bit 6 == 1 -> single queue */
+		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
+
 	kfree(chp_dsc);
 	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
 	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+	return 0;
 }

 static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1332,7 +1326,6 @@ static void qeth_init_qdio_info(struct qeth_card *card)
 	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
 	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
 	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
-	card->qdio.no_out_queues = QETH_MAX_QUEUES;

 	/* inbound */
 	card->qdio.no_in_queues = 1;
@@ -2177,7 +2170,7 @@ static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
 		/* adjust RX buffer size to new max MTU: */
 		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
 		if (dev->max_mtu && dev->max_mtu != max_mtu)
-			qeth_free_qdio_buffers(card);
+			qeth_free_qdio_queues(card);
 	} else {
 		if (dev->mtu)
 			new_mtu = dev->mtu;
@@ -2350,12 +2343,12 @@ static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
 	if (!q)
 		return;

-	qeth_clear_outq_buffers(q, 1);
+	qeth_drain_output_queue(q, true);
 	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
 	kfree(q);
 }

-static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
+static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
 {
 	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
@@ -2369,7 +2362,7 @@ static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
 	return q;
 }

-static int qeth_alloc_qdio_buffers(struct qeth_card *card)
+static int qeth_alloc_qdio_queues(struct qeth_card *card)
 {
 	int i, j;
@@ -2390,7 +2383,7 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
 	/* outbound */
 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
-		card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
+		card->qdio.out_qs[i] = qeth_alloc_output_queue();
 		if (!card->qdio.out_qs[i])
 			goto out_freeoutq;
 		QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
@@ -2431,7 +2424,7 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
 	return -ENOMEM;
 }

-static void qeth_free_qdio_buffers(struct qeth_card *card)
+static void qeth_free_qdio_queues(struct qeth_card *card)
 {
 	int i, j;
@@ -2538,7 +2531,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
 		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
 		goto out_qdio;
 	}
-	rc = qeth_alloc_qdio_buffers(card);
+	rc = qeth_alloc_qdio_queues(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
 		goto out_qdio;
@@ -2546,7 +2539,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
 	rc = qeth_qdio_establish(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
-		qeth_free_qdio_buffers(card);
+		qeth_free_qdio_queues(card);
 		goto out_qdio;
 	}
 	rc = qeth_qdio_activate(card);
@@ -3371,11 +3364,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 	}

 	QETH_TXQ_STAT_ADD(queue, bufs, count);
-	netif_trans_update(queue->card->dev);
 	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
 	if (atomic_read(&queue->set_pci_flags_count))
 		qdio_flags |= QDIO_FLAG_PCI_OUT;
-	atomic_add(count, &queue->used_buffers);
 	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
 		     queue->queue_no, index, count);
 	if (rc) {
@@ -3415,7 +3406,6 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
 	 * do_send_packet. So, we check if there is a
 	 * packing buffer to be flushed here.
 	 */
-	netif_stop_queue(queue->card->dev);
 	index = queue->next_buf_to_fill;
 	q_was_packing = queue->do_pack;
 	/* queue->do_pack may change */
@@ -3460,7 +3450,7 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
 			goto out;
 		}

-		qeth_free_qdio_buffers(card);
+		qeth_free_qdio_queues(card);
 		card->options.cq = cq;
 		rc = 0;
 	}
@@ -3486,7 +3476,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
 	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

 	if (qdio_err) {
-		netif_stop_queue(card->dev);
+		netif_tx_stop_all_queues(card->dev);
 		qeth_schedule_recovery(card);
 		return;
 	}
@@ -3542,12 +3532,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 	struct qeth_card *card = (struct qeth_card *) card_ptr;
 	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
 	struct qeth_qdio_out_buffer *buffer;
+	struct net_device *dev = card->dev;
+	struct netdev_queue *txq;
 	int i;

 	QETH_CARD_TEXT(card, 6, "qdouhdl");
 	if (qdio_error & QDIO_ERROR_FATAL) {
 		QETH_CARD_TEXT(card, 2, "achkcond");
-		netif_stop_queue(card->dev);
+		netif_tx_stop_all_queues(dev);
 		qeth_schedule_recovery(card);
 		return;
 	}
@@ -3596,30 +3588,29 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 	if (card->info.type != QETH_CARD_TYPE_IQD)
 		qeth_check_outbound_queue(queue);

-	netif_wake_queue(queue->card->dev);
-}
-
-/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
-static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
-{
-	if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
-		return 2;
-	return queue_num;
+	if (IS_IQD(card))
+		__queue = qeth_iqd_translate_txq(dev, __queue);
+	txq = netdev_get_tx_queue(dev, __queue);
+	/* xmit may have observed the full-condition, but not yet stopped the
+	 * txq. In which case the code below won't trigger. So before returning,
+	 * xmit will re-check the txq's fill level and wake it up if needed.
+	 */
+	if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
+		netif_tx_wake_queue(txq);
 }

 /**
  * Note: Function assumes that we have 4 outbound queues.
  */
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
-			    int ipv)
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
 {
-	__be16 *tci;
+	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
 	u8 tos;

 	switch (card->qdio.do_prio_queueing) {
 	case QETH_PRIO_Q_ING_TOS:
 	case QETH_PRIO_Q_ING_PREC:
-		switch (ipv) {
+		switch (qeth_get_ip_version(skb)) {
 		case 4:
 			tos = ipv4_get_dsfield(ip_hdr(skb));
 			break;
@@ -3630,9 +3621,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
 			return card->qdio.default_out_queue;
 		}
 		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
-			return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
+			return ~tos >> 6 & 3;
 		if (tos & IPTOS_MINCOST)
-			return qeth_cut_iqd_prio(card, 3);
+			return 3;
 		if (tos & IPTOS_RELIABILITY)
 			return 2;
 		if (tos & IPTOS_THROUGHPUT)
@@ -3643,12 +3634,11 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
 	case QETH_PRIO_Q_ING_SKB:
 		if (skb->priority > 5)
 			return 0;
-		return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
+		return ~skb->priority >> 1 & 3;
 	case QETH_PRIO_Q_ING_VLAN:
-		tci = &((struct ethhdr *)skb->data)->h_proto;
-		if (be16_to_cpu(*tci) == ETH_P_8021Q)
-			return qeth_cut_iqd_prio(card,
-				~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
+		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
+			return ~ntohs(veth->h_vlan_TCI) >>
+			       (VLAN_PRIO_SHIFT + 1) & 3;
 		break;
 	default:
 		break;
@@ -3860,11 +3850,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
  *		from qeth_core_header_cache.
  * @offset:	when mapping the skb, start at skb->data + offset
  * @hd_len:	if > 0, build a dedicated header element of this size
+ * @flush:	Prepare the buffer to be flushed, regardless of its fill level.
  */
 static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 			    struct qeth_qdio_out_buffer *buf,
 			    struct sk_buff *skb, struct qeth_hdr *hdr,
-			    unsigned int offset, unsigned int hd_len)
+			    unsigned int offset, unsigned int hd_len,
+			    bool flush)
 {
 	struct qdio_buffer *buffer = buf->buffer;
 	bool is_first_elem = true;
@@ -3893,7 +3885,7 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 			QETH_TXQ_STAT_INC(queue, skbs_pack);

 		/* If the buffer still has free elements, keep using it. */
-		if (buf->next_element_to_fill <
+		if (!flush && buf->next_element_to_fill <
 		    QETH_MAX_BUFFER_ELEMENTS(queue->card))
 			return 0;
 	}
@@ -3911,15 +3903,31 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
 {
 	int index = queue->next_buf_to_fill;
 	struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
+	struct netdev_queue *txq;
+	bool stopped = false;

-	/*
-	 * check if buffer is empty to make sure that we do not 'overtake'
-	 * ourselves and try to fill a buffer that is already primed
+	/* Just a sanity check, the wake/stop logic should ensure that we always
+	 * get a free buffer.
 	 */
 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
 		return -EBUSY;
-	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+
+	txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb));
+
+	if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+		/* If a TX completion happens right _here_ and misses to wake
+		 * the txq, then our re-check below will catch the race.
+		 */
+		QETH_TXQ_STAT_INC(queue, stopped);
+		netif_tx_stop_queue(txq);
+		stopped = true;
+	}
+
+	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
 	qeth_flush_buffers(queue, index, 1);
+
+	if (stopped && !qeth_out_queue_is_full(queue))
+		netif_tx_start_queue(txq);
+
 	return 0;
 }
@@ -3929,6 +3937,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 			int elements_needed)
 {
 	struct qeth_qdio_out_buffer *buffer;
+	struct netdev_queue *txq;
+	bool stopped = false;
 	int start_index;
 	int flush_count = 0;
 	int do_pack = 0;
@@ -3940,14 +3950,17 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	start_index = queue->next_buf_to_fill;
 	buffer = queue->bufs[queue->next_buf_to_fill];
-	/*
-	 * check if buffer is empty to make sure that we do not 'overtake'
-	 * ourselves and try to fill a buffer that is already primed
+
+	/* Just a sanity check, the wake/stop logic should ensure that we always
+	 * get a free buffer.
 	 */
 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
 		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 		return -EBUSY;
 	}
+
+	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
+
 	/* check if we need to switch packing state of this queue */
 	qeth_switch_to_packing_if_needed(queue);
 	if (queue->do_pack) {
@@ -3962,8 +3975,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 				(queue->next_buf_to_fill + 1) %
 				QDIO_MAX_BUFFERS_PER_Q;
 			buffer = queue->bufs[queue->next_buf_to_fill];
-			/* we did a step forward, so check buffer state
-			 * again */
+
+			/* We stepped forward, so sanity-check again: */
 			if (atomic_read(&buffer->state) !=
 			    QETH_QDIO_BUF_EMPTY) {
 				qeth_flush_buffers(queue, start_index,
@@ -3976,8 +3989,18 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 		}
 	}

-	flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset,
-					hd_len);
+	if (buffer->next_element_to_fill == 0 &&
+	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+		/* If a TX completion happens right _here_ and misses to wake
+		 * the txq, then our re-check below will catch the race.
+		 */
+		QETH_TXQ_STAT_INC(queue, stopped);
+		netif_tx_stop_queue(txq);
+		stopped = true;
+	}
+
+	flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len,
+					stopped);
 	if (flush_count)
 		qeth_flush_buffers(queue, start_index, flush_count);
 	else if (!atomic_read(&queue->set_pci_flags_count))
@@ -4008,6 +4031,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 	if (do_pack)
 		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);

+	if (stopped && !qeth_out_queue_is_full(queue))
+		netif_tx_start_queue(txq);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
@@ -4094,9 +4119,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 	} else {
 		if (!push_len)
 			kmem_cache_free(qeth_core_header_cache, hdr);
-		if (rc == -EBUSY)
-			/* roll back to ETH header */
-			skb_pull(skb, push_len);
 	}
 	return rc;
 }
@@ -4341,7 +4363,6 @@ void qeth_tx_timeout(struct net_device *dev)
 	card = dev->ml_priv;
 	QETH_CARD_TEXT(card, 4, "txtimeo");
-	QETH_CARD_STAT_INC(card, tx_errors);
 	qeth_schedule_recovery(card);
 }
 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
@@ -4930,7 +4951,7 @@ static void qeth_core_free_card(struct qeth_card *card)
 	qeth_clean_channel(&card->write);
 	qeth_clean_channel(&card->data);
 	destroy_workqueue(card->event_wq);
-	qeth_free_qdio_buffers(card);
+	qeth_free_qdio_queues(card);
 	unregister_service_level(&card->qeth_service_level);
 	dev_set_drvdata(&card->gdev->dev, NULL);
 	kfree(card);
@@ -4979,7 +5000,9 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
 	QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
 	atomic_set(&card->force_alloc_skb, 0);
-	qeth_update_from_chp_desc(card);
+	rc = qeth_update_from_chp_desc(card);
+	if (rc)
+		return rc;
 retry:
 	if (retries < 3)
 		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
@@ -5557,13 +5580,17 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_IQD:
-		dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
+		dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
+				       ether_setup, QETH_MAX_QUEUES, 1);
+		break;
+	case QETH_CARD_TYPE_OSM:
+		dev = alloc_etherdev(0);
 		break;
 	case QETH_CARD_TYPE_OSN:
 		dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
 		break;
 	default:
-		dev = alloc_etherdev(0);
+		dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
 	}

 	if (!dev)
@@ -5585,9 +5612,11 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 		dev->hw_features |= NETIF_F_SG;
 		dev->vlan_features |= NETIF_F_SG;
-		if (IS_IQD(card))
+		if (IS_IQD(card)) {
+			netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ);
 			dev->features |= NETIF_F_SG;
+		}
 	}

 	return dev;
 }
@@ -5636,14 +5665,16 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 	}

 	qeth_setup_card(card);
-	qeth_update_from_chp_desc(card);
-
 	card->dev = qeth_alloc_netdev(card);
 	if (!card->dev) {
 		rc = -ENOMEM;
 		goto err_card;
 	}

+	card->qdio.no_out_queues = card->dev->num_tx_queues;
+	rc = qeth_update_from_chp_desc(card);
+	if (rc)
+		goto err_chp_desc;
 	qeth_determine_capabilities(card);
 	enforced_disc = qeth_enforce_discipline(card);
 	switch (enforced_disc) {
@@ -5670,6 +5701,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 err_disc:
 	qeth_core_free_discipline(card);
 err_load:
+err_chp_desc:
 	free_netdev(card->dev);
 err_card:
 	qeth_core_free_card(card);
@@ -5732,7 +5764,7 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev)
 	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
 		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
 	qeth_qdio_clear_card(card, 0);
-	qeth_clear_qdio_buffers(card);
+	qeth_drain_output_queues(card);
 	qdio_free(CARD_DDEV(card));
 }
@@ -6188,7 +6220,6 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	stats->rx_errors = card->stats.rx_errors;
 	stats->rx_dropped = card->stats.rx_dropped;
 	stats->multicast = card->stats.rx_multicast;
-	stats->tx_errors = card->stats.tx_errors;

 	for (i = 0; i < card->qdio.no_out_queues; i++) {
 		queue = card->qdio.out_qs[i];
@@ -6201,6 +6232,15 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 }
 EXPORT_SYMBOL_GPL(qeth_get_stats64);

+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+			  u8 cast_type, struct net_device *sb_dev)
+{
+	if (cast_type != RTN_UNICAST)
+		return QETH_IQD_MCAST_TXQ;
+	return QETH_IQD_MIN_UCAST_TXQ;
+}
+EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
+
 int qeth_open(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
@@ -6211,7 +6251,7 @@ int qeth_open(struct net_device *dev)
 		return -EIO;

 	card->data.state = CH_STATE_UP;
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);

 	napi_enable(&card->napi);
 	local_bh_disable();
......
@@ -198,6 +198,9 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
 	if (!card)
 		return -EINVAL;

+	if (IS_IQD(card))
+		return -EOPNOTSUPP;
+
 	mutex_lock(&card->conf_mutex);
 	if (card->state != CARD_STATE_DOWN) {
 		rc = -EPERM;
@@ -239,10 +242,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
 		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
 		card->qdio.default_out_queue = 2;
 	} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
-		if (card->info.type == QETH_CARD_TYPE_IQD) {
-			rc = -EPERM;
-			goto out;
-		}
 		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
 		card->qdio.default_out_queue = 3;
 	} else if (sysfs_streq(buf, "no_prio_queueing")) {
......
@@ -38,6 +38,7 @@ static const struct qeth_stats txq_stats[] = {
 	QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail),
 	QETH_TXQ_STAT("TSO bytes", tso_bytes),
 	QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
+	QETH_TXQ_STAT("Queue stopped", stopped),
 };

 static const struct qeth_stats card_stats[] = {
@@ -154,6 +155,21 @@ static void qeth_get_drvinfo(struct net_device *dev,
 		 CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
 }

+static void qeth_get_channels(struct net_device *dev,
+			      struct ethtool_channels *channels)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	channels->max_rx = dev->num_rx_queues;
+	channels->max_tx = card->qdio.no_out_queues;
+	channels->max_other = 0;
+	channels->max_combined = 0;
+	channels->rx_count = dev->real_num_rx_queues;
+	channels->tx_count = dev->real_num_tx_queues;
+	channels->other_count = 0;
+	channels->combined_count = 0;
+}
+
 /* Helper function to fill 'advertising' and 'supported' which are the same. */
 /* Autoneg and full-duplex are supported and advertised unconditionally.     */
 /* Always advertise and support all speeds up to specified, and only one     */
@@ -359,6 +375,7 @@ const struct ethtool_ops qeth_ethtool_ops = {
 	.get_ethtool_stats = qeth_get_ethtool_stats,
 	.get_sset_count = qeth_get_sset_count,
 	.get_drvinfo = qeth_get_drvinfo,
+	.get_channels = qeth_get_channels,
 	.get_link_ksettings = qeth_get_link_ksettings,
 };
......
@@ -161,10 +161,8 @@ static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
 	}
 }

-static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_l2_get_cast_type(struct sk_buff *skb)
 {
-	if (card->info.type == QETH_CARD_TYPE_OSN)
-		return RTN_UNICAST;
 	if (is_broadcast_ether_addr(skb->data))
 		return RTN_BROADCAST;
 	if (is_multicast_ether_addr(skb->data))
@@ -299,7 +297,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
 	}
 	if (card->state == CARD_STATE_HARDSETUP) {
 		qeth_qdio_clear_card(card, 0);
-		qeth_clear_qdio_buffers(card);
+		qeth_drain_output_queues(card);
 		qeth_clear_working_pool_list(card);
 		card->state = CARD_STATE_DOWN;
 	}
@@ -603,37 +601,44 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
 					   struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
-	int cast_type = qeth_l2_get_cast_type(card, skb);
-	int ipv = qeth_get_ip_version(skb);
+	u16 txq = skb_get_queue_mapping(skb);
 	struct qeth_qdio_out_q *queue;
 	int tx_bytes = skb->len;
 	int rc;

-	queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
-
-	netif_stop_queue(dev);
+	if (IS_IQD(card))
+		txq = qeth_iqd_translate_txq(dev, txq);
+	queue = card->qdio.out_qs[txq];

 	if (IS_OSN(card))
 		rc = qeth_l2_xmit_osn(card, skb, queue);
 	else
-		rc = qeth_xmit(card, skb, queue, ipv, cast_type,
-			       qeth_l2_fill_header);
+		rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
+			       qeth_l2_get_cast_type(skb), qeth_l2_fill_header);

 	if (!rc) {
 		QETH_TXQ_STAT_INC(queue, tx_packets);
 		QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
-		netif_wake_queue(dev);
 		return NETDEV_TX_OK;
-	} else if (rc == -EBUSY) {
-		return NETDEV_TX_BUSY;
-	} /* else fall through */
+	}

 	QETH_TXQ_STAT_INC(queue, tx_dropped);
 	kfree_skb(skb);
-	netif_wake_queue(dev);
 	return NETDEV_TX_OK;
 }

+static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
+				struct net_device *sb_dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	if (IS_IQD(card))
+		return qeth_iqd_select_queue(dev, skb,
+					     qeth_l2_get_cast_type(skb),
+					     sb_dev);
+	return qeth_get_priority_queue(card, skb);
+}
+
 static const struct device_type qeth_l2_devtype = {
 	.name = "qeth_layer2",
 	.groups = qeth_l2_attr_groups,
@@ -687,6 +692,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
 	.ndo_get_stats64	= qeth_get_stats64,
 	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
 	.ndo_features_check	= qeth_features_check,
+	.ndo_select_queue	= qeth_l2_select_queue,
 	.ndo_validate_addr	= qeth_l2_validate_addr,
 	.ndo_set_rx_mode	= qeth_l2_set_rx_mode,
 	.ndo_do_ioctl		= qeth_do_ioctl,
......
@@ -1433,7 +1433,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
 	}
 	if (card->state == CARD_STATE_HARDSETUP) {
 		qeth_qdio_clear_card(card, 0);
-		qeth_clear_qdio_buffers(card);
+		qeth_drain_output_queues(card);
 		qeth_clear_working_pool_list(card);
 		card->state = CARD_STATE_DOWN;
 	}
@@ -2036,7 +2036,6 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
 			struct qeth_qdio_out_q *queue, int ipv, int cast_type)
 {
-	unsigned char eth_hdr[ETH_HLEN];
 	unsigned int hw_hdr_len;
 	int rc;
@@ -2046,45 +2045,44 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
 	rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
 	if (rc)
 		return rc;
-	skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
 	skb_pull(skb, ETH_HLEN);

 	qeth_l3_fixup_headers(skb);
-	rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
-	if (rc == -EBUSY) {
-		/* roll back to ETH header */
-		skb_push(skb, ETH_HLEN);
-		skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
-	}
-	return rc;
+	return qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
 }

 static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 					   struct net_device *dev)
 {
-	int cast_type = qeth_l3_get_cast_type(skb);
 	struct qeth_card *card = dev->ml_priv;
+	u16 txq = skb_get_queue_mapping(skb);
 	int ipv = qeth_get_ip_version(skb);
 	struct qeth_qdio_out_q *queue;
 	int tx_bytes = skb->len;
-	int rc;
-
-	queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+	int cast_type, rc;

 	if (IS_IQD(card)) {
+		queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
+
 		if (card->options.sniffer)
 			goto tx_drop;
 		if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
 		    (card->options.cq == QETH_CQ_ENABLED &&
 		     skb->protocol != htons(ETH_P_AF_IUCV)))
 			goto tx_drop;
+
+		if (txq == QETH_IQD_MCAST_TXQ)
+			cast_type = qeth_l3_get_cast_type(skb);
+		else
+			cast_type = RTN_UNICAST;
+	} else {
+		queue = card->qdio.out_qs[txq];
+		cast_type = qeth_l3_get_cast_type(skb);
 	}

 	if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
 		goto tx_drop;

-	netif_stop_queue(dev);
-
 	if (ipv == 4 || IS_IQD(card))
 		rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
 	else
@@ -2094,16 +2092,12 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 	if (!rc) {
 		QETH_TXQ_STAT_INC(queue, tx_packets);
 		QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
-		netif_wake_queue(dev);
 		return NETDEV_TX_OK;
-	} else if (rc == -EBUSY) {
-		return NETDEV_TX_BUSY;
-	} /* else fall through */
+	}

 tx_drop:
 	QETH_TXQ_STAT_INC(queue, tx_dropped);
 	kfree_skb(skb);
-	netif_wake_queue(dev);
 	return NETDEV_TX_OK;
 }

@@ -2147,11 +2141,27 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
 	return qeth_features_check(skb, dev, features);
 }

+static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+				    struct net_device *sb_dev)
+{
+	return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
+				     sb_dev);
+}
+
+static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
+				    struct net_device *sb_dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	return qeth_get_priority_queue(card, skb);
+}
+
 static const struct net_device_ops qeth_l3_netdev_ops = {
 	.ndo_open		= qeth_open,
 	.ndo_stop		= qeth_stop,
 	.ndo_get_stats64	= qeth_get_stats64,
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
+	.ndo_select_queue	= qeth_l3_iqd_select_queue,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
 	.ndo_do_ioctl		= qeth_do_ioctl,
@@ -2168,6 +2178,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 	.ndo_get_stats64	= qeth_get_stats64,
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
 	.ndo_features_check	= qeth_l3_osa_features_check,
+	.ndo_select_queue	= qeth_l3_osa_select_queue,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
 	.ndo_do_ioctl		= qeth_do_ioctl,
......