Commit a264d830 authored by David S. Miller

Merge branch 'qeth-next'

Ursula Braun says:

====================
s390: qeth patches

here are patches for the s390 qeth driver for net-next:

Patch 01 is a minor improvement for the bridgeport code in qeth.

Patches 02-07 take care of scatter/gather handling in qeth.

Patch 08 improves handling of multicast IP addresses in the qeth layer3
discipline.

Patches 09-11 improve the netdev-features-related functions in qeth.

Patch 12 implements an outbound queue restriction for HiperSockets.

Patch 13 fixes wrong indentation in qeth_l3_main.c that caused a warning
with gcc-6.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 318d3cc0 77a83ed1
......@@ -19,6 +19,7 @@
#include <linux/seq_file.h>
#include <linux/ethtool.h>
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
......@@ -144,6 +145,7 @@ struct qeth_perf_stats {
unsigned int sg_alloc_page_rx;
unsigned int tx_csum;
unsigned int tx_lin;
unsigned int tx_linfail;
};
/* Routing stuff */
......@@ -559,7 +561,6 @@ enum qeth_ip_types {
QETH_IP_TYPE_NORMAL,
QETH_IP_TYPE_VIPA,
QETH_IP_TYPE_RXIP,
QETH_IP_TYPE_DEL_ALL_MC,
};
enum qeth_cmd_buffer_state {
......@@ -740,17 +741,10 @@ struct qeth_vlan_vid {
unsigned short vid;
};
enum qeth_mac_disposition {
QETH_DISP_MAC_DELETE = 0,
QETH_DISP_MAC_DO_NOTHING = 1,
QETH_DISP_MAC_ADD = 2,
};
struct qeth_mac {
u8 mac_addr[OSA_ADDR_LEN];
u8 is_uc:1;
u8 disp_flag:2;
struct hlist_node hnode;
enum qeth_addr_disposition {
QETH_DISP_ADDR_DELETE = 0,
QETH_DISP_ADDR_DO_NOTHING = 1,
QETH_DISP_ADDR_ADD = 2,
};
struct qeth_rx {
......@@ -798,6 +792,8 @@ struct qeth_card {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct list_head vid_list;
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
DECLARE_HASHTABLE(ip_mc_htable, 4);
struct work_struct kernel_thread_starter;
spinlock_t thread_mask_lock;
unsigned long thread_start_mask;
......@@ -805,8 +801,6 @@ struct qeth_card {
unsigned long thread_running_mask;
struct task_struct *recovery_task;
spinlock_t ip_lock;
struct list_head ip_list;
struct list_head *ip_tbd_list;
struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
......@@ -844,6 +838,19 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
/**
* qeth_get_elements_for_range() - find number of SBALEs to cover range.
* @start: Start of the address range.
* @end: Address after the end of the range.
*
* Returns the number of pages, and thus QDIO buffer elements, needed to cover
* the specified address range.
*/
static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
{
return PFN_UP(end - 1) - PFN_DOWN(start);
}
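For illustration (hypothetical values, assuming PAGE_SIZE = 4096): the helper simply counts how many pages a byte range touches, which is exactly the number of SBALEs needed for that range.
/* start = 0x1ff0, end = 0x2010: PFN_UP(0x200f) - PFN_DOWN(0x1ff0) = 3 - 1 = 2
 *   (a 0x20-byte range crossing a page boundary -> 2 elements)
 * start = 0x2000, end = 0x3000: PFN_UP(0x2fff) - PFN_DOWN(0x2000) = 3 - 2 = 1
 *   (a full, aligned page -> 1 element)
 */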
static inline int qeth_get_micros(void)
{
return (int) (get_tod_clock() >> 12);
......@@ -865,6 +872,11 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
}
}
static inline int qeth_get_ip_protocol(struct sk_buff *skb)
{
return ip_hdr(skb)->protocol;
}
static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
struct qeth_buffer_pool_entry *entry)
{
......@@ -981,12 +993,13 @@ int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
int (*reply_cb)(struct qeth_card *,
struct qeth_reply *, unsigned long),
void *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
enum qeth_ipa_funcs,
__u16, __u16,
enum qeth_prot_versions);
int qeth_start_ipa_tx_checksum(struct qeth_card *);
int qeth_set_rx_csum(struct qeth_card *, int);
int qeth_set_features(struct net_device *, netdev_features_t);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
......
......@@ -1464,8 +1464,6 @@ static int qeth_setup_card(struct qeth_card *card)
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->ip_list);
INIT_LIST_HEAD(card->ip_tbd_list);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
/* initial options */
......@@ -1500,11 +1498,6 @@ static struct qeth_card *qeth_alloc_card(void)
if (!card)
goto out;
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (!card->ip_tbd_list) {
QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
goto out_card;
}
if (qeth_setup_channel(&card->read))
goto out_ip;
if (qeth_setup_channel(&card->write))
......@@ -1517,8 +1510,6 @@ static struct qeth_card *qeth_alloc_card(void)
out_channel:
qeth_clean_channel(&card->read);
out_ip:
kfree(card->ip_tbd_list);
out_card:
kfree(card);
out:
return NULL;
......@@ -3757,6 +3748,14 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
{
if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
return 2;
return queue_num;
}
/**
* Note: Function assumes that we have 4 outbound queues.
*/
......@@ -3784,9 +3783,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
return card->qdio.default_out_queue;
}
if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
return ~tos >> 6 & 3;
return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
if (tos & IPTOS_MINCOST)
return 3;
return qeth_cut_iqd_prio(card, 3);
if (tos & IPTOS_RELIABILITY)
return 2;
if (tos & IPTOS_THROUGHPUT)
......@@ -3797,11 +3796,12 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
case QETH_PRIO_Q_ING_SKB:
if (skb->priority > 5)
return 0;
return ~skb->priority >> 1 & 3;
return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
case QETH_PRIO_Q_ING_VLAN:
tci = &((struct ethhdr *)skb->data)->h_proto;
if (*tci == ETH_P_8021Q)
return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3;
return qeth_cut_iqd_prio(card, ~*(tci + 1) >>
(VLAN_PRIO_SHIFT + 1) & 3);
break;
default:
break;
......@@ -3810,41 +3810,54 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
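For illustration (hypothetical TOS values): with QETH_PRIO_Q_ING_PREC the queue index comes from the inverted precedence bits, and the new qeth_cut_iqd_prio() keeps such traffic off queue 3 on HiperSockets.
/* tos = 0x00 (lowest precedence):  ~tos >> 6 & 3 = 3
 *   -> queue 3 on OSA, remapped to queue 2 on an IQD (HiperSockets) card
 * tos = 0xc0 (highest precedence): ~tos >> 6 & 3 = 0
 *   -> queue 0 on all card types, no remapping needed
 */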
/**
* qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
* @skb: SKB address
*
* Returns the number of pages, and thus QDIO buffer elements, needed to cover
* fragmented part of the SKB. Returns zero for linear SKB.
*/
int qeth_get_elements_for_frags(struct sk_buff *skb)
{
int cnt, length, e, elements = 0;
struct skb_frag_struct *frag;
char *data;
int cnt, elements = 0;
for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
frag = &skb_shinfo(skb)->frags[cnt];
data = (char *)page_to_phys(skb_frag_page(frag)) +
frag->page_offset;
length = frag->size;
e = PFN_UP((unsigned long)data + length - 1) -
PFN_DOWN((unsigned long)data);
elements += e;
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
elements += qeth_get_elements_for_range(
(addr_t)skb_frag_address(frag),
(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
}
return elements;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
/**
* qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags.
* @card: qeth card structure, to check max. elems.
* @skb: SKB address
* @extra_elems: extra elems needed, to check against max.
*
* Returns the number of pages, and thus QDIO buffer elements, needed to cover
* skb data, including linear part and fragments. Checks if the result plus
* extra_elems fits under the limit for the card. Returns 0 if it does not.
* Note: extra_elems is not included in the returned result.
*/
int qeth_get_elements_no(struct qeth_card *card,
struct sk_buff *skb, int elems)
struct sk_buff *skb, int extra_elems)
{
int dlen = skb->len - skb->data_len;
int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
PFN_DOWN((unsigned long)skb->data);
int elements = qeth_get_elements_for_range(
(addr_t)skb->data,
(addr_t)skb->data + skb_headlen(skb)) +
qeth_get_elements_for_frags(skb);
elements_needed += qeth_get_elements_for_frags(skb);
if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
"(Number=%d / Length=%d). Discarded.\n",
(elements_needed+elems), skb->len);
elements + extra_elems, skb->len);
return 0;
}
return elements_needed;
return elements;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_no);
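For illustration (hypothetical skb layout): the element count is the sum of linear-part pages and fragment pages, while extra_elems only takes part in the limit check.
/* headlen of 200 bytes within one page        -> 1 element
 * two page-aligned, page-sized fragments      -> 2 elements
 * elements = 3; with extra_elems = 1 (e.g. a separately allocated qeth
 * header) the check is 3 + 1 <= QETH_MAX_BUFFER_ELEMENTS(card), and the
 * function still returns 3.
 */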
......@@ -3859,7 +3872,7 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
rest = len - inpage;
if (rest > hroom)
return 1;
memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
memmove(skb->data - rest, skb->data, skb_headlen(skb));
skb->data -= rest;
skb->tail -= rest;
*hdr = (struct qeth_hdr *)skb->data;
......@@ -3873,7 +3886,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
int offset)
{
int length = skb->len - skb->data_len;
int length = skb_headlen(skb);
int length_here;
int element;
char *data;
......@@ -4967,7 +4980,6 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->write);
if (card->dev)
free_netdev(card->dev);
kfree(card->ip_tbd_list);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
kfree(card);
......@@ -5265,7 +5277,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
static int qeth_setassparms_cb(struct qeth_card *card,
int qeth_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd;
......@@ -5294,6 +5306,7 @@ static int qeth_setassparms_cb(struct qeth_card *card,
return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
......@@ -5788,6 +5801,7 @@ static struct {
{"tx do_QDIO count"},
{"tx csum"},
{"tx lin"},
{"tx linfail"},
{"cq handler count"},
{"cq handler time"}
};
......@@ -5848,8 +5862,9 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
data[32] = card->perf_stats.outbound_do_qdio_cnt;
data[33] = card->perf_stats.tx_csum;
data[34] = card->perf_stats.tx_lin;
data[35] = card->perf_stats.cq_cnt;
data[36] = card->perf_stats.cq_time;
data[35] = card->perf_stats.tx_linfail;
data[36] = card->perf_stats.cq_cnt;
data[37] = card->perf_stats.cq_time;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
......@@ -6048,74 +6063,136 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
static int qeth_send_checksum_command(struct qeth_card *card)
static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
{
long rxtx_arg;
int rc;
rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
IPA_CMD_ASS_START, 0);
rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0);
if (rc) {
dev_warn(&card->gdev->dev, "Starting HW checksumming for %s "
"failed, using SW checksumming\n",
dev_warn(&card->gdev->dev,
"Starting HW checksumming for %s failed, using SW checksumming\n",
QETH_CARD_IFNAME(card));
return rc;
}
rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
IPA_CMD_ASS_ENABLE,
card->info.csum_mask);
rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? card->info.tx_csum_mask
: card->info.csum_mask;
rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE,
rxtx_arg);
if (rc) {
dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s "
"failed, using SW checksumming\n",
dev_warn(&card->gdev->dev,
"Enabling HW checksumming for %s failed, using SW checksumming\n",
QETH_CARD_IFNAME(card));
return rc;
}
dev_info(&card->gdev->dev, "HW Checksumming (%sbound) enabled\n",
cstype == IPA_INBOUND_CHECKSUM ? "in" : "out");
return 0;
}
int qeth_set_rx_csum(struct qeth_card *card, int on)
static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype)
{
int rc;
if (on) {
rc = qeth_send_checksum_command(card);
rc = qeth_send_checksum_on(card, cstype);
if (rc)
return -EIO;
dev_info(&card->gdev->dev,
"HW Checksumming (inbound) enabled\n");
} else {
rc = qeth_send_simple_setassparms(card,
IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
rc = qeth_send_simple_setassparms(card, cstype,
IPA_CMD_ASS_STOP, 0);
if (rc)
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_rx_csum);
int qeth_start_ipa_tx_checksum(struct qeth_card *card)
static int qeth_set_ipa_tso(struct qeth_card *card, int on)
{
int rc = 0;
int rc;
if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
return rc;
rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
QETH_CARD_TEXT(card, 3, "sttso");
if (on) {
rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_START, 0);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting outbound TCP segmentation offload for %s failed\n",
QETH_CARD_IFNAME(card));
return -EIO;
}
dev_info(&card->gdev->dev, "Outbound TSO enabled\n");
} else {
rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_STOP, 0);
}
return rc;
}
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
netdev_features_t changed = dev->features ^ features;
int rc = 0;
QETH_DBF_TEXT(SETUP, 2, "setfeat");
QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
if ((changed & NETIF_F_IP_CSUM)) {
rc = qeth_set_ipa_csum(card,
features & NETIF_F_IP_CSUM ? 1 : 0,
IPA_OUTBOUND_CHECKSUM);
if (rc)
goto err_out;
rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
IPA_CMD_ASS_ENABLE,
card->info.tx_csum_mask);
changed ^= NETIF_F_IP_CSUM;
}
if ((changed & NETIF_F_RXCSUM)) {
rc = qeth_set_ipa_csum(card,
features & NETIF_F_RXCSUM ? 1 : 0,
IPA_INBOUND_CHECKSUM);
if (rc)
goto err_out;
changed ^= NETIF_F_RXCSUM;
}
if ((changed & NETIF_F_TSO)) {
rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0);
if (rc)
changed ^= NETIF_F_TSO;
}
dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n");
return rc;
err_out:
dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s "
"failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card));
return rc;
/* everything changed successfully? */
if ((dev->features ^ features) == changed)
return 0;
/* something went wrong. save changed features and return error */
dev->features ^= changed;
return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
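For illustration (hypothetical feature toggle): as the NETIF_F_TSO branch shows, a bit is dropped from 'changed' when its hardware toggle fails, so the final comparison tells whether everything requested was actually applied.
/* request: enable NETIF_F_IP_CSUM and NETIF_F_TSO
 *   changed = dev->features ^ features = IP_CSUM | TSO
 *   IP_CSUM toggles fine -> bit stays set in 'changed'
 *   TSO start fails      -> changed ^= NETIF_F_TSO
 *   (dev->features ^ features) != changed, so only the successfully applied
 *   bits are folded into dev->features and -EIO is returned to the stack.
 */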
netdev_features_t qeth_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT(SETUP, 2, "fixfeat");
if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
features &= ~NETIF_F_IP_CSUM;
if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
features &= ~NETIF_F_RXCSUM;
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
features &= ~NETIF_F_TSO;
dev_info(&card->gdev->dev, "Outbound TSO not supported on %s\n",
QETH_CARD_IFNAME(card));
}
/* if the card isn't up, remove features that require hw changes */
if (card->state == CARD_STATE_DOWN ||
card->state == CARD_STATE_RECOVER)
features = features & ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO);
QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
return features;
}
EXPORT_SYMBOL_GPL(qeth_start_ipa_tx_checksum);
EXPORT_SYMBOL_GPL(qeth_fix_features);
static int __init qeth_core_init(void)
{
......
......@@ -243,6 +243,10 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 2;
} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
if (card->info.type == QETH_CARD_TYPE_IQD) {
rc = -EPERM;
goto out;
}
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 3;
} else if (sysfs_streq(buf, "no_prio_queueing")) {
......
......@@ -12,4 +12,11 @@ int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
struct qeth_mac {
u8 mac_addr[OSA_ADDR_LEN];
u8 is_uc:1;
u8 disp_flag:2;
struct hlist_node hnode;
};
#endif /* __QETH_L2_H__ */
......@@ -404,38 +404,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
return rc;
}
static netdev_features_t qeth_l2_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT(SETUP, 2, "fixfeat");
if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
features &= ~NETIF_F_IP_CSUM;
if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
features &= ~NETIF_F_RXCSUM;
QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
return features;
}
static int qeth_l2_set_features(struct net_device *dev,
netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
netdev_features_t changed = dev->features ^ features;
QETH_DBF_TEXT(SETUP, 2, "setfeat");
QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
if (card->state == CARD_STATE_DOWN ||
card->state == CARD_STATE_RECOVER)
return 0;
if (!(changed & NETIF_F_RXCSUM))
return 0;
return qeth_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
}
static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
{
QETH_DBF_TEXT(SETUP , 2, "stopcard");
......@@ -780,7 +748,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
qeth_l2_mac_hash(ha->addr)) {
if (is_uc == mac->is_uc &&
!memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
mac->disp_flag = QETH_DISP_MAC_DO_NOTHING;
mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
return;
}
}
......@@ -792,7 +760,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
mac->is_uc = is_uc;
mac->disp_flag = QETH_DISP_MAC_ADD;
mac->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->mac_htable, &mac->hnode,
qeth_l2_mac_hash(mac->mac_addr));
......@@ -825,7 +793,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
qeth_l2_add_mac(card, ha, 1);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
if (mac->disp_flag == QETH_DISP_MAC_DELETE) {
if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
if (!mac->is_uc)
rc = qeth_l2_send_delgroupmac(card,
mac->mac_addr);
......@@ -837,15 +805,15 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
hash_del(&mac->hnode);
kfree(mac);
} else if (mac->disp_flag == QETH_DISP_MAC_ADD) {
} else if (mac->disp_flag == QETH_DISP_ADDR_ADD) {
rc = qeth_l2_write_mac(card, mac);
if (rc) {
hash_del(&mac->hnode);
kfree(mac);
} else
mac->disp_flag = QETH_DISP_MAC_DELETE;
mac->disp_flag = QETH_DISP_ADDR_DELETE;
} else
mac->disp_flag = QETH_DISP_MAC_DELETE;
mac->disp_flag = QETH_DISP_ADDR_DELETE;
}
spin_unlock_bh(&card->mclock);
......@@ -869,6 +837,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int data_offset = -1;
int elements_needed = 0;
int hd_len = 0;
int nr_frags;
if (card->qdio.do_prio_queueing || (cast_type &&
card->info.is_multicast_different))
......@@ -892,6 +861,23 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
netif_stop_queue(dev);
/* fix hardware limitation: as long as we do not have sbal
* chaining we can not send long frag lists
*/
if ((card->info.type != QETH_CARD_TYPE_IQD) &&
!qeth_get_elements_no(card, new_skb, 0)) {
int lin_rc = skb_linearize(new_skb);
if (card->options.performance_stats) {
if (lin_rc)
card->perf_stats.tx_linfail++;
else
card->perf_stats.tx_lin++;
}
if (lin_rc)
goto tx_drop;
}
if (card->info.type == QETH_CARD_TYPE_OSN)
hdr = (struct qeth_hdr *)skb->data;
else {
......@@ -943,6 +929,14 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
if (card->options.performance_stats) {
nr_frags = skb_shinfo(new_skb)->nr_frags;
if (nr_frags) {
card->perf_stats.sg_skbs_sent++;
/* nr_frags + skb->data */
card->perf_stats.sg_frags_sent += nr_frags + 1;
}
}
if (new_skb != skb)
dev_kfree_skb_any(skb);
rc = NETDEV_TX_OK;
......@@ -1086,8 +1080,8 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
.ndo_fix_features = qeth_l2_fix_features,
.ndo_set_features = qeth_l2_set_features
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features
};
static int qeth_l2_setup_netdev(struct qeth_card *card)
......@@ -1118,12 +1112,25 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
&qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
/* Turn on RX offloading per default */
card->dev->features |= NETIF_F_RXCSUM;
card->dev->hw_features = NETIF_F_SG;
card->dev->vlan_features = NETIF_F_SG;
/* OSA 3S and earlier has no RX/TX support */
if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
card->dev->hw_features |= NETIF_F_IP_CSUM;
card->dev->vlan_features |= NETIF_F_IP_CSUM;
}
if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
card->dev->hw_features |= NETIF_F_RXCSUM;
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
/* Turn on SG per default */
card->dev->features |= NETIF_F_SG;
}
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE;
card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
netif_carrier_off(card->dev);
......@@ -1135,9 +1142,6 @@ static int qeth_l2_start_ipassists(struct qeth_card *card)
/* configure isolation level */
if (qeth_set_access_ctrl_online(card, 0))
return -ENODEV;
if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
qeth_set_rx_csum(card, 1);
qeth_start_ipa_tx_checksum(card);
return 0;
}
......@@ -1206,7 +1210,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
contin:
if ((card->info.type == QETH_CARD_TYPE_OSD) ||
(card->info.type == QETH_CARD_TYPE_OSX)) {
if (qeth_l2_start_ipassists(card))
rc = qeth_l2_start_ipassists(card);
if (rc)
goto out_remove;
}
......@@ -1800,6 +1805,12 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
dev_err(&card->gdev->dev,
"The device is not configured as a Bridge Port\n");
break;
case 0x2B10:
case 0x0010: /* OS mismatch */
rc = -EPERM;
dev_err(&card->gdev->dev,
"A Bridge Port is already configured by a different operating system\n");
break;
case 0x2B14:
case 0x0014: /* Another device is Primary */
switch (setcmd) {
......
......@@ -10,16 +10,23 @@
#define __QETH_L3_H__
#include "qeth_core.h"
#include <linux/hashtable.h>
#define QETH_SNIFF_AVAIL 0x0008
struct qeth_ipaddr {
struct list_head entry;
struct hlist_node hnode;
enum qeth_ip_types type;
enum qeth_ipa_setdelip_flags set_flags;
enum qeth_ipa_setdelip_flags del_flags;
int is_multicast;
int users;
u8 is_multicast:1;
u8 in_progress:1;
u8 disp_flag:2;
/* ref_counter is changed only for normal IP addresses;
* for non-normal addresses it is always 1.
*/
int ref_counter;
enum qeth_prot_versions proto;
unsigned char mac[OSA_ADDR_LEN];
union {
......@@ -32,7 +39,24 @@ struct qeth_ipaddr {
unsigned int pfxlen;
} a6;
} u;
};
static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
{
u64 ret = 0;
u8 *point;
if (addr->proto == QETH_PROT_IPV6) {
point = (u8 *) &addr->u.a6.addr;
ret = get_unaligned((u64 *)point) ^
get_unaligned((u64 *) (point + 8));
}
if (addr->proto == QETH_PROT_IPV4) {
point = (u8 *) &addr->u.a4.addr;
ret = get_unaligned((u32 *) point);
}
return ret;
}
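For illustration: the hash key folds an address into a scalar, which the <linux/hashtable.h> macros then reduce to one of the 2^4 = 16 buckets declared via DECLARE_HASHTABLE(ip_htable, 4) and DECLARE_HASHTABLE(ip_mc_htable, 4).
/* IPv4: the key is simply the 32-bit address (u.a4.addr).
 * IPv6: the key is the first 8 bytes of the address XOR-ed with the last 8,
 *       so addresses differing only in the low 64 bits still spread out.
 * hash_add()/hash_for_each_possible() take this key to index the table.
 */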
struct qeth_ipato_entry {
struct list_head entry;
......@@ -60,6 +84,5 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
void qeth_l3_set_ip_addr_list(struct qeth_card *);
#endif /* __QETH_L3_H__ */
......@@ -30,6 +30,7 @@
#include <net/ip6_fib.h>
#include <net/ip6_checksum.h>
#include <net/iucv/af_iucv.h>
#include <linux/hashtable.h>
#include "qeth_l3.h"
......@@ -57,7 +58,7 @@ static int qeth_l3_isxdigit(char *buf)
static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
{
sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
sprintf(buf, "%pI4", addr);
}
static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
......@@ -204,104 +205,129 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
return rc;
}
/*
* Add IP to be added to todo list. If there is already an "add todo"
* in this list we just increment the reference count.
* Returns 0 if we only incremented the reference count.
*/
static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
struct qeth_ipaddr *addr, int add)
inline int
qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
{
struct qeth_ipaddr *tmp, *t;
int found = 0;
return addr1->proto == addr2->proto &&
!memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
!memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
}
if (card->options.sniffer)
return 0;
list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
(tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
return 0;
if ((tmp->proto == QETH_PROT_IPV4) &&
(addr->proto == QETH_PROT_IPV4) &&
(tmp->type == addr->type) &&
(tmp->is_multicast == addr->is_multicast) &&
(tmp->u.a4.addr == addr->u.a4.addr) &&
(tmp->u.a4.mask == addr->u.a4.mask)) {
found = 1;
break;
}
if ((tmp->proto == QETH_PROT_IPV6) &&
(addr->proto == QETH_PROT_IPV6) &&
(tmp->type == addr->type) &&
(tmp->is_multicast == addr->is_multicast) &&
(tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
(memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
sizeof(struct in6_addr)) == 0)) {
found = 1;
break;
}
}
if (found) {
if (addr->users != 0)
tmp->users += addr->users;
else
tmp->users += add ? 1 : -1;
if (tmp->users == 0) {
list_del(&tmp->entry);
kfree(tmp);
}
return 0;
static struct qeth_ipaddr *
qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
struct qeth_ipaddr *addr;
if (tmp_addr->is_multicast) {
hash_for_each_possible(card->ip_mc_htable, addr,
hnode, qeth_l3_ipaddr_hash(tmp_addr))
if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
return addr;
} else {
if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
list_add(&addr->entry, card->ip_tbd_list);
else {
if (addr->users == 0)
addr->users += add ? 1 : -1;
if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
qeth_l3_is_addr_covered_by_ipato(card, addr)) {
QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
}
list_add_tail(&addr->entry, card->ip_tbd_list);
}
return 1;
hash_for_each_possible(card->ip_htable, addr,
hnode, qeth_l3_ipaddr_hash(tmp_addr))
if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
return addr;
}
return NULL;
}
int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
unsigned long flags;
int rc = 0;
struct qeth_ipaddr *addr;
QETH_CARD_TEXT(card, 4, "delip");
if (addr->proto == QETH_PROT_IPV4)
QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
if (tmp_addr->proto == QETH_PROT_IPV4)
QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
else {
QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
}
spin_lock_irqsave(&card->ip_lock, flags);
rc = __qeth_l3_insert_ip_todo(card, addr, 0);
spin_unlock_irqrestore(&card->ip_lock, flags);
addr = qeth_l3_ip_from_hash(card, tmp_addr);
if (!addr)
return -ENOENT;
addr->ref_counter--;
if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
return rc;
if (addr->in_progress)
return -EINPROGRESS;
rc = qeth_l3_deregister_addr_entry(card, addr);
hash_del(&addr->hnode);
kfree(addr);
return rc;
}
int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
unsigned long flags;
int rc = 0;
struct qeth_ipaddr *addr;
QETH_CARD_TEXT(card, 4, "addip");
if (addr->proto == QETH_PROT_IPV4)
QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
if (tmp_addr->proto == QETH_PROT_IPV4)
QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
else {
QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
}
addr = qeth_l3_ip_from_hash(card, tmp_addr);
if (!addr) {
addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
if (!addr)
return -ENOMEM;
memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
addr->ref_counter = 1;
if (addr->type == QETH_IP_TYPE_NORMAL &&
qeth_l3_is_addr_covered_by_ipato(card, addr)) {
QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
}
hash_add(card->ip_htable, &addr->hnode,
qeth_l3_ipaddr_hash(addr));
/* qeth_l3_register_addr_entry can go to sleep
* when we add an IPv4 address, because the SETIP IPA
* command starts ARP processing for IPv4 addresses.
* Thus we must drop the spinlock and use the in_progress
* flag to indicate that a hardware operation on this
* IPv4 address is still pending.
*/
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
spin_unlock_bh(&card->ip_lock);
rc = qeth_l3_register_addr_entry(card, addr);
spin_lock_bh(&card->ip_lock);
addr->in_progress = 0;
} else
rc = qeth_l3_register_addr_entry(card, addr);
if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) ||
(rc == IPA_RC_LAN_OFFLINE)) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
if (addr->ref_counter < 1) {
qeth_l3_delete_ip(card, addr);
kfree(addr);
}
} else {
hash_del(&addr->hnode);
kfree(addr);
}
} else {
if (addr->type == QETH_IP_TYPE_NORMAL)
addr->ref_counter++;
}
spin_lock_irqsave(&card->ip_lock, flags);
rc = __qeth_l3_insert_ip_todo(card, addr, 1);
spin_unlock_irqrestore(&card->ip_lock, flags);
return rc;
}
......@@ -312,229 +338,88 @@ struct qeth_ipaddr *qeth_l3_get_addr_buffer(
struct qeth_ipaddr *addr;
addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
if (addr == NULL) {
if (!addr)
return NULL;
}
addr->type = QETH_IP_TYPE_NORMAL;
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
addr->proto = prot;
return addr;
}
static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
{
struct qeth_ipaddr *iptodo;
unsigned long flags;
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i;
QETH_CARD_TEXT(card, 4, "clearip");
QETH_CARD_TEXT(card, 4, "delmc");
iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (!iptodo) {
QETH_CARD_TEXT(card, 2, "dmcnomem");
if (recover && card->options.sniffer)
return;
}
iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
spin_lock_irqsave(&card->ip_lock, flags);
if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
kfree(iptodo);
spin_unlock_irqrestore(&card->ip_lock, flags);
}
/*
* Add/remove address to/from card's ip list, i.e. try to add or remove
* reference to/from an IP address that is already registered on the card.
* Returns:
* 0 address was on card and its reference count has been adjusted,
* but is still > 0, so nothing has to be done
* also returns 0 if the address was not on the card and the todo was to delete
* the address -> there is also nothing to be done
* 1 address was not on card and the todo is to add it to the card's ip
* list
* -1 address was on card and its reference count has been decremented
* to <= 0 by the todo -> address must be removed from card
*/
static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
{
struct qeth_ipaddr *addr;
int found = 0;
list_for_each_entry(addr, &card->ip_list, entry) {
if ((addr->proto == QETH_PROT_IPV4) &&
(todo->proto == QETH_PROT_IPV4) &&
(addr->type == todo->type) &&
(addr->u.a4.addr == todo->u.a4.addr) &&
(addr->u.a4.mask == todo->u.a4.mask)) {
found = 1;
break;
}
if ((addr->proto == QETH_PROT_IPV6) &&
(todo->proto == QETH_PROT_IPV6) &&
(addr->type == todo->type) &&
(addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
(memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
sizeof(struct in6_addr)) == 0)) {
found = 1;
break;
}
}
if (found) {
addr->users += todo->users;
if (addr->users <= 0) {
*__addr = addr;
return -1;
} else {
/* for VIPA and RXIP limit refcount to 1 */
if (addr->type != QETH_IP_TYPE_NORMAL)
addr->users = 1;
return 0;
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (!recover) {
hash_del(&addr->hnode);
kfree(addr);
continue;
}
addr->disp_flag = QETH_DISP_ADDR_ADD;
}
if (todo->users > 0) {
/* for VIPA and RXIP limit refcount to 1 */
if (todo->type != QETH_IP_TYPE_NORMAL)
todo->users = 1;
return 1;
} else
return 0;
}
static void __qeth_l3_delete_all_mc(struct qeth_card *card,
unsigned long *flags)
{
struct list_head fail_list;
struct qeth_ipaddr *addr, *tmp;
int rc;
spin_unlock_bh(&card->ip_lock);
INIT_LIST_HEAD(&fail_list);
again:
list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
if (addr->is_multicast) {
list_del(&addr->entry);
spin_unlock_irqrestore(&card->ip_lock, *flags);
rc = qeth_l3_deregister_addr_entry(card, addr);
spin_lock_irqsave(&card->ip_lock, *flags);
if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND))
spin_lock_bh(&card->mclock);
hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
hash_del(&addr->hnode);
kfree(addr);
else
list_add_tail(&addr->entry, &fail_list);
goto again;
}
}
list_splice(&fail_list, &card->ip_list);
}
void qeth_l3_set_ip_addr_list(struct qeth_card *card)
spin_unlock_bh(&card->mclock);
}
static void qeth_l3_recover_ip(struct qeth_card *card)
{
struct list_head *tbd_list;
struct qeth_ipaddr *todo, *addr;
unsigned long flags;
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i;
int rc;
QETH_CARD_TEXT(card, 2, "sdiplist");
QETH_CARD_HEX(card, 2, &card, sizeof(void *));
QETH_CARD_TEXT(card, 4, "recoverip");
if (!qeth_card_hw_is_reachable(card) || card->options.sniffer)
return;
spin_lock_bh(&card->ip_lock);
spin_lock_irqsave(&card->ip_lock, flags);
tbd_list = card->ip_tbd_list;
card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
if (!card->ip_tbd_list) {
QETH_CARD_TEXT(card, 0, "silnomem");
card->ip_tbd_list = tbd_list;
spin_unlock_irqrestore(&card->ip_lock, flags);
return;
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
spin_unlock_bh(&card->ip_lock);
rc = qeth_l3_register_addr_entry(card, addr);
spin_lock_bh(&card->ip_lock);
addr->in_progress = 0;
} else
INIT_LIST_HEAD(card->ip_tbd_list);
while (!list_empty(tbd_list)) {
todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
list_del(&todo->entry);
if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
__qeth_l3_delete_all_mc(card, &flags);
kfree(todo);
continue;
}
rc = __qeth_l3_ref_ip_on_card(card, todo, &addr);
if (rc == 0) {
/* nothing to be done; only adjusted refcount */
kfree(todo);
} else if (rc == 1) {
/* new entry to be added to on-card list */
spin_unlock_irqrestore(&card->ip_lock, flags);
rc = qeth_l3_register_addr_entry(card, todo);
spin_lock_irqsave(&card->ip_lock, flags);
if (!rc || (rc == IPA_RC_LAN_OFFLINE))
list_add_tail(&todo->entry, &card->ip_list);
else
kfree(todo);
} else if (rc == -1) {
/* on-card entry to be removed */
list_del_init(&addr->entry);
spin_unlock_irqrestore(&card->ip_lock, flags);
rc = qeth_l3_deregister_addr_entry(card, addr);
spin_lock_irqsave(&card->ip_lock, flags);
if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
kfree(addr);
else
list_add_tail(&addr->entry, &card->ip_list);
kfree(todo);
}
}
spin_unlock_irqrestore(&card->ip_lock, flags);
kfree(tbd_list);
}
static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover)
{
struct qeth_ipaddr *addr, *tmp;
unsigned long flags;
rc = qeth_l3_register_addr_entry(card, addr);
QETH_CARD_TEXT(card, 4, "clearip");
if (recover && card->options.sniffer)
return;
spin_lock_irqsave(&card->ip_lock, flags);
/* clear todo list */
list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
list_del(&addr->entry);
if (!rc) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
if (addr->ref_counter < 1) {
qeth_l3_delete_ip(card, addr);
kfree(addr);
}
while (!list_empty(&card->ip_list)) {
addr = list_entry(card->ip_list.next,
struct qeth_ipaddr, entry);
list_del_init(&addr->entry);
if (!recover || addr->is_multicast) {
} else {
hash_del(&addr->hnode);
kfree(addr);
continue;
}
list_add_tail(&addr->entry, card->ip_tbd_list);
}
spin_unlock_irqrestore(&card->ip_lock, flags);
}
static int qeth_l3_address_exists_in_list(struct list_head *list,
struct qeth_ipaddr *addr, int same_type)
{
struct qeth_ipaddr *tmp;
list_for_each_entry(tmp, list, entry) {
if ((tmp->proto == QETH_PROT_IPV4) &&
(addr->proto == QETH_PROT_IPV4) &&
((same_type && (tmp->type == addr->type)) ||
(!same_type && (tmp->type != addr->type))) &&
(tmp->u.a4.addr == addr->u.a4.addr))
return 1;
}
if ((tmp->proto == QETH_PROT_IPV6) &&
(addr->proto == QETH_PROT_IPV6) &&
((same_type && (tmp->type == addr->type)) ||
(!same_type && (tmp->type != addr->type))) &&
(memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
sizeof(struct in6_addr)) == 0))
return 1;
spin_unlock_bh(&card->ip_lock);
}
return 0;
}
static int qeth_l3_send_setdelmc(struct qeth_card *card,
......@@ -712,27 +597,28 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
*/
static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
struct qeth_ipato_entry *ipatoe, *tmp;
unsigned long flags;
spin_lock_irqsave(&card->ip_lock, flags);
spin_lock_bh(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
list_del(&ipatoe->entry);
kfree(ipatoe);
}
spin_unlock_irqrestore(&card->ip_lock, flags);
spin_unlock_bh(&card->ip_lock);
}
int qeth_l3_add_ipato_entry(struct qeth_card *card,
struct qeth_ipato_entry *new)
{
struct qeth_ipato_entry *ipatoe;
unsigned long flags;
int rc = 0;
QETH_CARD_TEXT(card, 2, "addipato");
spin_lock_irqsave(&card->ip_lock, flags);
spin_lock_bh(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
continue;
......@@ -743,10 +629,12 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
break;
}
}
if (!rc)
list_add_tail(&new->entry, &card->ipato.entries);
spin_unlock_irqrestore(&card->ip_lock, flags);
spin_unlock_bh(&card->ip_lock);
return rc;
}
......@@ -754,10 +642,11 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
enum qeth_prot_versions proto, u8 *addr, int mask_bits)
{
struct qeth_ipato_entry *ipatoe, *tmp;
unsigned long flags;
QETH_CARD_TEXT(card, 2, "delipato");
spin_lock_irqsave(&card->ip_lock, flags);
spin_lock_bh(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
......@@ -768,7 +657,8 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
kfree(ipatoe);
}
}
spin_unlock_irqrestore(&card->ip_lock, flags);
spin_unlock_bh(&card->ip_lock);
}
/*
......@@ -778,7 +668,6 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
unsigned long flags;
int rc = 0;
ipaddr = qeth_l3_get_addr_buffer(proto);
......@@ -797,18 +686,18 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
} else
return -ENOMEM;
spin_lock_irqsave(&card->ip_lock, flags);
if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
spin_lock_bh(&card->ip_lock);
if (!qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
spin_unlock_irqrestore(&card->ip_lock, flags);
if (rc) {
kfree(ipaddr);
return rc;
}
if (!qeth_l3_add_ip(card, ipaddr))
else
qeth_l3_add_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
kfree(ipaddr);
qeth_l3_set_ip_addr_list(card);
return rc;
}
......@@ -831,9 +720,12 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr->type = QETH_IP_TYPE_VIPA;
} else
return;
if (!qeth_l3_delete_ip(card, ipaddr))
spin_lock_bh(&card->ip_lock);
qeth_l3_delete_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
kfree(ipaddr);
qeth_l3_set_ip_addr_list(card);
}
/*
......@@ -843,7 +735,6 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
unsigned long flags;
int rc = 0;
ipaddr = qeth_l3_get_addr_buffer(proto);
......@@ -857,24 +748,25 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
ipaddr->type = QETH_IP_TYPE_RXIP;
ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
ipaddr->del_flags = 0;
} else
return -ENOMEM;
spin_lock_irqsave(&card->ip_lock, flags);
if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
spin_lock_bh(&card->ip_lock);
if (!qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
spin_unlock_irqrestore(&card->ip_lock, flags);
if (rc) {
else
qeth_l3_add_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
kfree(ipaddr);
return rc;
}
if (!qeth_l3_add_ip(card, ipaddr))
kfree(ipaddr);
qeth_l3_set_ip_addr_list(card);
return 0;
}
void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
......@@ -896,9 +788,12 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr->type = QETH_IP_TYPE_RXIP;
} else
return;
if (!qeth_l3_delete_ip(card, ipaddr))
spin_lock_bh(&card->ip_lock);
qeth_l3_delete_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
kfree(ipaddr);
qeth_l3_set_ip_addr_list(card);
}
static int qeth_l3_register_addr_entry(struct qeth_card *card,
......@@ -908,6 +803,7 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
int rc = 0;
int cnt = 3;
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "setaddr4");
QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
......@@ -1013,36 +909,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
return rc;
}
static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 4, "defadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0) {
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
if (cmd->hdr.prot_version == QETH_PROT_IPV6)
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
}
if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
}
if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
card->info.tx_csum_mask =
cmd->data.setassparms.data.flags_32bit;
QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
}
return 0;
}
#ifdef CONFIG_QETH_IPV6
static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
......@@ -1056,7 +922,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
if (!iob)
return -ENOMEM;
rc = qeth_send_setassparms(card, iob, 0, 0,
qeth_l3_default_setassparms_cb, NULL);
qeth_setassparms_cb, NULL);
return rc;
}
#endif
......@@ -1291,47 +1157,6 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
return rc;
}
static void qeth_l3_start_ipa_checksum(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtcsum");
if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)
&& (card->dev->features & NETIF_F_RXCSUM))
qeth_set_rx_csum(card, 1);
}
static void qeth_l3_start_ipa_tx_checksum(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strttxcs");
qeth_start_ipa_tx_checksum(card);
}
static int qeth_l3_start_ipa_tso(struct qeth_card *card)
{
int rc;
QETH_CARD_TEXT(card, 3, "sttso");
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
dev_info(&card->gdev->dev,
"Outbound TSO not supported on %s\n",
QETH_CARD_IFNAME(card));
rc = -EOPNOTSUPP;
} else {
rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_START, 0);
if (rc)
dev_warn(&card->gdev->dev, "Starting outbound TCP "
"segmentation offload for %s failed\n",
QETH_CARD_IFNAME(card));
else
dev_info(&card->gdev->dev,
"Outbound TSO enabled\n");
}
if (rc)
card->dev->features &= ~NETIF_F_TSO;
return rc;
}
static int qeth_l3_start_ipassists(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipas");
......@@ -1345,9 +1170,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
qeth_l3_start_ipa_multicast(card); /* go on*/
qeth_l3_start_ipa_ipv6(card); /* go on*/
qeth_l3_start_ipa_broadcast(card); /* go on*/
qeth_l3_start_ipa_checksum(card); /* go on*/
qeth_l3_start_ipa_tx_checksum(card);
qeth_l3_start_ipa_tso(card); /* go on*/
return 0;
}
......@@ -1507,31 +1329,99 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
}
static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
struct net_device *dev)
static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac)
{
ip_eth_mc_map(ipm, mac);
}
static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
static void qeth_l3_mark_all_mc_to_be_deleted(struct qeth_card *card)
{
struct qeth_ipaddr *addr;
int i;
hash_for_each(card->ip_mc_htable, i, addr, hnode)
addr->disp_flag = QETH_DISP_ADDR_DELETE;
}
static void qeth_l3_add_all_new_mc(struct qeth_card *card)
{
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i;
int rc;
hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
rc = qeth_l3_register_addr_entry(card, addr);
if (!rc || (rc == IPA_RC_LAN_OFFLINE))
addr->ref_counter = 1;
else {
hash_del(&addr->hnode);
kfree(addr);
}
}
}
}
static void qeth_l3_delete_nonused_mc(struct qeth_card *card)
{
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i;
int rc;
hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
rc = qeth_l3_deregister_addr_entry(card, addr);
if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) {
hash_del(&addr->hnode);
kfree(addr);
}
}
}
}
static void
qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
{
struct qeth_ipaddr *ipm;
struct ip_mc_list *im4;
struct qeth_ipaddr *tmp, *ipm;
char buf[MAX_ADDR_LEN];
QETH_CARD_TEXT(card, 4, "addmc");
tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (!tmp)
return;
for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
im4 = rcu_dereference(im4->next_rcu)) {
qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
qeth_l3_get_mac_for_ipm(im4->multiaddr, buf);
tmp->u.a4.addr = im4->multiaddr;
memcpy(tmp->mac, buf, sizeof(tmp->mac));
ipm = qeth_l3_ip_from_hash(card, tmp);
if (ipm) {
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
} else {
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (!ipm)
continue;
memcpy(ipm->mac, buf, sizeof(tmp->mac));
ipm->u.a4.addr = im4->multiaddr;
memcpy(ipm->mac, buf, OSA_ADDR_LEN);
ipm->is_multicast = 1;
if (!qeth_l3_add_ip(card, ipm))
kfree(ipm);
ipm->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->ip_mc_htable,
&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
}
}
kfree(tmp);
}
/* called with rcu_read_lock */
......@@ -1541,6 +1431,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
u16 vid;
QETH_CARD_TEXT(card, 4, "addmcvl");
if (!qeth_is_supported(card, IPA_FULL_VLAN))
return;
......@@ -1555,7 +1446,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
in_dev = __in_dev_get_rcu(netdev);
if (!in_dev)
continue;
qeth_l3_add_mc(card, in_dev);
qeth_l3_add_mc_to_hash(card, in_dev);
}
}
......@@ -1564,36 +1455,60 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
struct in_device *in4_dev;
QETH_CARD_TEXT(card, 4, "chkmcv4");
rcu_read_lock();
in4_dev = __in_dev_get_rcu(card->dev);
if (in4_dev == NULL)
goto unlock;
qeth_l3_add_mc(card, in4_dev);
qeth_l3_add_mc_to_hash(card, in4_dev);
qeth_l3_add_vlan_mc(card);
unlock:
rcu_read_unlock();
}
#ifdef CONFIG_QETH_IPV6
static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
static void
qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
{
struct qeth_ipaddr *ipm;
struct ifmcaddr6 *im6;
struct qeth_ipaddr *tmp;
char buf[MAX_ADDR_LEN];
QETH_CARD_TEXT(card, 4, "addmc6");
tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (!tmp)
return;
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
memcpy(tmp->mac, buf, sizeof(tmp->mac));
memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
sizeof(struct in6_addr));
tmp->is_multicast = 1;
ipm = qeth_l3_ip_from_hash(card, tmp);
if (ipm) {
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
continue;
}
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (!ipm)
continue;
ipm->is_multicast = 1;
memcpy(ipm->mac, buf, OSA_ADDR_LEN);
memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
sizeof(struct in6_addr));
if (!qeth_l3_add_ip(card, ipm))
kfree(ipm);
ipm->is_multicast = 1;
ipm->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->ip_mc_htable,
&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
}
kfree(tmp);
}
/* called with rcu_read_lock */
......@@ -1603,6 +1518,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
u16 vid;
QETH_CARD_TEXT(card, 4, "admc6vl");
if (!qeth_is_supported(card, IPA_FULL_VLAN))
return;
......@@ -1618,7 +1534,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
if (!in_dev)
continue;
read_lock_bh(&in_dev->lock);
qeth_l3_add_mc6(card, in_dev);
qeth_l3_add_mc6_to_hash(card, in_dev);
read_unlock_bh(&in_dev->lock);
in6_dev_put(in_dev);
}
......@@ -1629,14 +1545,16 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
struct inet6_dev *in6_dev;
QETH_CARD_TEXT(card, 4, "chkmcv6");
if (!qeth_is_supported(card, IPA_IPV6))
return ;
in6_dev = in6_dev_get(card->dev);
if (in6_dev == NULL)
if (!in6_dev)
return;
rcu_read_lock();
read_lock_bh(&in6_dev->lock);
qeth_l3_add_mc6(card, in6_dev);
qeth_l3_add_mc6_to_hash(card, in6_dev);
qeth_l3_add_vlan_mc6(card);
read_unlock_bh(&in6_dev->lock);
rcu_read_unlock();
......@@ -1660,16 +1578,23 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
in_dev = in_dev_get(netdev);
if (!in_dev)
return;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (addr) {
if (!addr)
return;
spin_lock_bh(&card->ip_lock);
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
addr->u.a4.addr = ifa->ifa_address;
addr->u.a4.mask = ifa->ifa_mask;
addr->type = QETH_IP_TYPE_NORMAL;
if (!qeth_l3_delete_ip(card, addr))
kfree(addr);
}
qeth_l3_delete_ip(card, addr);
}
spin_unlock_bh(&card->ip_lock);
kfree(addr);
in_dev_put(in_dev);
}
......@@ -1687,20 +1612,28 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
if (!netdev)
return;
in6_dev = in6_dev_get(netdev);
if (!in6_dev)
return;
list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (addr) {
if (!addr)
return;
spin_lock_bh(&card->ip_lock);
list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
memcpy(&addr->u.a6.addr, &ifa->addr,
sizeof(struct in6_addr));
addr->u.a6.pfxlen = ifa->prefix_len;
addr->type = QETH_IP_TYPE_NORMAL;
if (!qeth_l3_delete_ip(card, addr))
kfree(addr);
}
qeth_l3_delete_ip(card, addr);
}
spin_unlock_bh(&card->ip_lock);
kfree(addr);
in6_dev_put(in6_dev);
#endif /* CONFIG_QETH_IPV6 */
}
......@@ -1727,18 +1660,16 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
return 0;
}
spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
qeth_l3_free_vlan_addresses(card, vid);
clear_bit(vid, card->active_vlans);
spin_unlock_irqrestore(&card->vlanlock, flags);
qeth_l3_set_multicast_list(card->dev);
return 0;
}
......@@ -1994,8 +1925,8 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
static int qeth_l3_verify_dev(struct net_device *dev)
{
struct qeth_card *card;
unsigned long flags;
int rc = 0;
unsigned long flags;
read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_for_each_entry(card, &qeth_core_card_list.list, list) {
......@@ -2051,7 +1982,7 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
card->state = CARD_STATE_SOFTSETUP;
}
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_l3_clear_ip_list(card, 1);
qeth_l3_clear_ip_htable(card, 1);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
}
......@@ -2106,12 +2037,20 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
(card->state != CARD_STATE_UP))
return;
if (!card->options.sniffer) {
qeth_l3_delete_mc_addresses(card);
spin_lock_bh(&card->mclock);
qeth_l3_mark_all_mc_to_be_deleted(card);
qeth_l3_add_multicast_ipv4(card);
#ifdef CONFIG_QETH_IPV6
qeth_l3_add_multicast_ipv6(card);
#endif
qeth_l3_set_ip_addr_list(card);
qeth_l3_delete_nonused_mc(card);
qeth_l3_add_all_new_mc(card);
spin_unlock_bh(&card->mclock);
if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
return;
}
......@@ -2376,12 +2315,11 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
if (copy_to_user(udata, qinfo.udata, 4))
rc = -EFAULT;
goto free_and_out;
} else {
}
#ifdef CONFIG_QETH_IPV6
if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
/* fails in case of GuestLAN QDIO mode */
qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6,
&qinfo);
qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
}
#endif
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
......@@ -2390,7 +2328,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
goto free_and_out;
}
QETH_CARD_TEXT(card, 4, "qacts");
}
free_and_out:
kfree(qinfo.udata);
out:
......@@ -2427,7 +2365,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
rc = qeth_send_setassparms(card, iob,
sizeof(struct qeth_arp_cache_entry),
(unsigned long) entry,
qeth_l3_default_setassparms_cb, NULL);
qeth_setassparms_cb, NULL);
if (rc) {
tmp = rc;
qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
......@@ -2467,7 +2405,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
return -ENOMEM;
rc = qeth_send_setassparms(card, iob,
12, (unsigned long)buf,
qeth_l3_default_setassparms_cb, NULL);
qeth_setassparms_cb, NULL);
if (rc) {
tmp = rc;
memset(buf, 0, 16);
......@@ -2793,15 +2731,34 @@ static void qeth_tso_fill_header(struct qeth_card *card,
}
}
static inline int qeth_l3_tso_elements(struct sk_buff *skb)
/**
* qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso
* @card: qeth card structure, to check max. elems.
* @skb: SKB address
* @extra_elems: extra elems needed, to check against max.
*
* Returns the number of pages, and thus QDIO buffer elements, needed to cover
* skb data, including linear part and fragments, but excluding TCP header.
* (Exclusion of TCP header distinguishes it from qeth_get_elements_no().)
* Checks if the result plus extra_elems fits under the limit for the card.
* Returns 0 if it does not.
* Note: extra_elems is not included in the returned result.
*/
static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
struct sk_buff *skb, int extra_elems)
{
unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
tcp_hdr(skb)->doff * 4;
int tcpd_len = skb_headlen(skb) - (tcpd - (unsigned long)skb->data);
int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
elements += qeth_get_elements_for_frags(skb);
addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
int elements = qeth_get_elements_for_range(
tcpdptr,
(addr_t)skb->data + skb_headlen(skb)) +
qeth_get_elements_for_frags(skb);
if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2,
"Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n",
elements + extra_elems, skb->len);
return 0;
}
return elements;
}
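For illustration (hypothetical addresses): in contrast to qeth_get_elements_no(), the count starts after the TCP header, since for TSO the protocol headers are carried in a header element of their own.
/* linear skb: skb->data = 0x5000, skb_headlen() = 0x100,
 * TCP header ends at tcpdptr = 0x5042:
 *   qeth_get_elements_for_range(0x5042, 0x5100) = 1   (payload after TCP)
 * plus qeth_get_elements_for_frags() for any page fragments; extra_elems
 * covers the header element(s) in the limit check only.
 */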
......@@ -2810,8 +2767,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int rc;
u16 *tag;
struct qeth_hdr *hdr = NULL;
int elements_needed = 0;
int elems;
int hdr_elements = 0;
int elements;
struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = NULL;
int ipv = qeth_get_ip_version(skb);
......@@ -2822,7 +2779,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
qeth_get_priority_queue(card, skb, ipv, cast_type) :
card->qdio.default_out_queue];
int tx_bytes = skb->len;
bool large_send;
bool use_tso;
int data_offset = -1;
int nr_frags;
......@@ -2847,10 +2804,12 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
card->perf_stats.outbound_start_time = qeth_get_micros();
}
large_send = skb_is_gso(skb);
/* Ignore segment size from skb_is_gso(), 1 page is always used. */
use_tso = skb_is_gso(skb) &&
(qeth_get_ip_protocol(skb) == IPPROTO_TCP) && (ipv == 4);
if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
(skb_shinfo(skb)->nr_frags == 0)) {
if ((card->info.type == QETH_CARD_TYPE_IQD) &&
!skb_is_nonlinear(skb)) {
new_skb = skb;
if (new_skb->protocol == ETH_P_AF_IUCV)
data_offset = 0;
......@@ -2859,7 +2818,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!hdr)
goto tx_drop;
elements_needed++;
hdr_elements++;
} else {
/* create a clone with writeable headroom */
new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
......@@ -2894,22 +2853,28 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fix hardware limitation: as long as we do not have sbal
* chaining we can not send long frag lists
*/
if (large_send) {
if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
if (skb_linearize(new_skb))
goto tx_drop;
if (card->options.performance_stats)
if ((card->info.type != QETH_CARD_TYPE_IQD) &&
((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
(!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
int lin_rc = skb_linearize(new_skb);
if (card->options.performance_stats) {
if (lin_rc)
card->perf_stats.tx_linfail++;
else
card->perf_stats.tx_lin++;
}
if (lin_rc)
goto tx_drop;
}
if (large_send && (cast_type == RTN_UNSPEC)) {
if (use_tso) {
hdr = (struct qeth_hdr *)skb_push(new_skb,
sizeof(struct qeth_hdr_tso));
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
qeth_tso_fill_header(card, hdr, new_skb);
elements_needed++;
hdr_elements++;
} else {
if (data_offset < 0) {
hdr = (struct qeth_hdr *)skb_push(new_skb,
......@@ -2930,31 +2895,31 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
qeth_l3_hdr_csum(card, hdr, new_skb);
}
elems = qeth_get_elements_no(card, new_skb, elements_needed);
if (!elems) {
elements = use_tso ?
qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
qeth_get_elements_no(card, new_skb, hdr_elements);
if (!elements) {
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
goto tx_drop;
}
elements_needed += elems;
nr_frags = skb_shinfo(new_skb)->nr_frags;
elements += hdr_elements;
if (card->info.type != QETH_CARD_TYPE_IQD) {
int len;
if (large_send)
if (use_tso)
len = ((unsigned long)tcp_hdr(new_skb) +
tcp_hdr(new_skb)->doff * 4) -
tcp_hdrlen(new_skb)) -
(unsigned long)new_skb->data;
else
len = sizeof(struct qeth_hdr_layer3);
if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
goto tx_drop;
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements_needed);
rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements);
} else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements_needed, data_offset, 0);
elements, data_offset, 0);
if (!rc) {
card->stats.tx_packets++;
......@@ -2962,7 +2927,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (new_skb != skb)
dev_kfree_skb_any(skb);
if (card->options.performance_stats) {
if (large_send) {
nr_frags = skb_shinfo(new_skb)->nr_frags;
if (use_tso) {
card->perf_stats.large_send_bytes += tx_bytes;
card->perf_stats.large_send_cnt++;
}
......@@ -3048,36 +3014,6 @@ static int qeth_l3_stop(struct net_device *dev)
return 0;
}
static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
features &= ~NETIF_F_IP_CSUM;
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO;
if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
features &= ~NETIF_F_RXCSUM;
return features;
}
static int qeth_l3_set_features(struct net_device *dev,
netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
netdev_features_t changed = dev->features ^ features;
if (!(changed & NETIF_F_RXCSUM))
return 0;
if (card->state == CARD_STATE_DOWN ||
card->state == CARD_STATE_RECOVER)
return 0;
return qeth_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
}
static const struct ethtool_ops qeth_l3_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_strings = qeth_core_get_strings,
......@@ -3120,8 +3056,8 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_set_rx_mode = qeth_l3_set_multicast_list,
.ndo_do_ioctl = qeth_l3_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_l3_fix_features,
.ndo_set_features = qeth_l3_set_features,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
......@@ -3136,8 +3072,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_set_rx_mode = qeth_l3_set_multicast_list,
.ndo_do_ioctl = qeth_l3_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_l3_fix_features,
.ndo_set_features = qeth_l3_set_features,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
......@@ -3169,7 +3105,10 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->hw_features = NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_TSO;
card->dev->features = NETIF_F_RXCSUM;
card->dev->vlan_features = NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_TSO;
card->dev->features = NETIF_F_SG;
}
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
......@@ -3195,7 +3134,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
netif_keep_dst(card->dev);
card->dev->gso_max_size = 15 * PAGE_SIZE;
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE;
card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
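The gso_max_size change above ties the advertised GSO limit to the card's buffer geometry instead of the old hard-coded 15 pages. A rough worked example follows; the 16-element SBAL and 4 KiB page size are assumptions for illustration, since the actual value of QETH_MAX_BUFFER_ELEMENTS() depends on the device.

/* Worked example only; 16 elements and 4 KiB pages are assumed, not
 * values guaranteed by the driver.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long max_buffer_elements = 16;	/* hypothetical QETH_MAX_BUFFER_ELEMENTS(card) */

	/* The -1 appears to leave room for the separate qeth/TSO header
	 * element (hdr_elements in the xmit path above); treat that as an
	 * assumption.
	 */
	unsigned long gso_max_size = (max_buffer_elements - 1) * page_size;
	unsigned long gso_max_segs = max_buffer_elements - 1;

	printf("gso_max_size = %lu bytes, gso_max_segs = %lu\n",
	       gso_max_size, gso_max_segs);	/* 61440 bytes, 15 segments */
	return 0;
}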
......@@ -3230,7 +3171,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
card->dev = NULL;
}
qeth_l3_clear_ip_list(card, 0);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
return;
}
......@@ -3315,7 +3256,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
card->state = CARD_STATE_SOFTSETUP;
qeth_set_allowed_threads(card, 0xffffffff, 0);
qeth_l3_set_ip_addr_list(card);
qeth_l3_recover_ip(card);
if (card->lan_online)
netif_carrier_on(card->dev);
else
......@@ -3516,6 +3457,7 @@ EXPORT_SYMBOL_GPL(qeth_l3_discipline);
static int qeth_l3_ip_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev;
struct qeth_ipaddr *addr;
......@@ -3530,27 +3472,27 @@ static int qeth_l3_ip_event(struct notifier_block *this,
QETH_CARD_TEXT(card, 3, "ipevent");
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (addr != NULL) {
if (addr) {
addr->u.a4.addr = ifa->ifa_address;
addr->u.a4.mask = ifa->ifa_mask;
addr->type = QETH_IP_TYPE_NORMAL;
} else
goto out;
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
if (!qeth_l3_add_ip(card, addr))
kfree(addr);
spin_lock_bh(&card->ip_lock);
qeth_l3_add_ip(card, addr);
spin_unlock_bh(&card->ip_lock);
break;
case NETDEV_DOWN:
if (!qeth_l3_delete_ip(card, addr))
kfree(addr);
break;
default:
spin_lock_bh(&card->ip_lock);
qeth_l3_delete_ip(card, addr);
spin_unlock_bh(&card->ip_lock);
break;
}
qeth_l3_set_ip_addr_list(card);
out:
kfree(addr);
return NOTIFY_DONE;
}
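With the hashtable rework, qeth_l3_add_ip()/qeth_l3_delete_ip() are now called under the card's ip_lock with bottom halves disabled, and the notifier frees its temporary address buffer unconditionally afterwards, which suggests the add path copies the entry rather than taking ownership. A simplified user-space sketch of that caller-keeps-ownership pattern follows; the types and helpers are hypothetical, not the driver's API.

/* Caller-keeps-ownership sketch: the "add" helper copies the entry, so
 * the caller can always free its scratch buffer.  Everything here is
 * illustrative; only the pattern mirrors the notifier above.
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct ip_entry {
	unsigned int addr;
	struct ip_entry *next;
};

static struct ip_entry *table;

/* Copies *tmpl into the table; the caller still owns tmpl. */
static int table_add(const struct ip_entry *tmpl)
{
	struct ip_entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	memcpy(e, tmpl, sizeof(*e));
	e->next = table;
	table = e;
	return 0;
}

int main(void)
{
	struct ip_entry *tmp = malloc(sizeof(*tmp));

	if (!tmp)
		return 1;
	*tmp = (struct ip_entry){ .addr = 0x0a000001 };	/* 10.0.0.1, hypothetical */
	/* the driver would take spin_lock_bh(&card->ip_lock) here ... */
	table_add(tmp);
	/* ... and release it here */
	free(tmp);		/* always freed, as in the notifier above */
	printf("entries: %u\n", table ? 1 : 0);
	return 0;
}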
......@@ -3579,27 +3521,27 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
return NOTIFY_DONE;
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (addr != NULL) {
if (addr) {
memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
addr->u.a6.pfxlen = ifa->prefix_len;
addr->type = QETH_IP_TYPE_NORMAL;
} else
goto out;
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
if (!qeth_l3_add_ip(card, addr))
kfree(addr);
spin_lock_bh(&card->ip_lock);
qeth_l3_add_ip(card, addr);
spin_unlock_bh(&card->ip_lock);
break;
case NETDEV_DOWN:
if (!qeth_l3_delete_ip(card, addr))
kfree(addr);
break;
default:
spin_lock_bh(&card->ip_lock);
qeth_l3_delete_ip(card, addr);
spin_unlock_bh(&card->ip_lock);
break;
}
qeth_l3_set_ip_addr_list(card);
out:
kfree(addr);
return NOTIFY_DONE;
}
......
......@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/hashtable.h>
#include "qeth_l3.h"
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
......@@ -285,7 +286,9 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
if (card->options.hsuid[0]) {
/* delete old ip address */
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (addr != NULL) {
if (!addr)
return -ENOMEM;
addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
addr->u.a6.addr.s6_addr32[1] = 0x00000000;
for (i = 8; i < 16; i++)
......@@ -293,11 +296,9 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
card->options.hsuid[i - 8];
addr->u.a6.pfxlen = 0;
addr->type = QETH_IP_TYPE_NORMAL;
} else
return -ENOMEM;
if (!qeth_l3_delete_ip(card, addr))
qeth_l3_delete_ip(card, addr);
kfree(addr);
qeth_l3_set_ip_addr_list(card);
}
if (strlen(tmp) == 0) {
......@@ -328,9 +329,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
addr->type = QETH_IP_TYPE_NORMAL;
} else
return -ENOMEM;
if (!qeth_l3_add_ip(card, addr))
qeth_l3_add_ip(card, addr);
kfree(addr);
qeth_l3_set_ip_addr_list(card);
return count;
}
......@@ -367,8 +367,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_ipaddr *tmpipa, *t;
int rc = 0;
struct qeth_ipaddr *addr;
int i, rc = 0;
if (!card)
return -EINVAL;
......@@ -384,19 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
} else if (sysfs_streq(buf, "1")) {
card->ipato.enabled = 1;
list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
if ((tmpipa->type == QETH_IP_TYPE_NORMAL) &&
qeth_l3_is_addr_covered_by_ipato(card, tmpipa))
tmpipa->set_flags |=
hash_for_each(card->ip_htable, i, addr, hnode) {
if ((addr->type == QETH_IP_TYPE_NORMAL) &&
qeth_l3_is_addr_covered_by_ipato(card, addr))
addr->set_flags |=
QETH_IPA_SETIP_TAKEOVER_FLAG;
}
} else if (sysfs_streq(buf, "0")) {
card->ipato.enabled = 0;
list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
if (tmpipa->set_flags &
hash_for_each(card->ip_htable, i, addr, hnode) {
if (addr->set_flags &
QETH_IPA_SETIP_TAKEOVER_FLAG)
tmpipa->set_flags &=
addr->set_flags &=
~QETH_IPA_SETIP_TAKEOVER_FLAG;
}
} else
......@@ -452,7 +451,6 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
unsigned long flags;
char addr_str[40];
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
int i = 0;
......@@ -460,7 +458,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
/* add strlen for "/<mask>\n" */
entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
spin_lock_irqsave(&card->ip_lock, flags);
spin_lock_bh(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
......@@ -473,7 +471,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
i += snprintf(buf + i, PAGE_SIZE - i,
"%s/%i\n", addr_str, ipatoe->mask_bits);
}
spin_unlock_irqrestore(&card->ip_lock, flags);
spin_unlock_bh(&card->ip_lock);
i += snprintf(buf + i, PAGE_SIZE - i, "\n");
return i;
......@@ -689,15 +687,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
struct hlist_node *tmp;
char addr_str[40];
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
unsigned long flags;
int i = 0;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_irqsave(&card->ip_lock, flags);
list_for_each_entry(ipaddr, &card->ip_list, entry) {
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_VIPA)
......@@ -711,7 +709,7 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
addr_str);
i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
}
spin_unlock_irqrestore(&card->ip_lock, flags);
spin_unlock_bh(&card->ip_lock);
i += snprintf(buf + i, PAGE_SIZE - i, "\n");
return i;
......@@ -851,15 +849,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
struct hlist_node *tmp;
char addr_str[40];
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
unsigned long flags;
int i = 0;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_irqsave(&card->ip_lock, flags);
list_for_each_entry(ipaddr, &card->ip_list, entry) {
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_RXIP)
......@@ -873,7 +871,7 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
addr_str);
i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
}
spin_unlock_irqrestore(&card->ip_lock, flags);
spin_unlock_bh(&card->ip_lock);
i += snprintf(buf + i, PAGE_SIZE - i, "\n");
return i;
......