Commit 3c90a941 authored by David S. Miller

Merge branch 'hv_netvsc-cleanups'

Stephen Hemminger says:

====================
Hyper-V network driver cleanups.

The only new functionality is minor extensions to ethtool.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 01555e64 4323b47c
@@ -84,8 +84,6 @@ struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */
#define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40
#define ITAB_NUM 128
#define HASH_KEYLEN NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2
extern u8 netvsc_hash_key[];
struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
struct ndis_obj_header hdr;
@@ -175,7 +173,7 @@ struct rndis_device {
struct rndis_message;
struct netvsc_device;
int netvsc_device_add(struct hv_device *device, void *additional_info);
int netvsc_device_remove(struct hv_device *device);
void netvsc_device_remove(struct hv_device *device);
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
@@ -654,6 +652,14 @@ struct netvsc_stats {
struct u64_stats_sync syncp;
};
struct netvsc_ethtool_stats {
        unsigned long tx_scattered;
        unsigned long tx_no_memory;
        unsigned long tx_no_space;
        unsigned long tx_too_big;
        unsigned long tx_busy;
};
struct netvsc_reconfig {
struct list_head list;
u32 event;
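The tx_* counters introduced above are meant to be reported through ethtool's per-driver statistics (ethtool -S). The actual hook-up is in a part of the series not shown here; a minimal sketch of the usual pattern, assuming hypothetical names such as netvsc_stats_desc, netvsc_get_sset_count() and netvsc_get_ethtool_stats(), plus the eth_stats field added in the next hunk, could look like:

/*
 * Illustrative sketch only, not the merged driver code.  The table and
 * callback names here are assumptions; they show how a struct like
 * netvsc_ethtool_stats is typically exposed through ethtool_ops.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} netvsc_stats_desc[] = {
        { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
        { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
        { "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
        { "tx_too_big",   offsetof(struct netvsc_ethtool_stats, tx_too_big) },
        { "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
};

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
        return string_set == ETH_SS_STATS ? ARRAY_SIZE(netvsc_stats_desc) : -EINVAL;
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        /* eth_stats is the field added to net_device_context in the next hunk */
        struct net_device_context *ndc = netdev_priv(dev);
        const char *base = (const char *)&ndc->eth_stats;
        int i;

        for (i = 0; i < ARRAY_SIZE(netvsc_stats_desc); i++)
                data[i] = *(const unsigned long *)(base + netvsc_stats_desc[i].offset);
}

A matching .get_strings() callback would copy the same names into the ethtool string table so that ethtool -S can label each value.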
@@ -683,6 +689,7 @@ struct net_device_context {
/* Ethtool settings */
u8 duplex;
u32 speed;
struct netvsc_ethtool_stats eth_stats;
/* the device is going away */
bool start_remove;
[Diffs for two additional files in this merge are collapsed and not shown.]
@@ -663,13 +663,14 @@ rndis_filter_set_offload_params(struct net_device *ndev,
return ret;
}
u8 netvsc_hash_key[HASH_KEYLEN] = {
static const u8 netvsc_hash_key[] = {
        0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
        0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
        0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
        0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
        0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key)
static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
{
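Deriving HASH_KEYLEN with ARRAY_SIZE() ties the length to the key data itself, so the two can no longer drift apart; the initializer above is 40 bytes, matching the old NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 value. For reference, the kernel's ARRAY_SIZE() is essentially:

/* From <linux/kernel.h>; __must_be_array() turns a pointer argument
 * into a build error, so the macro only works on real arrays. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))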
@@ -720,7 +721,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
for (i = 0; i < HASH_KEYLEN; i++)
keyp[i] = netvsc_hash_key[i];
ret = rndis_filter_send_request(rdev, request);
if (ret != 0)
goto cleanup;
@@ -738,7 +738,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
return ret;
}
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
u32 size = sizeof(u32);
@@ -814,7 +813,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
return ret;
}
static int rndis_filter_init_device(struct rndis_device *dev)
{
struct rndis_request *request;
@@ -902,7 +900,6 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
if (request)
put_rndis_request(dev, request);
return;
}
static int rndis_filter_open_device(struct rndis_device *dev)
@@ -1054,7 +1051,6 @@ int rndis_filter_device_add(struct hv_device *dev,
offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
ret = rndis_filter_set_offload_params(net, &offloads);
if (ret)
goto err_dev_remv;
@@ -1180,7 +1176,6 @@ void rndis_filter_device_remove(struct hv_device *dev)
netvsc_device_remove(dev);
}
int rndis_filter_open(struct netvsc_device *nvdev)
{
if (!nvdev)
@@ -1114,6 +1114,13 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
{
        const struct kobject *kobj = &device_obj->device.kobj;

        return kobj->name;
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
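The new vmbus_dev_name() helper lets VMBus drivers retrieve the device's kobject name without open-coding the kobject access at every call site. A hedged usage sketch, not code from this series; the netvsc_get_drvinfo() callback and the device_ctx field are assumptions about how a driver keeps its struct hv_device pointer:

/* Hypothetical caller: report the VMBus device name via ethtool -i.
 * Assumes <linux/ethtool.h>, <linux/netdevice.h> and a driver-private
 * net_device_context holding a struct hv_device *device_ctx. */
static void netvsc_get_drvinfo(struct net_device *net,
                               struct ethtool_drvinfo *info)
{
        struct net_device_context *ndc = netdev_priv(net);

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->bus_info, vmbus_dev_name(ndc->device_ctx),
                sizeof(info->bus_info));
}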
@@ -1422,88 +1429,4 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
return false;
}
/*
 * An API to support in-place processing of incoming VMBUS packets.
 */
#define VMBUS_PKT_TRAILER 8

static inline struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *ring_info = &channel->inbound;
        u32 read_loc = ring_info->priv_read_index;
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        struct vmpacket_descriptor *cur_desc;
        u32 packetlen;
        u32 dsize = ring_info->ring_datasize;
        u32 delta = read_loc - ring_info->ring_buffer->read_index;
        u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);

        if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
                return NULL;

        if ((read_loc + sizeof(*cur_desc)) > dsize)
                return NULL;

        cur_desc = ring_buffer + read_loc;
        packetlen = cur_desc->len8 << 3;

        /*
         * If the packet under consideration is wrapping around,
         * return failure.
         */
        if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
                return NULL;

        return cur_desc;
}
/*
 * A helper function to step through packets "in-place"
 * This API is to be called after each successful call
 * get_next_pkt_raw().
 */
static inline void put_pkt_raw(struct vmbus_channel *channel,
                               struct vmpacket_descriptor *desc)
{
        struct hv_ring_buffer_info *ring_info = &channel->inbound;
        u32 read_loc = ring_info->priv_read_index;
        u32 packetlen = desc->len8 << 3;
        u32 dsize = ring_info->ring_datasize;

        if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
                BUG();

        /*
         * Include the packet trailer.
         */
        ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
}
/*
 * This call commits the read index and potentially signals the host.
 * Here is the pattern for using the "in-place" consumption APIs:
 *
 * while (get_next_pkt_raw() {
 *         process the packet "in-place";
 *         put_pkt_raw();
 * }
 * if (packets processed in place)
 *         commit_rd_index();
 */
static inline void commit_rd_index(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *ring_info = &channel->inbound;

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_rmb();
        ring_info->ring_buffer->read_index = ring_info->priv_read_index;

        if (hv_need_to_signal_on_read(ring_info))
                vmbus_set_event(channel);
}
#endif /* _HYPERV_H */
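For reference, the helpers dropped from this header by the hunk above implemented the "in-place" consumption pattern spelled out in their comment block. A minimal sketch of such a consumer loop, with handle_packet() as a hypothetical per-packet handler added only for illustration:

/* handle_packet() is purely illustrative; the loop mirrors the usage
 * comment above commit_rd_index(). */
static void handle_packet(const struct vmpacket_descriptor *desc)
{
        /* hypothetical per-packet processing goes here */
}

static void drain_inbound(struct vmbus_channel *channel)
{
        struct vmpacket_descriptor *desc;
        bool consumed = false;

        while ((desc = get_next_pkt_raw(channel)) != NULL) {
                handle_packet(desc);            /* process the packet "in place" */
                put_pkt_raw(channel, desc);     /* step past it, trailer included */
                consumed = true;
        }

        if (consumed)
                commit_rd_index(channel);       /* publish read index, maybe signal host */
}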