Commit 81fa846a authored by David S. Miller

Merge branch 'netvsc-NAPI'

Stephen Hemminger says:

====================
NAPI support for Hyper-V

These patches enable NAPI, GRO and napi_alloc_skb for the Hyper-V netvsc
driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1a4691b2 e91e7dd7
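
The series converts the netvsc receive path to a NAPI poll handler driven by the new
VMBus packet iterator added in this merge. As context for the diff below, here is a
minimal sketch of the general shape such a poll handler takes; it is not the driver's
actual netvsc_poll() (which lives in the collapsed netvsc.c diff), and
example_process_pkt() is a hypothetical placeholder for the per-packet RNDIS receive
work.

/* Sketch only: assumes <linux/netdevice.h>, <linux/hyperv.h> and the
 * driver's hyperv_net.h are available. */
static int example_poll(struct napi_struct *napi, int budget)
{
        struct netvsc_channel *nvchan =
                container_of(napi, struct netvsc_channel, napi);
        struct vmbus_channel *channel = nvchan->channel;
        const struct vmpacket_descriptor *desc;
        int work_done = 0;

        /* Walk pending ring-buffer packets, up to the NAPI budget. */
        desc = hv_pkt_iter_first(channel);
        while (desc && work_done < budget) {
                work_done += example_process_pkt(nvchan, desc); /* hypothetical */
                desc = hv_pkt_iter_next(channel, desc);
        }

        if (desc) {
                /* Budget exhausted: commit the read index ourselves. */
                hv_pkt_iter_close(channel);
        } else if (work_done < budget) {
                /*
                 * Ring drained below budget: leave polling mode.  A real
                 * driver would also re-arm the channel interrupt here.
                 */
                napi_complete_done(napi, work_done);
        }

        return work_done;
}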
@@ -32,6 +32,8 @@
 #include "hyperv_vmbus.h"
 
+#define VMBUS_PKT_TRAILER 8
+
 /*
  * When we write to the ring buffer, check if the host needs to
  * be signaled. Here is the details of this protocol:
@@ -336,6 +338,12 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
         return 0;
 }
 
+static inline void
+init_cached_read_index(struct hv_ring_buffer_info *rbi)
+{
+        rbi->cached_read_index = rbi->ring_buffer->read_index;
+}
+
 int hv_ringbuffer_read(struct vmbus_channel *channel,
                        void *buffer, u32 buflen, u32 *buffer_actual_len,
                        u64 *requestid, bool raw)
@@ -366,7 +374,8 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
                 return ret;
         }
 
-        init_cached_read_index(channel);
+        init_cached_read_index(inring_info);
+
         next_read_location = hv_get_next_read_location(inring_info);
         next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                     sizeof(desc),
@@ -410,3 +419,86 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 
         return ret;
 }
+
+/*
+ * Determine number of bytes available in ring buffer after
+ * the current iterator (priv_read_index) location.
+ *
+ * This is similar to hv_get_bytes_to_read but with private
+ * read index instead.
+ */
+static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
+{
+        u32 priv_read_loc = rbi->priv_read_index;
+        u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+
+        if (write_loc >= priv_read_loc)
+                return write_loc - priv_read_loc;
+        else
+                return (rbi->ring_datasize - priv_read_loc) + write_loc;
+}
+
+/*
+ * Get first vmbus packet from ring buffer after read_index
+ *
+ * If ring buffer is empty, returns NULL and no other action needed.
+ */
+struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
+{
+        struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+        /* set state for later hv_signal_on_read() */
+        init_cached_read_index(rbi);
+
+        if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
+                return NULL;
+
+        return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+}
+EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
+
+/*
+ * Get next vmbus packet from ring buffer.
+ *
+ * Advances the current location (priv_read_index) and checks for more
+ * data. If the end of the ring buffer is reached, then return NULL.
+ */
+struct vmpacket_descriptor *
+__hv_pkt_iter_next(struct vmbus_channel *channel,
+                   const struct vmpacket_descriptor *desc)
+{
+        struct hv_ring_buffer_info *rbi = &channel->inbound;
+        u32 packetlen = desc->len8 << 3;
+        u32 dsize = rbi->ring_datasize;
+
+        /* bump offset to next potential packet */
+        rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+        if (rbi->priv_read_index >= dsize)
+                rbi->priv_read_index -= dsize;
+
+        /* more data? */
+        if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
+                return NULL;
+        else
+                return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+}
+EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
+
+/*
+ * Update host ring buffer after iterating over packets.
+ */
+void hv_pkt_iter_close(struct vmbus_channel *channel)
+{
+        struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+        /*
+         * Make sure all reads are done before we update the read index since
+         * the writer may start writing to the read area once the read index
+         * is updated.
+         */
+        virt_rmb();
+        rbi->ring_buffer->read_index = rbi->priv_read_index;
+
+        hv_signal_on_read(channel);
+}
+EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
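
Taken together, the three exported functions above define a batch-consume contract:
hv_pkt_iter_first() begins the batch, __hv_pkt_iter_next() advances through it in place,
and hv_pkt_iter_close() commits the read index and signals the host once at the end.
A consumer sketch, not code from this series (handle_pkt() is a hypothetical per-packet
handler; the hv_pkt_iter_next() wrapper and foreach_vmbus_pkt() macro added to hyperv.h
further down hide the explicit close):

struct vmpacket_descriptor *pkt;

/* Process every complete packet currently in the inbound ring. */
for (pkt = hv_pkt_iter_first(channel); pkt;
     pkt = __hv_pkt_iter_next(channel, pkt))
        handle_pkt(hv_pkt_data(pkt), hv_pkt_datalen(pkt));

/* Commit the read index and, if needed, signal the host, once per batch. */
hv_pkt_iter_close(channel);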
@@ -196,6 +196,7 @@ int netvsc_recv_callback(struct net_device *net,
                          const struct ndis_tcp_ip_checksum_info *csum_info,
                          const struct ndis_pkt_8021q_info *vlan);
 void netvsc_channel_cb(void *context);
+int netvsc_poll(struct napi_struct *napi, int budget);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 int rndis_filter_device_add(struct hv_device *dev,
@@ -720,6 +721,7 @@ struct net_device_context {
 /* Per channel data */
 struct netvsc_channel {
         struct vmbus_channel *channel;
+        struct napi_struct napi;
         struct multi_send_data msd;
         struct multi_recv_comp mrc;
         atomic_t queue_sends;
...
This diff is collapsed.
@@ -589,13 +589,14 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
 }
 
 static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+                                             struct napi_struct *napi,
                                              const struct ndis_tcp_ip_checksum_info *csum_info,
                                              const struct ndis_pkt_8021q_info *vlan,
                                              void *data, u32 buflen)
 {
         struct sk_buff *skb;
 
-        skb = netdev_alloc_skb_ip_align(net, buflen);
+        skb = napi_alloc_skb(napi, buflen);
         if (!skb)
                 return skb;
@@ -642,11 +643,11 @@ int netvsc_recv_callback(struct net_device *net,
 {
         struct net_device_context *net_device_ctx = netdev_priv(net);
         struct netvsc_device *net_device = net_device_ctx->nvdev;
+        u16 q_idx = channel->offermsg.offer.sub_channel_index;
+        struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
         struct net_device *vf_netdev;
         struct sk_buff *skb;
         struct netvsc_stats *rx_stats;
-        u16 q_idx = channel->offermsg.offer.sub_channel_index;
 
         if (net->reg_state != NETREG_REGISTERED)
                 return NVSP_STAT_FAIL;
@@ -664,7 +665,8 @@ int netvsc_recv_callback(struct net_device *net,
                 net = vf_netdev;
 
         /* Allocate a skb - TODO direct I/O to pages? */
-        skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len);
+        skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
+                                    csum_info, vlan, data, len);
         if (unlikely(!skb)) {
                 ++net->stats.rx_dropped;
                 rcu_read_unlock();
@@ -679,7 +681,7 @@ int netvsc_recv_callback(struct net_device *net,
          * on the synthetic device because modifying the VF device
          * statistics will not work correctly.
          */
-        rx_stats = &net_device->chan_table[q_idx].rx_stats;
+        rx_stats = &nvchan->rx_stats;
         u64_stats_update_begin(&rx_stats->syncp);
         rx_stats->packets++;
         rx_stats->bytes += len;
@@ -690,12 +692,7 @@ int netvsc_recv_callback(struct net_device *net,
                 ++rx_stats->multicast;
         u64_stats_update_end(&rx_stats->syncp);
 
-        /*
-         * Pass the skb back up. Network stack will deallocate the skb when it
-         * is done.
-         * TODO - use NAPI?
-         */
-        netif_receive_skb(skb);
+        napi_gro_receive(&nvchan->napi, skb);
         rcu_read_unlock();
 
         return 0;
...
@@ -1012,6 +1012,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
         if (ret == 0)
                 nvscdev->chan_table[chn_index].channel = new_sc;
 
+        napi_enable(&nvscdev->chan_table[chn_index].napi);
+
         spin_lock_irqsave(&nvscdev->sc_lock, flags);
         nvscdev->num_sc_offered--;
         spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
...
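
The napi_enable() added above covers subchannels opened later through netvsc_sc_open();
the matching netif_napi_add()/napi_enable() for the primary channel sits in the collapsed
netvsc.c diff. Purely as a hedged illustration of the usual pairing for the per-channel
napi_struct introduced in this series (ndev, net_device and i are placeholder variables):

/* Setup: register the per-channel NAPI instance with its poll handler. */
struct netvsc_channel *nvchan = &net_device->chan_table[i];

netif_napi_add(ndev, &nvchan->napi, netvsc_poll, NAPI_POLL_WEIGHT);
napi_enable(&nvchan->napi);

/* Teardown: quiesce and unregister in the reverse order. */
napi_disable(&nvchan->napi);
netif_napi_del(&nvchan->napi);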
@@ -1504,14 +1504,6 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
         return;
 }
 
-static inline void
-init_cached_read_index(struct vmbus_channel *channel)
-{
-        struct hv_ring_buffer_info *rbi = &channel->inbound;
-
-        rbi->cached_read_index = rbi->ring_buffer->read_index;
-}
-
 /*
  * Mask off host interrupt callback notifications
  */
@@ -1545,76 +1537,48 @@ static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 /*
  * An API to support in-place processing of incoming VMBUS packets.
  */
-#define VMBUS_PKT_TRAILER 8
 
-static inline struct vmpacket_descriptor *
-get_next_pkt_raw(struct vmbus_channel *channel)
+/* Get data payload associated with descriptor */
+static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
 {
-        struct hv_ring_buffer_info *ring_info = &channel->inbound;
-        u32 priv_read_loc = ring_info->priv_read_index;
-        void *ring_buffer = hv_get_ring_buffer(ring_info);
-        u32 dsize = ring_info->ring_datasize;
-
-        /*
-         * delta is the difference between what is available to read and
-         * what was already consumed in place. We commit read index after
-         * the whole batch is processed.
-         */
-        u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
-                priv_read_loc - ring_info->ring_buffer->read_index :
-                (dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
-        u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
-
-        if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
-                return NULL;
-
-        return ring_buffer + priv_read_loc;
+        return (void *)((unsigned long)desc + (desc->offset8 << 3));
 }
 
-/*
- * A helper function to step through packets "in-place"
- * This API is to be called after each successful call
- * get_next_pkt_raw().
- */
-static inline void put_pkt_raw(struct vmbus_channel *channel,
-                               struct vmpacket_descriptor *desc)
+/* Get data size associated with descriptor */
+static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
 {
-        struct hv_ring_buffer_info *ring_info = &channel->inbound;
-        u32 packetlen = desc->len8 << 3;
-        u32 dsize = ring_info->ring_datasize;
-
-        /*
-         * Include the packet trailer.
-         */
-        ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
-        ring_info->priv_read_index %= dsize;
+        return (desc->len8 << 3) - (desc->offset8 << 3);
 }
 
+struct vmpacket_descriptor *
+hv_pkt_iter_first(struct vmbus_channel *channel);
+
+struct vmpacket_descriptor *
+__hv_pkt_iter_next(struct vmbus_channel *channel,
+                   const struct vmpacket_descriptor *pkt);
+
+void hv_pkt_iter_close(struct vmbus_channel *channel);
+
 /*
- * This call commits the read index and potentially signals the host.
- * Here is the pattern for using the "in-place" consumption APIs:
- *
- * init_cached_read_index();
- *
- * while (get_next_pkt_raw() {
- *        process the packet "in-place";
- *        put_pkt_raw();
- * }
- * if (packets processed in place)
- *        commit_rd_index();
+ * Get next packet descriptor from iterator
+ * If at end of list, return NULL and update host.
  */
-static inline void commit_rd_index(struct vmbus_channel *channel)
+static inline struct vmpacket_descriptor *
+hv_pkt_iter_next(struct vmbus_channel *channel,
+                 const struct vmpacket_descriptor *pkt)
 {
-        struct hv_ring_buffer_info *ring_info = &channel->inbound;
-        /*
-         * Make sure all reads are done before we update the read index since
-         * the writer may start writing to the read area once the read index
-         * is updated.
-         */
-        virt_rmb();
-        ring_info->ring_buffer->read_index = ring_info->priv_read_index;
+        struct vmpacket_descriptor *nxt;
+
+        nxt = __hv_pkt_iter_next(channel, pkt);
+        if (!nxt)
+                hv_pkt_iter_close(channel);
 
-        hv_signal_on_read(channel);
+        return nxt;
 }
 
+#define foreach_vmbus_pkt(pkt, channel) \
+        for (pkt = hv_pkt_iter_first(channel); pkt; \
+             pkt = hv_pkt_iter_next(channel, pkt))
+
 #endif /* _HYPERV_H */