Commit a4688132 authored by David S. Miller

Merge branch 'xen-netback-synchronization'

Wei Liu says:

====================
xen-netback: synchronisation between core driver and netback

The zero-copy netback has far more interactions with the core network driver
than the old copying backend did. One significant difference is that netback now
relies on a callback from the core driver to correctly release resources.

However, correct synchronisation between the core driver and netback is missing.
Currently netback relies on a loop to wait for the core driver to release
resources. This has recently proven insufficient and error-prone, partly due to
code structure, partly due to missing synchronisation. Short-lived domains such
as OpenMirage unikernels can easily trigger a race in the backend, rendering the
backend unresponsive.

This patch series aims to solve this issue by introducing proper
synchronisation between the core driver and netback.
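
To make the intended synchronisation concrete, here is a minimal userspace
sketch of the pattern (all names hypothetical; C11 atomics and a pthread stand
in for the kernel's atomic_t, kthread and waitqueue machinery): every zero-copy
packet handed to the core driver bumps an inflight counter, the completion
callback drops it, and the dealloc thread refuses to exit until a stop has been
requested and the counter has drained to zero.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for the per-queue state. */
    struct queue {
            atomic_int  inflight_packets; /* packets handed to the core driver */
            atomic_bool stop_requested;   /* set when the backend disconnects */
    };

    /* Called when a zero-copy packet is handed to the core driver. */
    static void zerocopy_prepare(struct queue *q)
    {
            atomic_fetch_add(&q->inflight_packets, 1);
    }

    /* Called from the core driver's completion callback. */
    static void zerocopy_complete(struct queue *q)
    {
            atomic_fetch_sub(&q->inflight_packets, 1);
    }

    /* The dealloc thread may only exit once a stop has been requested
     * AND every inflight packet has completed.
     */
    static bool dealloc_should_stop(struct queue *q)
    {
            return atomic_load(&q->stop_requested) &&
                   atomic_load(&q->inflight_packets) == 0;
    }

    static void *dealloc_thread(void *data)
    {
            struct queue *q = data;

            for (;;) {
                    if (dealloc_should_stop(q))
                            break;
                    /* ... release completed grant mappings; the real
                     * driver sleeps on a waitqueue here instead of
                     * spinning ...
                     */
            }
            return NULL;
    }

    int main(void)
    {
            struct queue q = { 0 };
            pthread_t tid;

            pthread_create(&tid, NULL, dealloc_thread, &q);

            zerocopy_prepare(&q);                  /* packet handed off */
            atomic_store(&q.stop_requested, true); /* disconnect requested... */
            zerocopy_complete(&q);                 /* ...exit only after this */

            pthread_join(&tid, NULL);
            printf("dealloc thread exited with %d inflight\n",
                   atomic_load(&q.inflight_packets));
            return 0;
    }

Note the ordering: the stop request alone is not enough; the thread only exits
once inflight_packets reaches zero, so the completion callback can never race
with teardown of the resources it needs.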

Changes in v4:
* avoid using wait queue
* remove dedicated loop for netif_napi_del
* remove unnecessary check on callback

Changes in v3: improve commit message in patch 1

Changes in v2: fix Zoltan's email address in commit message
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 68809958 b1252858
@@ -165,6 +165,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	u16 dealloc_ring[MAX_PENDING_REQS];
 	struct task_struct *dealloc_task;
 	wait_queue_head_t dealloc_wq;
+	atomic_t inflight_packets;
 
 	/* Use kthread for guest RX */
 	struct task_struct *task;
@@ -329,4 +330,8 @@ extern unsigned int xenvif_max_queues;
 extern struct dentry *xen_netback_dbg_root;
 #endif
 
+void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
+				 struct sk_buff *skb);
+void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
@@ -43,6 +43,23 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
+ * increasing the inflight counter. We need to increase the inflight
+ * counter because core driver calls into xenvif_zerocopy_callback
+ * which calls xenvif_skb_zerocopy_complete.
+ */
+void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
+				 struct sk_buff *skb)
+{
+	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	atomic_inc(&queue->inflight_packets);
+}
+
+void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
+{
+	atomic_dec(&queue->inflight_packets);
+}
+
 static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 {
 	struct net_device *dev = queue->vif->dev;
@@ -524,9 +541,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 
 	init_timer(&queue->rx_stalled);
 
-	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
-		       XENVIF_NAPI_WEIGHT);
-
 	return 0;
 }
@@ -560,6 +574,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 
 	init_waitqueue_head(&queue->wq);
 	init_waitqueue_head(&queue->dealloc_wq);
+	atomic_set(&queue->inflight_packets, 0);
 
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
@@ -614,6 +629,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 	wake_up_process(queue->task);
 	wake_up_process(queue->dealloc_task);
 
+	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+		       XENVIF_NAPI_WEIGHT);
+
 	return 0;
 
 err_rx_unbind:
@@ -642,25 +660,6 @@ void xenvif_carrier_off(struct xenvif *vif)
 	rtnl_unlock();
 }
 
-static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
-				      unsigned int worst_case_skb_lifetime)
-{
-	int i, unmap_timeout = 0;
-
-	for (i = 0; i < MAX_PENDING_REQS; ++i) {
-		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
-			unmap_timeout++;
-			schedule_timeout(msecs_to_jiffies(1000));
-			if (unmap_timeout > worst_case_skb_lifetime &&
-			    net_ratelimit())
-				netdev_err(queue->vif->dev,
-					   "Page still granted! Index: %x\n",
-					   i);
-			i = -1;
-		}
-	}
-}
-
 void xenvif_disconnect(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
@@ -672,6 +671,8 @@ void xenvif_disconnect(struct xenvif *vif)
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
+		netif_napi_del(&queue->napi);
+
 		if (queue->task) {
 			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
@@ -704,7 +705,6 @@ void xenvif_disconnect(struct xenvif *vif)
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
 	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
-	netif_napi_del(&queue->napi);
 }
 
 void xenvif_free(struct xenvif *vif)
@@ -712,21 +712,11 @@ void xenvif_free(struct xenvif *vif)
 	struct xenvif_queue *queue = NULL;
 	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
-	/* Here we want to avoid timeout messages if an skb can be legitimately
-	 * stuck somewhere else. Realistically this could be an another vif's
-	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
-	 * Although if that other guest wakes up just before its timeout happens
-	 * and takes only one skb from QDisc, it can hold onto other skbs for a
-	 * longer period.
-	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
 
 	unregister_netdev(vif->dev);
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
-		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
 		xenvif_deinit_queue(queue);
 	}
...
@@ -1525,10 +1525,12 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 	/* remove traces of mapped pages and frag_list */
 	skb_frag_list_init(skb);
 	uarg = skb_shinfo(skb)->destructor_arg;
+	/* increase inflight counter to offset decrement in callback */
+	atomic_inc(&queue->inflight_packets);
 	uarg->callback(uarg, true);
 	skb_shinfo(skb)->destructor_arg = NULL;
 
-	skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	xenvif_skb_zerocopy_prepare(queue, nskb);
 	kfree_skb(nskb);
 
 	return 0;
@@ -1589,7 +1591,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 			if (net_ratelimit())
 				netdev_err(queue->vif->dev,
 					   "Not enough memory to consolidate frag_list!\n");
-			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+			xenvif_skb_zerocopy_prepare(queue, skb);
 			kfree_skb(skb);
 			continue;
 		}
@@ -1609,7 +1611,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 					   "Can't setup checksum in net_tx_action\n");
 			/* We have to set this flag to trigger the callback */
 			if (skb_shinfo(skb)->destructor_arg)
-				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+				xenvif_skb_zerocopy_prepare(queue, skb);
 			kfree_skb(skb);
 			continue;
 		}
@@ -1641,7 +1643,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
 		 */
 		if (skb_shinfo(skb)->destructor_arg) {
-			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+			xenvif_skb_zerocopy_prepare(queue, skb);
 			queue->stats.tx_zerocopy_sent++;
 		}
@@ -1681,6 +1683,7 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 		queue->stats.tx_zerocopy_success++;
 	else
 		queue->stats.tx_zerocopy_fail++;
+	xenvif_skb_zerocopy_complete(queue);
 }
 
 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
@@ -2058,15 +2061,24 @@ int xenvif_kthread_guest_rx(void *data)
 	return 0;
 }
 
+static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
+{
+	/* Dealloc thread must remain running until all inflight
+	 * packets complete.
+	 */
+	return kthread_should_stop() &&
+		!atomic_read(&queue->inflight_packets);
+}
+
 int xenvif_dealloc_kthread(void *data)
 {
 	struct xenvif_queue *queue = data;
 
-	while (!kthread_should_stop()) {
+	for (;;) {
 		wait_event_interruptible(queue->dealloc_wq,
 					 tx_dealloc_work_todo(queue) ||
-					 kthread_should_stop());
-		if (kthread_should_stop())
+					 xenvif_dealloc_kthread_should_stop(queue));
+		if (xenvif_dealloc_kthread_should_stop(queue))
 			break;
 
 		xenvif_tx_dealloc_action(queue);
...