Commit 84841384 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  virtio: fix race in enable_cb
  virtio: Enable netpoll interface for netconsole logging
  virtio: handle > 2 billion page balloon targets
  virtio: Fix sysfs bits to have proper block symlink
  virtio: Use spin_lock_irqsave/restore for virtio-pci
parents 3d10a15d 4265f161
drivers/block/virtio_blk.c

@@ -238,6 +238,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 	vblk->disk->first_minor = index_to_minor(index);
 	vblk->disk->private_data = vblk;
 	vblk->disk->fops = &virtblk_fops;
+	vblk->disk->driverfs_dev = &vdev->dev;
 	index++;
 
 	/* If barriers are supported, tell block layer that queue is ordered */
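The one-line change above is what makes the /sys/block/<disk>/device symlink appear: the block layer uses gendisk->driverfs_dev as the sysfs parent when add_disk() registers the disk. A minimal sketch of the same pattern for a hypothetical driver follows; mydisk_fops, the major number and the request queue are assumed to exist elsewhere, and none of this is code from the commit.

#include <linux/genhd.h>
#include <linux/blkdev.h>

static struct block_device_operations mydisk_fops;	/* stub for the sketch */

static int mydisk_register(struct device *parent, int major,
			   struct request_queue *q)
{
	struct gendisk *disk = alloc_disk(16);	/* 16 minors */

	if (!disk)
		return -ENOMEM;

	disk->major = major;
	disk->first_minor = 0;
	disk->fops = &mydisk_fops;
	disk->queue = q;
	disk->driverfs_dev = parent;	/* => /sys/block/myd0/device symlink */
	sprintf(disk->disk_name, "myd0");

	add_disk(disk);
	return 0;
}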
drivers/net/virtio_net.c

@@ -203,8 +203,11 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	if (received < budget) {
 		netif_rx_complete(vi->dev, napi);
 		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
-		    && netif_rx_reschedule(vi->dev, napi))
+		    && napi_schedule_prep(napi)) {
+			vi->rvq->vq_ops->disable_cb(vi->rvq);
+			__netif_rx_schedule(vi->dev, napi);
 			goto again;
+		}
 	}
 
 	return received;
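The race the virtnet_poll() hunk closes: the host can hand back a buffer between the driver's final receive pass and enable_cb(), in which case the callback that would have rescheduled polling has already fired and been lost. enable_cb() returns false to report exactly that, and the driver must then atomically re-enter polling itself, with callbacks masked again. A minimal sketch of the pattern, with mydev_rx(), mydev_enable_irq() and mydev_disable_irq() standing in for the virtqueue operations (the NAPI calls are the same 2.6.24/25-era API the hunk uses):

#include <linux/netdevice.h>

struct mydev {
	struct net_device *dev;
	struct napi_struct napi;
};

static int mydev_poll(struct napi_struct *napi, int budget)
{
	struct mydev *md = container_of(napi, struct mydev, napi);
	int done;

again:
	done = mydev_rx(md, budget);	/* consume up to budget buffers */
	if (done < budget) {
		netif_rx_complete(md->dev, napi);	/* leave poll mode */
		/* Work may have arrived before the source was re-armed;
		 * if so, re-claim the NAPI context, mask the source
		 * again, and go around once more. */
		if (unlikely(!mydev_enable_irq(md))
		    && napi_schedule_prep(napi)) {
			mydev_disable_irq(md);
			__netif_rx_schedule(md->dev, napi);
			goto again;
		}
	}
	return done;
}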
@@ -278,10 +281,11 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
 		pr_debug("%s: virtio not prepared to send\n", dev->name);
 		netif_stop_queue(dev);
 
-		/* Activate callback for using skbs: if this fails it
+		/* Activate callback for using skbs: if this returns false it
 		 * means some were used in the meantime. */
 		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-			printk("Unlikely: restart svq failed\n");
+			printk("Unlikely: restart svq race\n");
+			vi->svq->vq_ops->disable_cb(vi->svq);
 			netif_start_queue(dev);
 			goto again;
 		}
@@ -294,6 +298,15 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void virtnet_netpoll(struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+
+	napi_schedule(&vi->napi);
+}
+#endif
+
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
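Netpoll (the machinery behind netconsole) must be able to drain the device while normal interrupt delivery is unavailable, so a poll_controller hook may do nothing more than kick the driver's ordinary NAPI poll path, which is exactly what virtnet_netpoll() does. Once the hook is wired up in virtnet_probe() (next hunk), pointing netconsole at a virtio interface uses the standard parameter syntax, e.g. netconsole=6665@192.168.0.10/eth0,6666@192.168.0.2/00:16:3e:00:12:34 (addresses and interface name here are illustrative only).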
@@ -336,6 +349,9 @@ static int virtnet_probe(struct virtio_device *vdev)
 	dev->stop = virtnet_close;
 	dev->hard_start_xmit = start_xmit;
 	dev->features = NETIF_F_HIGHDMA;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = virtnet_netpoll;
+#endif
 	SET_NETDEV_DEV(dev, &vdev->dev);
 
 	/* Do we support "hardware" checksums? */
drivers/virtio/virtio_balloon.c

@@ -152,7 +152,7 @@ static void virtballoon_changed(struct virtio_device *vdev)
 	wake_up(&vb->config_change);
 }
 
-static inline int towards_target(struct virtio_balloon *vb)
+static inline s64 towards_target(struct virtio_balloon *vb)
 {
 	u32 v;
 	__virtio_config_val(vb->vdev,
@@ -176,7 +176,7 @@ static int balloon(void *_vballoon)
 
 	set_freezable();
 	while (!kthread_should_stop()) {
-		int diff;
+		s64 diff;
 
 		try_to_freeze();
 		wait_event_interruptible(vb->config_change,
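Why int was not enough: the device supplies the balloon target as a u32 page count and num_pages is also a u32, so once a target passes 2^31 pages (a bit over 2 billion pages, i.e. 8 TiB of 4 KiB pages) their difference no longer fits in a 32-bit int and the balloon thread would move in the wrong direction. Computing in s64 preserves both sign and magnitude. A standalone userspace demo of the truncation (illustrative values, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t target = 3000000000u;	/* ~2.8 billion pages requested */
	uint32_t num_pages = 0;		/* nothing ballooned yet */

	int32_t bad = target - num_pages;	/* wraps negative on the usual
						 * two's-complement targets */
	int64_t good = (int64_t)target - num_pages;	/* correct */

	printf("int: %d  s64: %lld\n", bad, (long long)good);
	/* prints: int: -1294967296  s64: 3000000000 */
	return 0;
}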
drivers/virtio/virtio_pci.c

@@ -177,6 +177,7 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 	struct virtio_pci_device *vp_dev = opaque;
 	struct virtio_pci_vq_info *info;
 	irqreturn_t ret = IRQ_NONE;
+	unsigned long flags;
 	u8 isr;
 
 	/* reading the ISR has the effect of also clearing it so it's very
@@ -197,12 +198,12 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 		drv->config_changed(&vp_dev->vdev);
 	}
 
-	spin_lock(&vp_dev->lock);
+	spin_lock_irqsave(&vp_dev->lock, flags);
 	list_for_each_entry(info, &vp_dev->virtqueues, node) {
 		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
 	}
-	spin_unlock(&vp_dev->lock);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	return ret;
 }
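The reason for the _irqsave variants: vp_dev->lock is taken here in hard interrupt context and also from process context in vp_find_vq() and vp_del_vq() below. If process context held the lock with interrupts enabled and this handler fired on the same CPU, the handler would spin forever on a lock its own CPU can never release. Disabling local interrupts for the critical section closes that window. A minimal sketch of the rule (struct mydev and struct item are hypothetical):

#include <linux/spinlock.h>
#include <linux/list.h>

struct item {
	struct list_head node;
};

struct mydev {
	spinlock_t lock;		/* also taken by the IRQ handler */
	struct list_head items;
};

/* Process-context path: local IRQs must be off while the lock is
 * held, because the interrupt handler takes the same lock. */
static void add_item(struct mydev *dev, struct item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	list_add(&it->node, &dev->items);
	spin_unlock_irqrestore(&dev->lock, flags);
}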
@@ -214,6 +215,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	struct virtio_pci_vq_info *info;
 	struct virtqueue *vq;
+	unsigned long flags;
 	u16 num;
 	int err;
 
@@ -255,9 +257,9 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 	vq->priv = info;
 	info->vq = vq;
 
-	spin_lock(&vp_dev->lock);
+	spin_lock_irqsave(&vp_dev->lock, flags);
 	list_add(&info->node, &vp_dev->virtqueues);
-	spin_unlock(&vp_dev->lock);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	return vq;
@@ -274,10 +276,11 @@ static void vp_del_vq(struct virtqueue *vq)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 	struct virtio_pci_vq_info *info = vq->priv;
+	unsigned long flags;
 
-	spin_lock(&vp_dev->lock);
+	spin_lock_irqsave(&vp_dev->lock, flags);
 	list_del(&info->node);
-	spin_unlock(&vp_dev->lock);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	vring_del_virtqueue(vq);
drivers/virtio/virtio_ring.c

@@ -232,7 +232,6 @@ static bool vring_enable_cb(struct virtqueue *_vq)
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 	mb();
 	if (unlikely(more_used(vq))) {
-		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 		END_USE(vq);
 		return false;
 	}
include/linux/virtio.h

@@ -43,8 +43,9 @@ struct virtqueue
  *	vq: the struct virtqueue we're talking about.
  * @enable_cb: restart callbacks after disable_cb.
  *	vq: the struct virtqueue we're talking about.
- *	This returns "false" (and doesn't re-enable) if there are pending
- *	buffers in the queue, to avoid a race.
+ *	This re-enables callbacks; it returns "false" if there are pending
+ *	buffers in the queue, to detect a possible race between the driver
+ *	checking for more work, and enabling callbacks.
  *
  * Locking rules are straightforward: the driver is responsible for
  * locking. No two operations may be invoked simultaneously.
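The documentation change matches the vring_enable_cb() hunk above: on failure, enable_cb() now leaves callbacks enabled and merely reports the pending buffers; a caller that wants to keep working with callbacks masked must call disable_cb() itself, as both virtio_net paths in this merge now do. A caller-side sketch of the contract (process_more_buffers() is a hypothetical stand-in for the driver's work loop):

#include <linux/kernel.h>
#include <linux/virtio.h>

static void rearm_or_continue(struct virtqueue *vq)
{
	if (unlikely(!vq->vq_ops->enable_cb(vq))) {
		/* Buffers were pending: callbacks came back enabled,
		 * so mask them again before processing the backlog. */
		vq->vq_ops->disable_cb(vq);
		process_more_buffers(vq);
	}
}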