Commit a7e8ddd8 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc update from David Miller:
 "Not a lot of stuff this time around, mostly bug fixing:

   - Fix alignment of the 32-bit crosscall data structure on Leon, from
     Andreas Larsson.

   - Several fixes to the virtual disk driver on sparc64 by Dwight
     Engen, including handling resets of the service domain properly"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sunvdc: reconnect ldc after vds service domain restarts
  sparc/ldc: create separate ldc_unbind from ldc_free
  vio: create routines for inc,dec vio dring indexes
  sunvdc: fix module unload/reload
  sparc32, leon: Align ccall_info to prevent unaligned traps on crosscall
parents ad8f723a 76e74bbe
@@ -61,6 +61,7 @@ void ldc_free(struct ldc_channel *lp);

 /* Register TX and RX queues of the link with the hypervisor. */
 int ldc_bind(struct ldc_channel *lp);
+void ldc_unbind(struct ldc_channel *lp);

 /* For non-RAW protocols we need to complete a handshake before
  * communication can proceed.  ldc_connect() does that, if the
......
@@ -300,6 +300,21 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr,
 		  ((dr->prod - dr->cons) & (ring_size - 1)) - 1);
 }

+static inline u32 vio_dring_next(struct vio_dring_state *dr, u32 index)
+{
+	if (++index == dr->num_entries)
+		index = 0;
+	return index;
+}
+
+static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
+{
+	if (index == 0)
+		return dr->num_entries - 1;
+	else
+		return index - 1;
+}
+
 #define	VIO_MAX_TYPE_LEN	32
 #define	VIO_MAX_COMPAT_LEN	64
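The two helpers above are the shared replacements for the open-coded ring-index arithmetic in sunvdc and sunvnet (see the hunks below). For reference, a minimal standalone sketch (plain userspace C, not kernel code; `struct dring` is a pared-down stand-in for `vio_dring_state`) of the wrap-around behaviour, and of the fact that for a power-of-two ring `vio_dring_next()` agrees with the old `(i + 1) & (size - 1)` masking it replaces, while also working for ring sizes that are not a power of two:

```c
/* Standalone sketch (userspace, not kernel code): wrap-around behaviour of
 * the new vio_dring_next()/vio_dring_prev() helpers, and equivalence with
 * the old "(i + 1) & (size - 1)" arithmetic for a power-of-two ring (the
 * old sunvdc code masked with VDC_TX_RING_SIZE - 1, so that size is a
 * power of two). "struct dring" is a pared-down stand-in for
 * vio_dring_state.
 */
#include <assert.h>
#include <stdio.h>

struct dring { unsigned int num_entries; };

static unsigned int dring_next(const struct dring *dr, unsigned int index)
{
	if (++index == dr->num_entries)
		index = 0;
	return index;
}

static unsigned int dring_prev(const struct dring *dr, unsigned int index)
{
	return index == 0 ? dr->num_entries - 1 : index - 1;
}

int main(void)
{
	struct dring dr = { .num_entries = 512 };	/* any power-of-two size */
	unsigned int i;

	for (i = 0; i < dr.num_entries; i++) {
		/* matches the old mask arithmetic ... */
		assert(dring_next(&dr, i) == ((i + 1) & (dr.num_entries - 1)));
		/* ... and prev undoes next */
		assert(dring_prev(&dr, dring_next(&dr, i)) == i);
	}
	printf("next(511) = %u, prev(0) = %u\n",
	       dring_next(&dr, 511), dring_prev(&dr, 0));	/* prints 0 and 511 */
	return 0;
}
```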
......
@@ -1222,11 +1222,12 @@ struct ldc_channel *ldc_alloc(unsigned long id,
 }
 EXPORT_SYMBOL(ldc_alloc);

-void ldc_free(struct ldc_channel *lp)
+void ldc_unbind(struct ldc_channel *lp)
 {
 	if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
 		free_irq(lp->cfg.rx_irq, lp);
 		free_irq(lp->cfg.tx_irq, lp);
+		lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
 	}

 	if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
@@ -1240,10 +1241,15 @@ void ldc_free(struct ldc_channel *lp)
 		lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
 	}

-	hlist_del(&lp->list);
+	ldc_set_state(lp, LDC_STATE_INIT);
+}
+EXPORT_SYMBOL(ldc_unbind);

+void ldc_free(struct ldc_channel *lp)
+{
+	ldc_unbind(lp);
+	hlist_del(&lp->list);
 	kfree(lp->mssbuf);
 	ldc_iommu_release(lp);

 	kfree(lp);
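Net effect of the split: ldc_unbind() releases what ldc_bind() acquired (IRQs, registered queues) and returns the channel to LDC_STATE_INIT, while ldc_free() is unbind plus removal from the channel list and freeing of the channel's memory, so a caller can tear a channel down to its initial state separately from freeing it (the sunvdc reset path below calls ldc_unbind() directly in vdc_port_down()). A rough standalone illustration of that pattern (hypothetical stand-ins, not the real ldc API):

```c
/* Rough standalone illustration (hypothetical stand-ins, not the ldc API):
 * "unbind" undoes what "bind" acquired and returns the object to its
 * initial state; "free" is unbind plus deallocation, mirroring how
 * ldc_free() now calls ldc_unbind() before releasing the channel itself.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct channel {
	bool bound;		/* stands in for the LDC_FLAG_REGISTERED_* flags */
};

static struct channel *channel_alloc(void)
{
	return calloc(1, sizeof(struct channel));
}

static void channel_bind(struct channel *ch)
{
	ch->bound = true;	/* request IRQs, register queues, ... */
	printf("bound\n");
}

static void channel_unbind(struct channel *ch)
{
	if (ch->bound) {
		ch->bound = false;	/* free IRQs, unregister queues, ... */
		printf("back to initial state\n");
	}
}

static void channel_free(struct channel *ch)
{
	channel_unbind(ch);	/* free always implies unbind */
	free(ch);
	printf("freed\n");
}

int main(void)
{
	struct channel *ch = channel_alloc();

	if (!ch)
		return 1;
	channel_bind(ch);
	channel_unbind(ch);	/* teardown without deallocation, e.g. on a peer reset */
	channel_free(ch);	/* final teardown */
	return 0;
}
```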
......
@@ -368,7 +368,7 @@ static struct smp_funcall {
 	unsigned long arg5;
 	unsigned long processors_in[NR_CPUS];  /* Set when ipi entered. */
 	unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
-} ccall_info;
+} ccall_info __attribute__((aligned(8)));

 static DEFINE_SPINLOCK(cross_call_lock);
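The attribute raises the alignment of ccall_info above the natural 4-byte alignment its unsigned long members give it on sparc32, presumably because the crosscall path accesses the structure with double-word (ldd/std style) accesses that require 8-byte alignment and otherwise trap, as the commit title indicates. A small standalone sketch (hypothetical field names, not the full smp_funcall layout) of what the attribute changes:

```c
/* Standalone sketch: effect of __attribute__((aligned(8))) on a struct whose
 * members are all 4-byte longs, as on sparc32. Without the attribute such a
 * struct is only guaranteed 4-byte alignment, so 8-byte accesses to it can
 * land on a 4-byte-aligned address and trap on sparc32. On a 64-bit host
 * long is already 8 bytes, so both structs below print 8; the difference
 * shows up when compiled for a 32-bit target. Field names are illustrative.
 */
#include <stdio.h>

struct funcall_plain {
	unsigned long func;
	unsigned long arg1;
};

struct funcall_aligned {
	unsigned long func;
	unsigned long arg1;
} __attribute__((aligned(8)));

static struct funcall_plain   info_plain;
static struct funcall_aligned info_aligned;

int main(void)
{
	printf("plain:   alignof=%zu addr=%p\n",
	       __alignof__(info_plain), (void *)&info_plain);
	printf("aligned: alignof=%zu addr=%p\n",
	       __alignof__(info_aligned), (void *)&info_aligned);
	return 0;
}
```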
......
@@ -23,8 +23,8 @@
 #define DRV_MODULE_NAME		"sunvdc"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.1"
-#define DRV_MODULE_RELDATE	"February 13, 2013"
+#define DRV_MODULE_VERSION	"1.2"
+#define DRV_MODULE_RELDATE	"November 24, 2014"

 static char version[] =
 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -40,6 +40,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 #define WAITING_FOR_GEN_CMD	0x04
 #define WAITING_FOR_ANY		-1

+static struct workqueue_struct *sunvdc_wq;
+
 struct vdc_req_entry {
 	struct request *req;
 };
@@ -60,6 +62,10 @@ struct vdc_port {
 	u64			max_xfer_size;
 	u32			vdisk_block_size;

+	u64			ldc_timeout;
+	struct timer_list	ldc_reset_timer;
+	struct work_struct	ldc_reset_work;
+
 	/* The server fills these in for us in the disk attribute
 	 * ACK packet.
 	 */
@@ -71,6 +77,10 @@ struct vdc_port {
 	char			disk_name[32];
 };

+static void vdc_ldc_reset(struct vdc_port *port);
+static void vdc_ldc_reset_work(struct work_struct *work);
+static void vdc_ldc_reset_timer(unsigned long _arg);
+
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 {
 	return container_of(vio, struct vdc_port, vio);
@@ -150,6 +160,21 @@ static const struct block_device_operations vdc_fops = {
 	.ioctl		= vdc_ioctl,
 };

+static void vdc_blk_queue_start(struct vdc_port *port)
+{
+	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+	/* restart blk queue when ring is half emptied. also called after
+	 * handshake completes, so check for initial handshake before we've
+	 * allocated a disk.
+	 */
+	if (port->disk && blk_queue_stopped(port->disk->queue) &&
+	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
+		blk_start_queue(port->disk->queue);
+	}
+}
+
 static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
 {
 	if (vio->cmp &&
@@ -163,7 +188,11 @@ static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)

 static void vdc_handshake_complete(struct vio_driver_state *vio)
 {
+	struct vdc_port *port = to_vdc_port(vio);
+
+	del_timer(&port->ldc_reset_timer);
 	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
+	vdc_blk_queue_start(port);
 }

 static int vdc_handle_unknown(struct vdc_port *port, void *arg)
@@ -269,7 +298,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
 	desc->hdr.state = VIO_DESC_FREE;
-	dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);
+	dr->cons = vio_dring_next(dr, index);

 	req = rqe->req;
 	if (req == NULL) {
@@ -281,10 +310,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);

-	/* restart blk queue when ring is half emptied */
-	if (blk_queue_stopped(port->disk->queue) &&
-	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
-		blk_start_queue(port->disk->queue);
+	vdc_blk_queue_start(port);
 }

 static int vdc_ack(struct vdc_port *port, void *msgbuf)
@@ -317,17 +343,20 @@ static void vdc_event(void *arg, int event)
 	spin_lock_irqsave(&vio->lock, flags);

-	if (unlikely(event == LDC_EVENT_RESET ||
-		     event == LDC_EVENT_UP)) {
+	if (unlikely(event == LDC_EVENT_RESET)) {
 		vio_link_state_change(vio, event);
-		spin_unlock_irqrestore(&vio->lock, flags);
-		return;
+		queue_work(sunvdc_wq, &port->ldc_reset_work);
+		goto out;
+	}
+
+	if (unlikely(event == LDC_EVENT_UP)) {
+		vio_link_state_change(vio, event);
+		goto out;
 	}

 	if (unlikely(event != LDC_EVENT_DATA_READY)) {
-		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
-		spin_unlock_irqrestore(&vio->lock, flags);
-		return;
+		pr_warn(PFX "Unexpected LDC event %d\n", event);
+		goto out;
 	}

 	err = 0;
@@ -371,6 +400,7 @@ static void vdc_event(void *arg, int event)
 	}

 	if (err < 0)
 		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
+out:
 	spin_unlock_irqrestore(&vio->lock, flags);
 }
@@ -403,6 +433,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
 		delay = 128;
 	} while (err == -EAGAIN);

+	if (err == -ENOTCONN)
+		vdc_ldc_reset(port);
 	return err;
 }

@@ -472,7 +504,7 @@ static int __send_request(struct request *req)
 		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
 	} else {
 		port->req_id++;
-		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+		dr->prod = vio_dring_next(dr, dr->prod);
 	}

 	return err;
@@ -626,7 +658,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
 	err = __vdc_tx_trigger(port);
 	if (err >= 0) {
 		port->req_id++;
-		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+		dr->prod = vio_dring_next(dr, dr->prod);
 		spin_unlock_irqrestore(&port->vio.lock, flags);

 		wait_for_completion(&comp.com);
@@ -690,12 +722,9 @@ static void vdc_free_tx_ring(struct vdc_port *port)
 	}
 }

-static int probe_disk(struct vdc_port *port)
+static int vdc_port_up(struct vdc_port *port)
 {
 	struct vio_completion comp;
-	struct request_queue *q;
-	struct gendisk *g;
-	int err;

 	init_completion(&comp.com);
 	comp.err = 0;
@@ -703,10 +732,27 @@ static int probe_disk(struct vdc_port *port)
 	port->vio.cmp = &comp;

 	vio_port_up(&port->vio);
 	wait_for_completion(&comp.com);
-	if (comp.err)
-		return comp.err;
+	return comp.err;
+}
+
+static void vdc_port_down(struct vdc_port *port)
+{
+	ldc_disconnect(port->vio.lp);
+	ldc_unbind(port->vio.lp);
+	vdc_free_tx_ring(port);
+	vio_ldc_free(&port->vio);
+}
+
+static int probe_disk(struct vdc_port *port)
+{
+	struct request_queue *q;
+	struct gendisk *g;
+	int err;
+
+	err = vdc_port_up(port);
+	if (err)
+		return err;

 	if (vdc_version_supported(port, 1, 1)) {
 		/* vdisk_size should be set during the handshake, if it wasn't
@@ -819,6 +865,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	struct mdesc_handle *hp;
 	struct vdc_port *port;
 	int err;
+	const u64 *ldc_timeout;

 	print_version();
@@ -848,6 +895,16 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 		 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
 	port->vdisk_size = -1;

+	/* Actual wall time may be double due to do_generic_file_read() doing
+	 * a readahead I/O first, and once that fails it will try to read a
+	 * single page.
+	 */
+	ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
+	port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
+	setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
+		    (unsigned long)port);
+	INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
+
 	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
 			      vdc_versions, ARRAY_SIZE(vdc_versions),
 			      &vdc_vio_ops, port->disk_name);
@@ -896,8 +953,21 @@ static int vdc_port_remove(struct vio_dev *vdev)
 	struct vdc_port *port = dev_get_drvdata(&vdev->dev);

 	if (port) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&port->vio.lock, flags);
+		blk_stop_queue(port->disk->queue);
+		spin_unlock_irqrestore(&port->vio.lock, flags);
+
+		flush_work(&port->ldc_reset_work);
+		del_timer_sync(&port->ldc_reset_timer);
 		del_timer_sync(&port->vio.timer);

+		del_gendisk(port->disk);
+		blk_cleanup_queue(port->disk->queue);
+		put_disk(port->disk);
+		port->disk = NULL;
+
 		vdc_free_tx_ring(port);
 		vio_ldc_free(&port->vio);
@@ -908,6 +978,102 @@ static int vdc_port_remove(struct vio_dev *vdev)

 	return 0;
 }

+static void vdc_requeue_inflight(struct vdc_port *port)
+{
+	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+	u32 idx;
+
+	for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
+		struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
+		struct vdc_req_entry *rqe = &port->rq_arr[idx];
+		struct request *req;
+
+		ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
+		desc->hdr.state = VIO_DESC_FREE;
+		dr->cons = vio_dring_next(dr, idx);
+
+		req = rqe->req;
+		if (req == NULL) {
+			vdc_end_special(port, desc);
+			continue;
+		}
+
+		rqe->req = NULL;
+		blk_requeue_request(port->disk->queue, req);
+	}
+}
+
+static void vdc_queue_drain(struct vdc_port *port)
+{
+	struct request *req;
+
+	while ((req = blk_fetch_request(port->disk->queue)) != NULL)
+		__blk_end_request_all(req, -EIO);
+}
+
+static void vdc_ldc_reset_timer(unsigned long _arg)
+{
+	struct vdc_port *port = (struct vdc_port *) _arg;
+	struct vio_driver_state *vio = &port->vio;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vio->lock, flags);
+	if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
+		pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
+			port->disk_name, port->ldc_timeout);
+		vdc_queue_drain(port);
+		vdc_blk_queue_start(port);
+	}
+	spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset_work(struct work_struct *work)
+{
+	struct vdc_port *port;
+	struct vio_driver_state *vio;
+	unsigned long flags;
+
+	port = container_of(work, struct vdc_port, ldc_reset_work);
+	vio = &port->vio;
+
+	spin_lock_irqsave(&vio->lock, flags);
+	vdc_ldc_reset(port);
+	spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset(struct vdc_port *port)
+{
+	int err;
+
+	assert_spin_locked(&port->vio.lock);
+
+	pr_warn(PFX "%s ldc link reset\n", port->disk_name);
+	blk_stop_queue(port->disk->queue);
+	vdc_requeue_inflight(port);
+	vdc_port_down(port);
+
+	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
+	if (err) {
+		pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
+		return;
+	}
+
+	err = vdc_alloc_tx_ring(port);
+	if (err) {
+		pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
+		goto err_free_ldc;
+	}
+
+	if (port->ldc_timeout)
+		mod_timer(&port->ldc_reset_timer,
+			  round_jiffies(jiffies + HZ * port->ldc_timeout));
+	mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
+	return;
+
+err_free_ldc:
+	vio_ldc_free(&port->vio);
+}
+
 static const struct vio_device_id vdc_port_match[] = {
 	{
 		.type = "vdc-port",
@@ -927,9 +1093,13 @@ static int __init vdc_init(void)
 {
 	int err;

+	sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+	if (!sunvdc_wq)
+		return -ENOMEM;
+
 	err = register_blkdev(0, VDCBLK_NAME);
 	if (err < 0)
-		goto out_err;
+		goto out_free_wq;

 	vdc_major = err;
@@ -943,7 +1113,8 @@ static int __init vdc_init(void)
 	unregister_blkdev(vdc_major, VDCBLK_NAME);
 	vdc_major = 0;

-out_err:
+out_free_wq:
+	destroy_workqueue(sunvdc_wq);
 	return err;
 }

@@ -951,6 +1122,7 @@ static void __exit vdc_exit(void)
 {
 	vio_unregister_driver(&vdc_port_driver);
 	unregister_blkdev(vdc_major, VDCBLK_NAME);
+	destroy_workqueue(sunvdc_wq);
 }

 module_init(vdc_init);
......
@@ -466,23 +466,6 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
 	return err;
 }

-static u32 next_idx(u32 idx, struct vio_dring_state *dr)
-{
-	if (++idx == dr->num_entries)
-		idx = 0;
-	return idx;
-}
-
-static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
-{
-	if (idx == 0)
-		idx = dr->num_entries - 1;
-	else
-		idx--;
-
-	return idx;
-}
-
 static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
 					struct vio_dring_state *dr,
 					u32 index)
@@ -556,7 +539,8 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
 	int ack_start = -1, ack_end = -1;
 	bool send_ack = true;

-	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
+	end = (end == (u32) -1) ? vio_dring_prev(dr, start)
+				: vio_dring_next(dr, end);

 	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
@@ -570,7 +554,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
 		if (ack_start == -1)
 			ack_start = start;
 		ack_end = start;
-		start = next_idx(start, dr);
+		start = vio_dring_next(dr, start);
 		if (ack && start != end) {
 			err = vnet_send_ack(port, dr, ack_start, ack_end,
 					    VIO_DRING_ACTIVE);
@@ -584,7 +568,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
 		}
 	}
 	if (unlikely(ack_start == -1))
-		ack_start = ack_end = prev_idx(start, dr);
+		ack_start = ack_end = vio_dring_prev(dr, start);
 	if (send_ack) {
 		port->napi_resume = false;
 		return vnet_send_ack(port, dr, ack_start, ack_end,
@@ -633,7 +617,7 @@ static int idx_is_pending(struct vio_dring_state *dr, u32 end)
 			found = 1;
 			break;
 		}
-		idx = next_idx(idx, dr);
+		idx = vio_dring_next(dr, idx);
 	}
 	return found;
 }
@@ -663,7 +647,7 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
 	/* sync for race conditions with vnet_start_xmit() and tell xmit it
 	 * is time to send a trigger.
 	 */
-	dr->cons = next_idx(end, dr);
+	dr->cons = vio_dring_next(dr, end);
 	desc = vio_dring_entry(dr, dr->cons);
 	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
 		/* vnet_start_xmit() just populated this dring but missed
@@ -784,7 +768,7 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
 		pkt->tag.stype = VIO_SUBTYPE_INFO;
 		pkt->tag.stype_env = VIO_DRING_DATA;
 		pkt->seq = dr->rcv_nxt;
-		pkt->start_idx = next_idx(port->napi_stop_idx, dr);
+		pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
 		pkt->end_idx = -1;
 		goto napi_resume;
 	}
......