Commit 7376419a authored by Wei Liu, committed by David S. Miller

xen-netback: rename functions

As we move to a 1:1 model and merge xen_netbk and xenvif together, it is
better to use a single prefix for all functions in xen-netback.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b3f980bd
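
The change is mechanical: each function keeps its signature and only trades the xen_netbk_ (or bare netbk_) prefix for xenvif_, with callers updated to match. A minimal before/after sketch of the pattern, using declarations taken from the header diff below:

        /* Before: mixed prefixes for the backend entry points. */
        int  xen_netbk_rx_ring_full(struct xenvif *vif);
        void xen_netbk_rx_action(struct xenvif *vif);
        int  xen_netbk_kthread(void *data);

        /* After: a single xenvif_ prefix throughout xen-netback. */
        int  xenvif_rx_ring_full(struct xenvif *vif);
        void xenvif_rx_action(struct xenvif *vif);
        int  xenvif_kthread(void *data);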
@@ -190,21 +190,21 @@ void xenvif_xenbus_fini(void);
 int xenvif_schedulable(struct xenvif *vif);
-int xen_netbk_rx_ring_full(struct xenvif *vif);
+int xenvif_rx_ring_full(struct xenvif *vif);
-int xen_netbk_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif *vif);
 /* (Un)Map communication rings. */
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
+void xenvif_unmap_frontend_rings(struct xenvif *vif);
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
+int xenvif_map_frontend_rings(struct xenvif *vif,
                               grant_ref_t tx_ring_ref,
                               grant_ref_t rx_ring_ref);
 /* Check for SKBs from frontend and schedule backend processing */
-void xen_netbk_check_rx_xenvif(struct xenvif *vif);
+void xenvif_check_rx_xenvif(struct xenvif *vif);
 /* Queue an SKB for transmission to the frontend */
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
@@ -212,12 +212,12 @@ void xenvif_notify_tx_completion(struct xenvif *vif);
 void xenvif_carrier_off(struct xenvif *vif);
 /* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
-int xen_netbk_tx_action(struct xenvif *vif, int budget);
+int xenvif_tx_action(struct xenvif *vif, int budget);
-void xen_netbk_rx_action(struct xenvif *vif);
+void xenvif_rx_action(struct xenvif *vif);
-int xen_netbk_kthread(void *data);
+int xenvif_kthread(void *data);
 extern bool separate_tx_rx_irq;
...
@@ -48,7 +48,7 @@ int xenvif_schedulable(struct xenvif *vif)
 static int xenvif_rx_schedulable(struct xenvif *vif)
 {
-        return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+        return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
 }
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -66,7 +66,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
         struct xenvif *vif = container_of(napi, struct xenvif, napi);
         int work_done;
-        work_done = xen_netbk_tx_action(vif, budget);
+        work_done = xenvif_tx_action(vif, budget);
         if (work_done < budget) {
                 int more_to_do = 0;
@@ -133,12 +133,12 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 goto drop;
         /* Reserve ring slots for the worst-case number of fragments. */
-        vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
+        vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
-        if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+        if (vif->can_queue && xenvif_must_stop_queue(vif))
                 netif_stop_queue(dev);
-        xen_netbk_queue_tx_skb(vif, skb);
+        xenvif_queue_tx_skb(vif, skb);
         return NETDEV_TX_OK;
@@ -166,7 +166,7 @@ static void xenvif_up(struct xenvif *vif)
         enable_irq(vif->tx_irq);
         if (vif->tx_irq != vif->rx_irq)
                 enable_irq(vif->rx_irq);
-        xen_netbk_check_rx_xenvif(vif);
+        xenvif_check_rx_xenvif(vif);
 }
 static void xenvif_down(struct xenvif *vif)
@@ -368,7 +368,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
         __module_get(THIS_MODULE);
-        err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+        err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
         if (err < 0)
                 goto err;
@@ -405,7 +405,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
         }
         init_waitqueue_head(&vif->wq);
-        vif->task = kthread_create(xen_netbk_kthread,
+        vif->task = kthread_create(xenvif_kthread,
                                    (void *)vif, vif->dev->name);
         if (IS_ERR(vif->task)) {
                 pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
@@ -433,7 +433,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
         unbind_from_irqhandler(vif->tx_irq, vif);
         vif->tx_irq = 0;
 err_unmap:
-        xen_netbk_unmap_frontend_rings(vif);
+        xenvif_unmap_frontend_rings(vif);
 err:
         module_put(THIS_MODULE);
         return err;
@@ -481,7 +481,7 @@ void xenvif_disconnect(struct xenvif *vif)
         unregister_netdev(vif->dev);
-        xen_netbk_unmap_frontend_rings(vif);
+        xenvif_unmap_frontend_rings(vif);
         free_netdev(vif->dev);
...
@@ -80,8 +80,9 @@ static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
         return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
 }
-static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
                                u8 status);
 static void make_tx_response(struct xenvif *vif,
                              struct xen_netif_tx_request *txp,
                              s8 st);
@@ -150,7 +151,7 @@ static int max_required_rx_slots(struct xenvif *vif)
         return max;
 }
-int xen_netbk_rx_ring_full(struct xenvif *vif)
+int xenvif_rx_ring_full(struct xenvif *vif)
 {
         RING_IDX peek = vif->rx_req_cons_peek;
         RING_IDX needed = max_required_rx_slots(vif);
@@ -159,16 +160,16 @@ int xen_netbk_rx_ring_full(struct xenvif *vif)
                ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
 }
-int xen_netbk_must_stop_queue(struct xenvif *vif)
+int xenvif_must_stop_queue(struct xenvif *vif)
 {
-        if (!xen_netbk_rx_ring_full(vif))
+        if (!xenvif_rx_ring_full(vif))
                 return 0;
         vif->rx.sring->req_event = vif->rx_req_cons_peek +
                                    max_required_rx_slots(vif);
         mb(); /* request notification /then/ check the queue */
-        return xen_netbk_rx_ring_full(vif);
+        return xenvif_rx_ring_full(vif);
 }
 /*
@@ -214,9 +215,9 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 /*
  * Figure out how many ring slots we're going to need to send @skb to
  * the guest. This function is essentially a dry run of
- * netbk_gop_frag_copy.
+ * xenvif_gop_frag_copy.
  */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 {
         unsigned int count;
         int i, copy_off;
@@ -296,7 +297,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
  * Set up the grant operations for this fragment. If it's a flipping
  * interface, we also set up the unmap request from here.
  */
-static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                                  struct netrx_pending_operations *npo,
                                  struct page *page, unsigned long size,
                                  unsigned long offset, int *head)
@@ -382,7 +383,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
  * frontend-side LRO).
  */
-static int netbk_gop_skb(struct sk_buff *skb,
+static int xenvif_gop_skb(struct sk_buff *skb,
                           struct netrx_pending_operations *npo)
 {
         struct xenvif *vif = netdev_priv(skb->dev);
@@ -426,13 +427,13 @@ static int netbk_gop_skb(struct sk_buff *skb,
                 if (data + len > skb_tail_pointer(skb))
                         len = skb_tail_pointer(skb) - data;
-                netbk_gop_frag_copy(vif, skb, npo,
+                xenvif_gop_frag_copy(vif, skb, npo,
                                      virt_to_page(data), len, offset, &head);
                 data += len;
         }
         for (i = 0; i < nr_frags; i++) {
-                netbk_gop_frag_copy(vif, skb, npo,
+                xenvif_gop_frag_copy(vif, skb, npo,
                                      skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                      skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                      skb_shinfo(skb)->frags[i].page_offset,
@@ -443,12 +444,12 @@ static int netbk_gop_skb(struct sk_buff *skb,
 }
 /*
- * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
+ * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
  * used to set up the operations on the top of
  * netrx_pending_operations, which have since been done. Check that
  * they didn't give any errors and advance over them.
  */
-static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
+static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
                             struct netrx_pending_operations *npo)
 {
         struct gnttab_copy *copy_op;
@@ -468,7 +469,7 @@ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
         return status;
 }
-static void netbk_add_frag_responses(struct xenvif *vif, int status,
+static void xenvif_add_frag_responses(struct xenvif *vif, int status,
                                       struct xenvif_rx_meta *meta,
                                       int nr_meta_slots)
 {
@@ -498,12 +499,12 @@ struct skb_cb_overlay {
         int meta_slots_used;
 };
-static void xen_netbk_kick_thread(struct xenvif *vif)
+static void xenvif_kick_thread(struct xenvif *vif)
 {
         wake_up(&vif->wq);
 }
-void xen_netbk_rx_action(struct xenvif *vif)
+void xenvif_rx_action(struct xenvif *vif)
 {
         s8 status;
         u16 flags;
@@ -532,7 +533,7 @@ void xen_netbk_rx_action(struct xenvif *vif)
                 nr_frags = skb_shinfo(skb)->nr_frags;
                 sco = (struct skb_cb_overlay *)skb->cb;
-                sco->meta_slots_used = netbk_gop_skb(skb, &npo);
+                sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
                 count += nr_frags + 1;
@@ -575,7 +576,7 @@ void xen_netbk_rx_action(struct xenvif *vif)
                 vif->dev->stats.tx_bytes += skb->len;
                 vif->dev->stats.tx_packets++;
-                status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
+                status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
                 if (sco->meta_slots_used == 1)
                         flags = 0;
@@ -611,7 +612,7 @@ void xen_netbk_rx_action(struct xenvif *vif)
                         gso->flags = 0;
                 }
-                netbk_add_frag_responses(vif, status,
+                xenvif_add_frag_responses(vif, status,
                                           vif->meta + npo.meta_cons + 1,
                                           sco->meta_slots_used);
@@ -631,17 +632,17 @@ void xen_netbk_rx_action(struct xenvif *vif)
         /* More work to do? */
         if (!skb_queue_empty(&vif->rx_queue))
-                xen_netbk_kick_thread(vif);
+                xenvif_kick_thread(vif);
 }
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
 {
         skb_queue_tail(&vif->rx_queue, skb);
-        xen_netbk_kick_thread(vif);
+        xenvif_kick_thread(vif);
 }
-void xen_netbk_check_rx_xenvif(struct xenvif *vif)
+void xenvif_check_rx_xenvif(struct xenvif *vif)
 {
         int more_to_do;
@@ -675,10 +676,10 @@ static void tx_credit_callback(unsigned long data)
 {
         struct xenvif *vif = (struct xenvif *)data;
         tx_add_credit(vif);
-        xen_netbk_check_rx_xenvif(vif);
+        xenvif_check_rx_xenvif(vif);
 }
-static void netbk_tx_err(struct xenvif *vif,
+static void xenvif_tx_err(struct xenvif *vif,
                           struct xen_netif_tx_request *txp, RING_IDX end)
 {
         RING_IDX cons = vif->tx.req_cons;
@@ -692,13 +693,13 @@ static void netbk_tx_err(struct xenvif *vif,
         vif->tx.req_cons = cons;
 }
-static void netbk_fatal_tx_err(struct xenvif *vif)
+static void xenvif_fatal_tx_err(struct xenvif *vif)
 {
         netdev_err(vif->dev, "fatal error; disabling device\n");
         xenvif_carrier_off(vif);
 }
-static int netbk_count_requests(struct xenvif *vif,
+static int xenvif_count_requests(struct xenvif *vif,
                                  struct xen_netif_tx_request *first,
                                  struct xen_netif_tx_request *txp,
                                  int work_to_do)
@@ -718,7 +719,7 @@ static int netbk_count_requests(struct xenvif *vif,
                 netdev_err(vif->dev,
                            "Asked for %d slots but exceeds this limit\n",
                            work_to_do);
-                netbk_fatal_tx_err(vif);
+                xenvif_fatal_tx_err(vif);
                 return -ENODATA;
         }
@@ -729,7 +730,7 @@ static int netbk_count_requests(struct xenvif *vif,
                 netdev_err(vif->dev,
                            "Malicious frontend using %d slots, threshold %u\n",
                            slots, fatal_skb_slots);
-                netbk_fatal_tx_err(vif);
+                xenvif_fatal_tx_err(vif);
                 return -E2BIG;
         }
@@ -777,7 +778,7 @@ static int netbk_count_requests(struct xenvif *vif,
                 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
                         netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
                                    txp->offset, txp->size);
-                        netbk_fatal_tx_err(vif);
+                        xenvif_fatal_tx_err(vif);
                         return -EINVAL;
                 }
@@ -789,14 +790,14 @@ static int netbk_count_requests(struct xenvif *vif,
         } while (more_data);
         if (drop_err) {
-                netbk_tx_err(vif, first, cons + slots);
+                xenvif_tx_err(vif, first, cons + slots);
                 return drop_err;
         }
         return slots;
 }
-static struct page *xen_netbk_alloc_page(struct xenvif *vif,
+static struct page *xenvif_alloc_page(struct xenvif *vif,
                                        u16 pending_idx)
 {
         struct page *page;
@@ -809,7 +810,7 @@ static struct page *xen_netbk_alloc_page(struct xenvif *vif,
         return page;
 }
-static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
+static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
                                                struct sk_buff *skb,
                                                struct xen_netif_tx_request *txp,
                                                struct gnttab_copy *gop)
@@ -835,7 +836,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
         /* Coalesce tx requests, at this point the packet passed in
          * should be <= 64K. Any packets larger than 64K have been
-         * handled in netbk_count_requests().
+         * handled in xenvif_count_requests().
          */
         for (shinfo->nr_frags = slot = start; slot < nr_slots;
              shinfo->nr_frags++) {
@@ -918,18 +919,18 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif,
 err:
         /* Unwind, freeing all pages and sending error responses. */
         while (shinfo->nr_frags-- > start) {
-                xen_netbk_idx_release(vif,
+                xenvif_idx_release(vif,
                                    frag_get_pending_idx(&frags[shinfo->nr_frags]),
                                    XEN_NETIF_RSP_ERROR);
         }
         /* The head too, if necessary. */
         if (start)
-                xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+                xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
         return NULL;
 }
-static int xen_netbk_tx_check_gop(struct xenvif *vif,
+static int xenvif_tx_check_gop(struct xenvif *vif,
                                struct sk_buff *skb,
                                struct gnttab_copy **gopp)
 {
@@ -944,7 +945,7 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
         /* Check status of header. */
         err = gop->status;
         if (unlikely(err))
-                xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+                xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
         /* Skip first skb fragment if it is on same page as header fragment. */
         start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
@@ -968,13 +969,13 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
                 if (likely(!newerr)) {
                         /* Had a previous error? Invalidate this fragment. */
                         if (unlikely(err))
-                                xen_netbk_idx_release(vif, pending_idx,
+                                xenvif_idx_release(vif, pending_idx,
                                                    XEN_NETIF_RSP_OKAY);
                         continue;
                 }
                 /* Error on this fragment: respond to client with an error. */
-                xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+                xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
                 /* Not the first error? Preceding frags already invalidated. */
                 if (err)
@@ -982,10 +983,10 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
                 /* First error: invalidate header and preceding fragments. */
                 pending_idx = *((u16 *)skb->data);
-                xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+                xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
                 for (j = start; j < i; j++) {
                         pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                        xen_netbk_idx_release(vif, pending_idx,
+                        xenvif_idx_release(vif, pending_idx,
                                            XEN_NETIF_RSP_OKAY);
                 }
@@ -997,7 +998,7 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif,
         return err;
 }
-static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb)
+static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
 {
         struct skb_shared_info *shinfo = skb_shinfo(skb);
         int nr_frags = shinfo->nr_frags;
@@ -1018,13 +1019,13 @@ static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb)
                 skb->data_len += txp->size;
                 skb->truesize += txp->size;
-                /* Take an extra reference to offset xen_netbk_idx_release */
+                /* Take an extra reference to offset xenvif_idx_release */
                 get_page(vif->mmap_pages[pending_idx]);
-                xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+                xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
         }
 }
-static int xen_netbk_get_extras(struct xenvif *vif,
+static int xenvif_get_extras(struct xenvif *vif,
                              struct xen_netif_extra_info *extras,
                              int work_to_do)
 {
@@ -1034,7 +1035,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
         do {
                 if (unlikely(work_to_do-- <= 0)) {
                         netdev_err(vif->dev, "Missing extra info\n");
-                        netbk_fatal_tx_err(vif);
+                        xenvif_fatal_tx_err(vif);
                         return -EBADR;
                 }
@@ -1045,7 +1046,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
                         vif->tx.req_cons = ++cons;
                         netdev_err(vif->dev,
                                    "Invalid extra type: %d\n", extra.type);
-                        netbk_fatal_tx_err(vif);
+                        xenvif_fatal_tx_err(vif);
                         return -EINVAL;
                 }
@@ -1056,20 +1057,20 @@ static int xen_netbk_get_extras(struct xenvif *vif,
         return work_to_do;
 }
-static int netbk_set_skb_gso(struct xenvif *vif,
+static int xenvif_set_skb_gso(struct xenvif *vif,
                               struct sk_buff *skb,
                               struct xen_netif_extra_info *gso)
 {
         if (!gso->u.gso.size) {
                 netdev_err(vif->dev, "GSO size must not be zero.\n");
-                netbk_fatal_tx_err(vif);
+                xenvif_fatal_tx_err(vif);
                 return -EINVAL;
         }
         /* Currently only TCPv4 S.O. is supported. */
         if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
                 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
-                netbk_fatal_tx_err(vif);
+                xenvif_fatal_tx_err(vif);
                 return -EINVAL;
         }
@@ -1180,7 +1181,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
         return false;
 }
-static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif)
 {
         struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
         struct sk_buff *skb;
@@ -1205,7 +1206,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
                            "req_prod %d, req_cons %d, size %ld\n",
                            vif->tx.sring->req_prod, vif->tx.req_cons,
                            XEN_NETIF_TX_RING_SIZE);
-                netbk_fatal_tx_err(vif);
+                xenvif_fatal_tx_err(vif);
                 continue;
         }
@@ -1229,14 +1230,14 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
                 memset(extras, 0, sizeof(extras));
                 if (txreq.flags & XEN_NETTXF_extra_info) {
-                        work_to_do = xen_netbk_get_extras(vif, extras,
+                        work_to_do = xenvif_get_extras(vif, extras,
                                                        work_to_do);
                         idx = vif->tx.req_cons;
                         if (unlikely(work_to_do < 0))
                                 break;
                 }
-                ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+                ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
                 if (unlikely(ret < 0))
                         break;
@@ -1245,7 +1246,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
                 if (unlikely(txreq.size < ETH_HLEN)) {
                         netdev_dbg(vif->dev,
                                    "Bad packet size: %d\n", txreq.size);
-                        netbk_tx_err(vif, &txreq, idx);
+                        xenvif_tx_err(vif, &txreq, idx);
                         break;
                 }
@@ -1255,7 +1256,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
                                    "txreq.offset: %x, size: %u, end: %lu\n",
                                    txreq.offset, txreq.size,
                                    (txreq.offset&~PAGE_MASK) + txreq.size);
-                        netbk_fatal_tx_err(vif);
+                        xenvif_fatal_tx_err(vif);
                         break;
                 }
@@ -1271,7 +1272,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
                 if (unlikely(skb == NULL)) {
                         netdev_dbg(vif->dev,
                                    "Can't allocate a skb in start_xmit.\n");
-                        netbk_tx_err(vif, &txreq, idx);
+                        xenvif_tx_err(vif, &txreq, idx);
                         break;
                 }
@@ -1282,18 +1283,18 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
                         struct xen_netif_extra_info *gso;
                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
-                        if (netbk_set_skb_gso(vif, skb, gso)) {
+                        if (xenvif_set_skb_gso(vif, skb, gso)) {
-                                /* Failure in netbk_set_skb_gso is fatal. */
+                                /* Failure in xenvif_set_skb_gso is fatal. */
                                 kfree_skb(skb);
                                 break;
                         }
                 }
                 /* XXX could copy straight to head */
-                page = xen_netbk_alloc_page(vif, pending_idx);
+                page = xenvif_alloc_page(vif, pending_idx);
                 if (!page) {
                         kfree_skb(skb);
-                        netbk_tx_err(vif, &txreq, idx);
+                        xenvif_tx_err(vif, &txreq, idx);
                         break;
                 }
@@ -1329,10 +1330,10 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
                 vif->pending_cons++;
-                request_gop = xen_netbk_get_requests(vif, skb, txfrags, gop);
+                request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
                 if (request_gop == NULL) {
                         kfree_skb(skb);
-                        netbk_tx_err(vif, &txreq, idx);
+                        xenvif_tx_err(vif, &txreq, idx);
                         break;
                 }
                 gop = request_gop;
@@ -1349,7 +1350,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif)
 }
-static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
+static int xenvif_tx_submit(struct xenvif *vif, int budget)
 {
         struct gnttab_copy *gop = vif->tx_copy_ops;
         struct sk_buff *skb;
@@ -1365,7 +1366,7 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
                 txp = &vif->pending_tx_info[pending_idx].req;
                 /* Check the remap error code. */
-                if (unlikely(xen_netbk_tx_check_gop(vif, skb, &gop))) {
+                if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
                         netdev_dbg(vif->dev, "netback grant failed.\n");
                         skb_shinfo(skb)->nr_frags = 0;
                         kfree_skb(skb);
@@ -1382,7 +1383,7 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
                         txp->size -= data_len;
                 } else {
                         /* Schedule a response immediately. */
-                        xen_netbk_idx_release(vif, pending_idx,
+                        xenvif_idx_release(vif, pending_idx,
                                            XEN_NETIF_RSP_OKAY);
                 }
@@ -1391,7 +1392,7 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
                 else if (txp->flags & XEN_NETTXF_data_validated)
                         skb->ip_summed = CHECKSUM_UNNECESSARY;
-                xen_netbk_fill_frags(vif, skb);
+                xenvif_fill_frags(vif, skb);
                 /*
                  * If the initial fragment was < PKT_PROT_LEN then
@@ -1428,7 +1429,7 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget)
 }
 /* Called after netfront has transmitted */
-int xen_netbk_tx_action(struct xenvif *vif, int budget)
+int xenvif_tx_action(struct xenvif *vif, int budget)
 {
         unsigned nr_gops;
         int work_done;
@@ -1436,19 +1437,19 @@ int xen_netbk_tx_action(struct xenvif *vif, int budget)
         if (unlikely(!tx_work_todo(vif)))
                 return 0;
-        nr_gops = xen_netbk_tx_build_gops(vif);
+        nr_gops = xenvif_tx_build_gops(vif);
         if (nr_gops == 0)
                 return 0;
         gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
-        work_done = xen_netbk_tx_submit(vif, nr_gops);
+        work_done = xenvif_tx_submit(vif, nr_gops);
         return work_done;
 }
-static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
                                u8 status)
 {
         struct pending_tx_info *pending_tx_info;
@@ -1554,7 +1555,7 @@ static inline int tx_work_todo(struct xenvif *vif)
         return 0;
 }
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
+void xenvif_unmap_frontend_rings(struct xenvif *vif)
 {
         if (vif->tx.sring)
                 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
@@ -1564,7 +1565,7 @@ void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
                                         vif->rx.sring);
 }
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
+int xenvif_map_frontend_rings(struct xenvif *vif,
                               grant_ref_t tx_ring_ref,
                               grant_ref_t rx_ring_ref)
 {
@@ -1595,11 +1596,11 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
         return 0;
 err:
-        xen_netbk_unmap_frontend_rings(vif);
+        xenvif_unmap_frontend_rings(vif);
         return err;
 }
-int xen_netbk_kthread(void *data)
+int xenvif_kthread(void *data)
 {
         struct xenvif *vif = data;
@@ -1611,7 +1612,7 @@ int xen_netbk_kthread(void *data)
                         break;
                 if (rx_work_todo(vif))
-                        xen_netbk_rx_action(vif);
+                        xenvif_rx_action(vif);
                 cond_resched();
         }
...