Commit 3c4ef851 authored by David S. Miller

Merge branch 'xen-netback-fix-multiple-extra-info-handling'

Paul Durrant says:

====================
xen-netback: fix multiple extra info handling

If a frontend passes multiple extra info fragments to netback on the guest
transmit side, only a single ack response will be sent because xen-netback
does not account for the extra fragments properly. This will eventually cause
processing of the shared ring to wedge.

This series re-imports the canonical netif.h from Xen, where the ring
protocol documentation has been updated, fixes this issue in xen-netback
and also adds a patch to reduce log spam.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 136ba622 8e4ee59c
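
To illustrate the accounting problem (a hypothetical slot layout, not taken from
the patches below): a single guest transmit packet that carries two chained
extra info segments consumes three request slots, and the backend must balance
them with one real response plus one XEN_NETIF_RSP_NULL slot per extra.

/*
 * Hypothetical guest TX ring usage for one packet with two extras:
 *
 *   slot 0: xen_netif_tx_request  (flags include XEN_NETTXF_extra_info)
 *   slot 1: xen_netif_extra_info  (flags include XEN_NETIF_EXTRA_FLAG_MORE)
 *   slot 2: xen_netif_extra_info
 *
 * Before this series make_tx_response() emitted at most one
 * XEN_NETIF_RSP_NULL regardless of how many extra slots were consumed, so
 * responses fell behind requests; with the fix it emits one NULL response
 * per extra slot recorded in extra_count.
 */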
@@ -52,6 +52,7 @@ typedef unsigned int pending_ring_idx_t;
 struct pending_tx_info {
         struct xen_netif_tx_request req; /* tx request */
+        unsigned int extra_count;
         /* Callback data for released SKBs. The callback is always
          * xenvif_zerocopy_callback, desc contains the pending_idx, which is
          * also an index in pending_tx_info array. It is initialized in
...
@@ -95,6 +95,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 static void make_tx_response(struct xenvif_queue *queue,
                              struct xen_netif_tx_request *txp,
+                             unsigned int extra_count,
                              s8 st);
 static void push_tx_responses(struct xenvif_queue *queue);
@@ -696,14 +697,15 @@ void xenvif_tx_credit_callback(unsigned long data)
 }
 static void xenvif_tx_err(struct xenvif_queue *queue,
-                          struct xen_netif_tx_request *txp, RING_IDX end)
+                          struct xen_netif_tx_request *txp,
+                          unsigned int extra_count, RING_IDX end)
 {
         RING_IDX cons = queue->tx.req_cons;
         unsigned long flags;
         do {
                 spin_lock_irqsave(&queue->response_lock, flags);
-                make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+                make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
                 push_tx_responses(queue);
                 spin_unlock_irqrestore(&queue->response_lock, flags);
                 if (cons == end)
@@ -724,6 +726,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
 static int xenvif_count_requests(struct xenvif_queue *queue,
                                  struct xen_netif_tx_request *first,
+                                 unsigned int extra_count,
                                  struct xen_netif_tx_request *txp,
                                  int work_to_do)
 {
@@ -812,7 +815,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
         } while (more_data);
         if (drop_err) {
-                xenvif_tx_err(queue, first, cons + slots);
+                xenvif_tx_err(queue, first, extra_count, cons + slots);
                 return drop_err;
         }
@@ -829,6 +832,7 @@ struct xenvif_tx_cb {
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
                                            u16 pending_idx,
                                            struct xen_netif_tx_request *txp,
+                                           unsigned int extra_count,
                                            struct gnttab_map_grant_ref *mop)
 {
         queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
@@ -838,6 +842,7 @@ static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
         memcpy(&queue->pending_tx_info[pending_idx].req, txp,
                sizeof(*txp));
+        queue->pending_tx_info[pending_idx].extra_count = extra_count;
 }
 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
@@ -880,7 +885,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
              shinfo->nr_frags++, txp++, gop++) {
                 index = pending_index(queue->pending_cons++);
                 pending_idx = queue->pending_ring[index];
-                xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+                xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
                 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
         }
@@ -893,7 +898,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
                      shinfo->nr_frags++, txp++, gop++) {
                         index = pending_index(queue->pending_cons++);
                         pending_idx = queue->pending_ring[index];
-                        xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+                        xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
+                                                gop);
                         frag_set_pending_idx(&frags[shinfo->nr_frags],
                                              pending_idx);
                 }
@@ -1096,6 +1102,7 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
 static int xenvif_get_extras(struct xenvif_queue *queue,
                              struct xen_netif_extra_info *extras,
+                             unsigned int *extra_count,
                              int work_to_do)
 {
         struct xen_netif_extra_info extra;
@@ -1109,9 +1116,12 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
                 }
                 RING_COPY_REQUEST(&queue->tx, cons, &extra);
+                queue->tx.req_cons = ++cons;
+                (*extra_count)++;
                 if (unlikely(!extra.type ||
                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-                        queue->tx.req_cons = ++cons;
                         netdev_err(queue->vif->dev,
                                    "Invalid extra type: %d\n", extra.type);
                         xenvif_fatal_tx_err(queue->vif);
@@ -1119,7 +1129,6 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
                 }
                 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
-                queue->tx.req_cons = ++cons;
         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
         return work_to_do;
@@ -1294,6 +1303,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                 struct xen_netif_tx_request txreq;
                 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
                 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+                unsigned int extra_count;
                 u16 pending_idx;
                 RING_IDX idx;
                 int work_to_do;
@@ -1330,8 +1340,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                 queue->tx.req_cons = ++idx;
                 memset(extras, 0, sizeof(extras));
+                extra_count = 0;
                 if (txreq.flags & XEN_NETTXF_extra_info) {
                         work_to_do = xenvif_get_extras(queue, extras,
+                                                       &extra_count,
                                                        work_to_do);
                         idx = queue->tx.req_cons;
                         if (unlikely(work_to_do < 0))
@@ -1344,7 +1356,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                         extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
                         ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
-                        make_tx_response(queue, &txreq,
+                        make_tx_response(queue, &txreq, extra_count,
                                          (ret == 0) ?
                                          XEN_NETIF_RSP_OKAY :
                                          XEN_NETIF_RSP_ERROR);
@@ -1358,12 +1370,14 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                         extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
                         xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
-                        make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+                        make_tx_response(queue, &txreq, extra_count,
+                                         XEN_NETIF_RSP_OKAY);
                         push_tx_responses(queue);
                         continue;
                 }
-                ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
+                ret = xenvif_count_requests(queue, &txreq, extra_count,
+                                            txfrags, work_to_do);
                 if (unlikely(ret < 0))
                         break;
@@ -1372,7 +1386,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                 if (unlikely(txreq.size < ETH_HLEN)) {
                         netdev_dbg(queue->vif->dev,
                                    "Bad packet size: %d\n", txreq.size);
-                        xenvif_tx_err(queue, &txreq, idx);
+                        xenvif_tx_err(queue, &txreq, extra_count, idx);
                         break;
                 }
@@ -1397,7 +1411,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                 if (unlikely(skb == NULL)) {
                         netdev_dbg(queue->vif->dev,
                                    "Can't allocate a skb in start_xmit.\n");
-                        xenvif_tx_err(queue, &txreq, idx);
+                        xenvif_tx_err(queue, &txreq, extra_count, idx);
                         break;
                 }
@@ -1416,7 +1430,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                         nskb = xenvif_alloc_skb(0);
                         if (unlikely(nskb == NULL)) {
                                 kfree_skb(skb);
-                                xenvif_tx_err(queue, &txreq, idx);
+                                xenvif_tx_err(queue, &txreq, extra_count, idx);
                                 if (net_ratelimit())
                                         netdev_err(queue->vif->dev,
                                                    "Can't allocate the frag_list skb.\n");
@@ -1457,13 +1471,16 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                 if (data_len < txreq.size) {
                         frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                              pending_idx);
-                        xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
+                        xenvif_tx_create_map_op(queue, pending_idx, &txreq,
+                                                extra_count, gop);
                         gop++;
                 } else {
                         frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                              INVALID_PENDING_IDX);
-                        memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
-                               sizeof(txreq));
+                        memcpy(&queue->pending_tx_info[pending_idx].req,
+                               &txreq, sizeof(txreq));
+                        queue->pending_tx_info[pending_idx].extra_count =
+                                extra_count;
                 }
                 queue->pending_cons++;
@@ -1804,7 +1821,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
         spin_lock_irqsave(&queue->response_lock, flags);
-        make_tx_response(queue, &pending_tx_info->req, status);
+        make_tx_response(queue, &pending_tx_info->req,
+                         pending_tx_info->extra_count, status);
         /* Release the pending index before pusing the Tx response so
          * its available before a new Tx request is pushed by the
@@ -1821,6 +1839,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 static void make_tx_response(struct xenvif_queue *queue,
                              struct xen_netif_tx_request *txp,
+                             unsigned int extra_count,
                              s8 st)
 {
         RING_IDX i = queue->tx.rsp_prod_pvt;
@@ -1830,7 +1849,7 @@ static void make_tx_response(struct xenvif_queue *queue,
         resp->id = txp->id;
         resp->status = st;
-        if (txp->flags & XEN_NETTXF_extra_info)
+        while (extra_count-- != 0)
                 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
         queue->tx.rsp_prod_pvt = ++i;
...
@@ -511,8 +511,6 @@ static void set_backend_state(struct backend_info *be,
                 switch (state) {
                 case XenbusStateInitWait:
                 case XenbusStateConnected:
-                        pr_info("%s: prepare for reconnect\n",
-                                be->dev->nodename);
                         backend_switch_state(be, XenbusStateInitWait);
                         break;
                 case XenbusStateClosing:
...
 /******************************************************************************
- * netif.h
+ * xen_netif.h
  *
  * Unified network-device I/O interface for Xen guest OSes.
  *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
  * Copyright (c) 2003-2004, Keir Fraser
  */
-#ifndef __XEN_PUBLIC_IO_NETIF_H__
-#define __XEN_PUBLIC_IO_NETIF_H__
+#ifndef __XEN_PUBLIC_IO_XEN_NETIF_H__
+#define __XEN_PUBLIC_IO_XEN_NETIF_H__
-#include <xen/interface/io/ring.h>
-#include <xen/interface/grant_table.h>
+#include "ring.h"
+#include "../grant_table.h"
 /*
  * Older implementation of Xen network frontend / backend has an
@@ -38,10 +56,10 @@
  * that it cannot safely queue packets (as it may not be kicked to send them).
  */
 /*
  * "feature-split-event-channels" is introduced to separate guest TX
- * and RX notificaion. Backend either doesn't support this feature or
- * advertise it via xenstore as 0 (disabled) or 1 (enabled).
+ * and RX notification. Backend either doesn't support this feature or
+ * advertises it via xenstore as 0 (disabled) or 1 (enabled).
  *
  * To make use of this feature, frontend should allocate two event
  * channels for TX and RX, advertise them to backend as
@@ -118,14 +136,683 @@
  */
 /*
- * This is the 'wire' format for packets:
- * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags)
- * [Request 2: xen_netif_extra_info] (only if request 1 has XEN_NETTXF_extra_info)
- * [Request 3: xen_netif_extra_info] (only if request 2 has XEN_NETIF_EXTRA_MORE)
- * Request 4: xen_netif_tx_request -- XEN_NETTXF_more_data
- * Request 5: xen_netif_tx_request -- XEN_NETTXF_more_data
+ * "feature-multicast-control" and "feature-dynamic-multicast-control"
+ * advertise the capability to filter ethernet multicast packets in the
+ * backend. If the frontend wishes to take advantage of this feature then
+ * it may set "request-multicast-control". If the backend only advertises
+ * "feature-multicast-control" then "request-multicast-control" must be set
+ * before the frontend moves into the connected state. The backend will
* sample the value on this state transition and any subsequent change in
* value will have no effect. However, if the backend also advertises
* "feature-dynamic-multicast-control" then "request-multicast-control"
* may be set by the frontend at any time. In this case, the backend will
* watch the value and re-sample on watch events.
*
* If the sampled value of "request-multicast-control" is set then the
* backend transmit side should no longer flood multicast packets to the
* frontend, it should instead drop any multicast packet that does not
* match in a filter list.
* The list is amended by the frontend by sending dummy transmit requests
* containing XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as
* specified below.
* Note that the filter list may be amended even if the sampled value of
* "request-multicast-control" is not set, however the filter should only
* be applied if it is set.
*/
/*
* Control ring
* ============
*
* Some features, such as hashing (detailed below), require a
* significant amount of out-of-band data to be passed from frontend to
* backend. Use of xenstore is not suitable for large quantities of data
* because of quota limitations and so a dedicated 'control ring' is used.
* The ability of the backend to use a control ring is advertised by
* setting:
*
* /local/domain/X/backend/<domid>/<vif>/feature-ctrl-ring = "1"
*
* The frontend provides a control ring to the backend by setting:
*
* /local/domain/<domid>/device/vif/<vif>/ctrl-ring-ref = <gref>
* /local/domain/<domid>/device/vif/<vif>/event-channel-ctrl = <port>
*
* where <gref> is the grant reference of the shared page used to
* implement the control ring and <port> is an event channel to be used
* as a mailbox interrupt. These keys must be set before the frontend
* moves into the connected state.
*
* The control ring uses a fixed request/response message size and is
* balanced (i.e. one request to one response), so operationally it is much
* the same as a transmit or receive ring.
* Note that there is no requirement that responses are issued in the same
* order as requests.
*/
/*
* Hash types
* ==========
*
* For the purposes of the definitions below, 'Packet[]' is an array of
* octets containing an IP packet without options, 'Array[X..Y]' means a
* sub-array of 'Array' containing bytes X thru Y inclusive, and '+' is
* used to indicate concatenation of arrays.
*/
/*
* A hash calculated over an IP version 4 header as follows:
*
* Buffer[0..8] = Packet[12..15] (source address) +
* Packet[16..19] (destination address)
*
* Result = Hash(Buffer, 8)
*/
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4 0
#define XEN_NETIF_CTRL_HASH_TYPE_IPV4 \
(1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4)
/*
* A hash calculated over an IP version 4 header and TCP header as
* follows:
*
* Buffer[0..12] = Packet[12..15] (source address) +
* Packet[16..19] (destination address) +
* Packet[20..21] (source port) +
* Packet[22..23] (destination port)
*
* Result = Hash(Buffer, 12)
*/
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP 1
#define XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP \
(1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
/*
* A hash calculated over an IP version 6 header as follows:
*
* Buffer[0..32] = Packet[8..23] (source address ) +
* Packet[24..39] (destination address)
*
* Result = Hash(Buffer, 32)
*/
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6 2
#define XEN_NETIF_CTRL_HASH_TYPE_IPV6 \
(1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6)
/*
* A hash calculated over an IP version 6 header and TCP header as
* follows:
*
* Buffer[0..36] = Packet[8..23] (source address) +
* Packet[24..39] (destination address) +
* Packet[40..41] (source port) +
* Packet[42..43] (destination port)
*
* Result = Hash(Buffer, 36)
*/
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP 3
#define XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP \
(1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
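
As a concrete reading of the IPV4_TCP definition above, the buffer could be
assembled like this (a minimal sketch; the function name is made up and
bounds checks are omitted):

#include <stdint.h>
#include <string.h>

/* Illustrative only: build the 12-octet input hashed for
 * XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP from an option-less IPv4+TCP packet
 * laid out as described above. */
static void ipv4_tcp_hash_input(const uint8_t *packet, uint8_t buf[12])
{
        memcpy(&buf[0], &packet[12], 4);        /* source address */
        memcpy(&buf[4], &packet[16], 4);        /* destination address */
        memcpy(&buf[8], &packet[20], 2);        /* source port */
        memcpy(&buf[10], &packet[22], 2);       /* destination port */
        /* Result = Hash(buf, 12) using the configured algorithm. */
}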
/*
* Hash algorithms
* ===============
*/
#define XEN_NETIF_CTRL_HASH_ALGORITHM_NONE 0
/*
* Toeplitz hash:
*/
#define XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ 1
/*
* This algorithm uses a 'key' as well as the data buffer itself.
* (Buffer[] and Key[] are treated as shift-registers where the MSB of
* Buffer/Key[0] is considered 'left-most' and the LSB of Buffer/Key[N-1]
* is the 'right-most').
*
* Value = 0
* For number of bits in Buffer[]
* If (left-most bit of Buffer[] is 1)
* Value ^= left-most 32 bits of Key[]
* Key[] << 1
* Buffer[] << 1
*
* The code below is provided for convenience where an operating system
* does not already provide an implementation.
*/
#ifdef XEN_NETIF_DEFINE_TOEPLITZ
static uint32_t xen_netif_toeplitz_hash(const uint8_t *key,
unsigned int keylen,
const uint8_t *buf, unsigned int buflen)
{
unsigned int keyi, bufi;
uint64_t prefix = 0;
uint64_t hash = 0;
/* Pre-load prefix with the first 8 bytes of the key */
for (keyi = 0; keyi < 8; keyi++) {
prefix <<= 8;
prefix |= (keyi < keylen) ? key[keyi] : 0;
}
for (bufi = 0; bufi < buflen; bufi++) {
uint8_t byte = buf[bufi];
unsigned int bit;
for (bit = 0; bit < 8; bit++) {
if (byte & 0x80)
hash ^= prefix;
prefix <<= 1;
byte <<= 1;
}
/*
* 'prefix' has now been left-shifted by 8, so
* OR in the next byte.
*/
prefix |= (keyi < keylen) ? key[keyi] : 0;
keyi++;
}
/* The valid part of the hash is in the upper 32 bits. */
return hash >> 32;
}
#endif /* XEN_NETIF_DEFINE_TOEPLITZ */
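
For example, code that defines XEN_NETIF_DEFINE_TOEPLITZ before including this
header could call the helper above as follows (a sketch; the include path and
key shown are arbitrary placeholders, not recommended values):

#define XEN_NETIF_DEFINE_TOEPLITZ
#include "netif.h"      /* assumed include path for this header */

/* Arbitrary example key; remaining octets are zero-initialized. */
static const uint8_t example_key[40] = { 0x6d, 0x5a, 0x56, 0xda };

uint32_t example_hash(const uint8_t *buf, unsigned int buflen)
{
        return xen_netif_toeplitz_hash(example_key, sizeof(example_key),
                                       buf, buflen);
}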
/*
* Control requests (struct xen_netif_ctrl_request)
* ================================================
*
* All requests have the following format:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | type | data[0] |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | data[1] | data[2] |
* +-----+-----+-----+-----+-----------------------+
*
* id: the request identifier, echoed in response.
* type: the type of request (see below)
* data[]: any data associated with the request (determined by type)
*/
struct xen_netif_ctrl_request {
uint16_t id;
uint16_t type;
#define XEN_NETIF_CTRL_TYPE_INVALID 0
#define XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS 1
#define XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS 2
#define XEN_NETIF_CTRL_TYPE_SET_HASH_KEY 3
#define XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE 4
#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE 5
#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING 6
#define XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM 7
uint32_t data[3];
};
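
For instance, a frontend could fill one of these requests to select the
Toeplitz algorithm as follows (a sketch only; the helper name is made up, and
placing the request on the control ring and collecting the response are
omitted — the data[] layout follows the XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
description below):

static void build_set_hash_algorithm(struct xen_netif_ctrl_request *req,
                                     uint16_t id)
{
        req->id = id;           /* echoed in the matching response */
        req->type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM;
        req->data[0] = XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
        req->data[1] = 0;
        req->data[2] = 0;
}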
/*
* Control responses (struct xen_netif_ctrl_response)
* ==================================================
*
* All responses have the following format:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | type | status |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | data |
* +-----+-----+-----+-----+
*
* id: the corresponding request identifier
* type: the type of the corresponding request
* status: the status of request processing
* data: any data associated with the response (determined by type and
* status)
*/
struct xen_netif_ctrl_response {
uint16_t id;
uint16_t type;
uint32_t status;
#define XEN_NETIF_CTRL_STATUS_SUCCESS 0
#define XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED 1
#define XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER 2
#define XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW 3
uint32_t data;
};
/*
* Control messages
* ================
*
* XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
* --------------------------------------
*
* This is sent by the frontend to set the desired hash algorithm.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
* data[0] = a XEN_NETIF_CTRL_HASH_ALGORITHM_* value
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The algorithm is not
* supported
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
*
* NOTE: Setting data[0] to XEN_NETIF_CTRL_HASH_ALGORITHM_NONE disables
* hashing and the backend is free to choose how it steers packets
* to queues (which is the default behaviour).
*
* XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
* ----------------------------------
*
* This is sent by the frontend to query the types of hash supported by
* the backend.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
* data[0] = 0
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = supported hash types (if operation was successful)
*
* NOTE: A valid hash algorithm must be selected before this operation can
* succeed.
*
* XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
* ----------------------------------
*
* This is sent by the frontend to set the types of hash that the backend
* should calculate. (See above for hash type definitions).
* Note that the 'maximal' type of hash should always be chosen. For
* example, if the frontend sets both IPV4 and IPV4_TCP hash types then
* the latter hash type should be calculated for any TCP packet and the
* former only calculated for non-TCP packets.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
* data[0] = bitwise OR of XEN_NETIF_CTRL_HASH_TYPE_* values
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - One or more flag
* value is invalid or
* unsupported
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = 0
*
* NOTE: A valid hash algorithm must be selected before this operation can
* succeed.
* Also, setting data[0] to zero disables hashing and the backend
* is free to choose how it steers packets to queues.
*
* XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
* --------------------------------
*
* This is sent by the frontend to set the key of the hash if the algorithm
* requires it. (See hash algorithms above).
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
* data[0] = grant reference of page containing the key (assumed to
* start at beginning of grant)
* data[1] = size of key in octets
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Key size is invalid
* XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Key size is larger
* than the backend
* supports
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = 0
*
* NOTE: Any key octets not specified are assumed to be zero (the key
* is assumed to be empty by default) and specifying a new key
* invalidates any previous key, hence specifying a key size of
* zero will clear the key (which ensures that the calculated hash
* will always be zero).
* The maximum size of key is algorithm and backend specific, but
* is also limited by the single grant reference.
* The grant reference may be read-only and must remain valid until
* the response has been processed.
*
* XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
* -----------------------------------------
*
* This is sent by the frontend to query the maximum size of mapping
* table supported by the backend. The size is specified in terms of
* table entries.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
* data[0] = 0
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = maximum number of entries allowed in the mapping table
* (if operation was successful) or zero if a mapping table is
* not supported (i.e. hash mapping is done only by modular
* arithmetic).
*
* XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
* -------------------------------------
*
* This is sent by the frontend to set the actual size of the mapping
* table to be used by the backend. The size is specified in terms of
* table entries.
* Any previous table is invalidated by this message and any new table
* is assumed to be zero filled.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
* data[0] = number of entries in mapping table
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size is invalid
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = 0
*
* NOTE: Setting data[0] to 0 means that hash mapping should be done
* using modular arithmetic.
*
* XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
* ------------------------------------
*
* This is sent by the frontend to set the content of the table mapping
* hash value to queue number. The backend should calculate the hash from
* the packet header, use it as an index into the table (modulo the size
* of the table) and then steer the packet to the queue number found at
* that index.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
* data[0] = grant reference of page containing the mapping (sub-)table
* (assumed to start at beginning of grant)
* data[1] = size of (sub-)table in entries
* data[2] = offset, in entries, of sub-table within overall table
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size or content
* is invalid
* XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Table size is larger
* than the backend
* supports
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = 0
*
* NOTE: The overall table has the following format:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | mapping[0] | mapping[1] |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | . |
* | . |
* | . |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | mapping[N-2] | mapping[N-1] |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* where N is specified by a XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
 *       message and each mapping must specify a queue between 0 and
* "multi-queue-num-queues" (see above).
* The backend may support a mapping table larger than can be
* mapped by a single grant reference. Thus sub-tables within a
* larger table can be individually set by sending multiple messages
* with differing offset values. Specifying a new sub-table does not
* invalidate any table data outside that range.
* The grant reference may be read-only and must remain valid until
* the response has been processed.
*/
DEFINE_RING_TYPES(xen_netif_ctrl,
struct xen_netif_ctrl_request,
struct xen_netif_ctrl_response);
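
Putting the hash messages together, a backend might steer a packet to a queue
roughly like this once the frontend has installed a mapping (a sketch; the
structure and field names are hypothetical, only the modulo rule comes from
the description above):

#include <stdint.h>

/* Hypothetical backend-side state derived from control messages. */
struct hash_state {
        unsigned int table_size;        /* from SET_HASH_MAPPING_SIZE */
        uint32_t *mapping;              /* entries set via SET_HASH_MAPPING */
        unsigned int num_queues;        /* "multi-queue-num-queues" */
};

static unsigned int select_queue(const struct hash_state *hs, uint32_t hash)
{
        if (hs->table_size == 0)
                /* No table: fall back to plain modular arithmetic. */
                return hash % hs->num_queues;

        /* Index the table modulo its size; each entry names a queue. */
        return hs->mapping[hash % hs->table_size];
}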
/*
* Guest transmit
* ==============
*
* This is the 'wire' format for transmit (frontend -> backend) packets:
*
* Fragment 1: xen_netif_tx_request_t - flags = XEN_NETTXF_*
* size = total packet size
* [Extra 1: xen_netif_extra_info_t] - (only if fragment 1 flags include
* XEN_NETTXF_extra_info)
* ...
* [Extra N: xen_netif_extra_info_t] - (only if extra N-1 flags include
* XEN_NETIF_EXTRA_MORE)
 *  ...
- *  Request N: xen_netif_tx_request -- 0
+ * Fragment N: xen_netif_tx_request_t - (only if fragment N-1 flags include
* XEN_NETTXF_more_data - flags on preceding
* extras are not relevant here)
* flags = 0
* size = fragment size
*
* NOTE:
*
 * This format is slightly different from that used for receive
* (backend -> frontend) packets. Specifically, in a multi-fragment
* packet the actual size of fragment 1 can only be determined by
* subtracting the sizes of fragments 2..N from the total packet size.
*
* Ring slot size is 12 octets, however not all request/response
* structs use the full size.
*
* tx request data (xen_netif_tx_request_t)
* ------------------------------------
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | grant ref | offset | flags |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | size |
* +-----+-----+-----+-----+
*
* grant ref: Reference to buffer page.
* offset: Offset within buffer page.
* flags: XEN_NETTXF_*.
* id: request identifier, echoed in response.
* size: packet size in bytes.
*
* tx response (xen_netif_tx_response_t)
* ---------------------------------
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | status | unused |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | unused |
* +-----+-----+-----+-----+
*
* id: reflects id in transmit request
* status: XEN_NETIF_RSP_*
*
* Guest receive
* =============
*
* This is the 'wire' format for receive (backend -> frontend) packets:
*
* Fragment 1: xen_netif_rx_request_t - flags = XEN_NETRXF_*
* size = fragment size
* [Extra 1: xen_netif_extra_info_t] - (only if fragment 1 flags include
* XEN_NETRXF_extra_info)
* ...
* [Extra N: xen_netif_extra_info_t] - (only if extra N-1 flags include
* XEN_NETIF_EXTRA_MORE)
* ...
* Fragment N: xen_netif_rx_request_t - (only if fragment N-1 flags include
* XEN_NETRXF_more_data - flags on preceding
* extras are not relevant here)
* flags = 0
* size = fragment size
*
* NOTE:
*
 * This format is slightly different from that used for transmit
* (frontend -> backend) packets. Specifically, in a multi-fragment
* packet the size of the packet can only be determined by summing the
* sizes of fragments 1..N.
*
* Ring slot size is 8 octets.
*
* rx request (xen_netif_rx_request_t)
* -------------------------------
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | pad | gref |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* id: request identifier, echoed in response.
* gref: reference to incoming granted frame.
*
* rx response (xen_netif_rx_response_t)
* ---------------------------------
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | offset | flags | status |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* id: reflects id in receive request
* offset: offset in page of start of received packet
* flags: XEN_NETRXF_*
* status: -ve: XEN_NETIF_RSP_*; +ve: Rx'ed pkt size.
*
* NOTE: Historically, to support GSO on the frontend receive side, Linux
* netfront does not make use of the rx response id (because, as
* described below, extra info structures overlay the id field).
* Instead it assumes that responses always appear in the same ring
* slot as their corresponding request. Thus, to maintain
* compatibility, backends must make sure this is the case.
*
* Extra Info
* ==========
*
* Can be present if initial request or response has NET{T,R}XF_extra_info,
* or previous extra request has XEN_NETIF_EXTRA_MORE.
*
* The struct therefore needs to fit into either a tx or rx slot and
* is therefore limited to 8 octets.
*
* NOTE: Because extra info data overlays the usual request/response
* structures, there is no id information in the opposite direction.
* So, if an extra info overlays an rx response the frontend can
* assume that it is in the same ring slot as the request that was
* consumed to make the slot available, and the backend must ensure
* this assumption is true.
*
* extra info (xen_netif_extra_info_t)
* -------------------------------
*
* General format:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* |type |flags| type specific data |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | padding for tx |
* +-----+-----+-----+-----+
*
* type: XEN_NETIF_EXTRA_TYPE_*
* flags: XEN_NETIF_EXTRA_FLAG_*
* padding for tx: present only in the tx case due to 8 octet limit
* from rx case. Not shown in type specific entries
* below.
*
* XEN_NETIF_EXTRA_TYPE_GSO:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* |type |flags| size |type | pad | features |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* type: Must be XEN_NETIF_EXTRA_TYPE_GSO
* flags: XEN_NETIF_EXTRA_FLAG_*
* size: Maximum payload size of each segment. For example,
* for TCP this is just the path MSS.
* type: XEN_NETIF_GSO_TYPE_*: This determines the protocol of
* the packet and any extra features required to segment the
* packet properly.
 * features: XEN_NETIF_GSO_FEAT_*: This specifies any extra GSO
* features required to process this packet, such as ECN
* support for TCPv4.
*
* XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* |type |flags| addr |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* type: Must be XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}
* flags: XEN_NETIF_EXTRA_FLAG_*
* addr: address to add/remove
*
* XEN_NETIF_EXTRA_TYPE_HASH:
*
 * A backend that supports Toeplitz hashing is assumed to accept
* this type of extra info in transmit packets.
* A frontend that enables hashing is assumed to accept
* this type of extra info in receive packets.
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* |type |flags|htype| alg |LSB ---- value ---- MSB|
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* type: Must be XEN_NETIF_EXTRA_TYPE_HASH
* flags: XEN_NETIF_EXTRA_FLAG_*
* htype: Hash type (one of _XEN_NETIF_CTRL_HASH_TYPE_* - see above)
* alg: The algorithm used to calculate the hash (one of
 *        XEN_NETIF_CTRL_HASH_ALGORITHM_* - see above)
* value: Hash value
 */
 /* Protocol checksum field is blank in the packet (hardware offload)? */
@@ -146,11 +833,11 @@
 #define XEN_NETIF_MAX_TX_SIZE 0xFFFF
 struct xen_netif_tx_request {
-        grant_ref_t gref;       /* Reference to buffer page */
-        uint16_t offset;        /* Offset within buffer page */
-        uint16_t flags;         /* XEN_NETTXF_* */
-        uint16_t id;            /* Echoed in response message. */
-        uint16_t size;          /* Packet size in bytes. */
+        grant_ref_t gref;
+        uint16_t offset;
+        uint16_t flags;
+        uint16_t id;
+        uint16_t size;
 };
 /* Types of xen_netif_extra_info descriptors. */
@@ -158,9 +845,10 @@ struct xen_netif_tx_request {
 #define XEN_NETIF_EXTRA_TYPE_GSO        (1)  /* u.gso */
 #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD  (2)  /* u.mcast */
 #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL  (3)  /* u.mcast */
-#define XEN_NETIF_EXTRA_TYPE_MAX        (4)
+#define XEN_NETIF_EXTRA_TYPE_HASH       (4)  /* u.hash */
+#define XEN_NETIF_EXTRA_TYPE_MAX        (5)
-/* xen_netif_extra_info flags. */
+/* xen_netif_extra_info_t flags. */
 #define _XEN_NETIF_EXTRA_FLAG_MORE      (0)
 #define XEN_NETIF_EXTRA_FLAG_MORE       (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
@@ -170,55 +858,40 @@ struct xen_netif_tx_request {
 #define XEN_NETIF_GSO_TYPE_TCPV6        (2)
 /*
- * This structure needs to fit within both netif_tx_request and
- * netif_rx_response for compatibility.
+ * This structure needs to fit within both xen_netif_tx_request_t and
+ * xen_netif_rx_response_t for compatibility.
  */
 struct xen_netif_extra_info {
-        uint8_t type;   /* XEN_NETIF_EXTRA_TYPE_* */
-        uint8_t flags;  /* XEN_NETIF_EXTRA_FLAG_* */
+        uint8_t type;
+        uint8_t flags;
         union {
                 struct {
-                        /*
-                         * Maximum payload size of each segment. For
-                         * example, for TCP this is just the path MSS.
-                         */
                         uint16_t size;
+                        uint8_t type;
-                        /*
-                         * GSO type. This determines the protocol of
-                         * the packet and any extra features required
-                         * to segment the packet properly.
-                         */
-                        uint8_t type;   /* XEN_NETIF_GSO_TYPE_* */
-                        /* Future expansion. */
                         uint8_t pad;
+                        uint16_t features;
-                        /*
-                         * GSO features. This specifies any extra GSO
-                         * features required to process this packet,
-                         * such as ECN support for TCPv4.
-                         */
-                        uint16_t features;      /* XEN_NETIF_GSO_FEAT_* */
                 } gso;
                 struct {
-                        uint8_t addr[6];        /* Address to add/remove. */
+                        uint8_t addr[6];
                 } mcast;
+                struct {
+                        uint8_t type;
+                        uint8_t algorithm;
+                        uint8_t value[4];
+                } hash;
                 uint16_t pad[3];
         } u;
 };
 struct xen_netif_tx_response {
         uint16_t id;
-        int16_t status; /* XEN_NETIF_RSP_* */
+        int16_t status;
 };
 struct xen_netif_rx_request {
         uint16_t id;            /* Echoed in response message. */
+        uint16_t pad;
-        grant_ref_t gref;       /* Reference to incoming granted frame */
+        grant_ref_t gref;
 };
 /* Packet data has been validated against protocol checksum. */
@@ -237,32 +910,30 @@ struct xen_netif_rx_request {
 #define _XEN_NETRXF_extra_info         (3)
 #define XEN_NETRXF_extra_info          (1U<<_XEN_NETRXF_extra_info)
-/* GSO Prefix descriptor. */
+/* Packet has GSO prefix. Deprecated but included for compatibility */
 #define _XEN_NETRXF_gso_prefix         (4)
 #define XEN_NETRXF_gso_prefix          (1U<<_XEN_NETRXF_gso_prefix)
 struct xen_netif_rx_response {
         uint16_t id;
-        uint16_t offset;        /* Offset in page of start of received packet */
-        uint16_t flags;         /* XEN_NETRXF_* */
-        int16_t status;         /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
+        uint16_t offset;
+        uint16_t flags;
+        int16_t status;
 };
 /*
- * Generate netif ring structures and types.
+ * Generate xen_netif ring structures and types.
  */
-DEFINE_RING_TYPES(xen_netif_tx,
-                  struct xen_netif_tx_request,
+DEFINE_RING_TYPES(xen_netif_tx, struct xen_netif_tx_request,
                   struct xen_netif_tx_response);
-DEFINE_RING_TYPES(xen_netif_rx,
-                  struct xen_netif_rx_request,
+DEFINE_RING_TYPES(xen_netif_rx, struct xen_netif_rx_request,
                   struct xen_netif_rx_response);
 #define XEN_NETIF_RSP_DROPPED  -2
 #define XEN_NETIF_RSP_ERROR    -1
 #define XEN_NETIF_RSP_OKAY      0
-/* No response: used for auxiliary requests (e.g., xen_netif_extra_info). */
+/* No response: used for auxiliary requests (e.g., xen_netif_extra_info_t). */
 #define XEN_NETIF_RSP_NULL      1
 #endif
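
As a closing illustration of the structures above, a frontend might populate
the first two slots of a GSO transmit packet like this (a sketch only,
assuming this header is included; ring manipulation, page granting and the
remaining data fragments are omitted, and the helper name is hypothetical):

static void fill_gso_tx_slots(struct xen_netif_tx_request *req,
                              struct xen_netif_extra_info *extra,
                              grant_ref_t gref, uint16_t offset,
                              uint16_t total_size, uint16_t mss)
{
        /* Slot 0: the first fragment carries the total packet size and
         * signals that an extra info segment follows. */
        req->gref = gref;
        req->offset = offset;
        req->flags = XEN_NETTXF_extra_info | XEN_NETTXF_more_data;
        req->id = 0;                    /* echoed back in the response */
        req->size = total_size;

        /* Slot 1: a GSO extra info segment; no further extras follow,
         * so XEN_NETIF_EXTRA_FLAG_MORE is not set. */
        extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
        extra->flags = 0;
        extra->u.gso.size = mss;
        extra->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
        extra->u.gso.pad = 0;
        extra->u.gso.features = 0;
}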