Commit f3286a3a authored by Chris Metcalf, committed by David S. Miller

tile: support multiple mPIPE shims in tilegx network driver

The initial driver support was for a single mPIPE shim on the chip
(as is the case for the Gx36 hardware).  The Gx72 chip has two mPIPE
shims, so we extend the driver to handle that case.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6ab4ae9a
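
Note: the change adds a small discovery API, gxio_mpipe_link_instance(), so a caller can find out which shim owns a given link before initializing a context for it. The doc comment added to gxio/mpipe.h below describes the intended call sequence; here is a minimal sketch of that flow (the helper name and the abbreviated error handling are illustrative, not part of the commit):

	/* Sketch only: locate the owning shim, initialize it, then open the link. */
	static int open_link_on_owning_shim(gxio_mpipe_context_t *context,
					    gxio_mpipe_link_t *link,
					    const char *link_name)
	{
		/* Map the link name to an mPIPE instance number. */
		int instance = gxio_mpipe_link_instance(link_name);
		int rc;

		if (instance < 0)
			return instance;	/* e.g. GXIO_ERR_NO_DEVICE */

		/* Initialize the shim that owns the link... */
		rc = gxio_mpipe_init(context, instance);
		if (rc != 0)
			return rc;

		/* ...then open the link against that shim's context. */
		return gxio_mpipe_link_open(link, context, link_name, 0);
	}

This mirrors what tile_net_open() now does in the driver: it calls gxio_mpipe_link_instance(dev->name) and runs the per-instance mPIPE initialization only the first time a device on that shim is opened.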
@@ -16,6 +16,24 @@
 #include "gxio/iorpc_mpipe_info.h"
 
+struct instance_aux_param {
+	_gxio_mpipe_link_name_t name;
+};
+
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+				 _gxio_mpipe_link_name_t name)
+{
+	struct instance_aux_param temp;
+	struct instance_aux_param *params = &temp;
+
+	params->name = name;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_INFO_OP_INSTANCE_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_instance_aux);
+
 struct enumerate_aux_param {
 	_gxio_mpipe_link_name_t name;
 	_gxio_mpipe_link_mac_t mac;
...
@@ -36,8 +36,14 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 	int fd;
 	int i;
 
+	if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
+		return -EINVAL;
+
 	snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
 	fd = hv_dev_open((HV_VirtAddr) file, 0);
+
+	context->fd = fd;
+
 	if (fd < 0) {
 		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
 			return fd;
@@ -45,8 +51,6 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 			return -ENODEV;
 	}
 
-	context->fd = fd;
-
 	/* Map in the MMIO space. */
 	context->mmio_cfg_base = (void __force *)
 		iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
@@ -64,12 +68,15 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 	for (i = 0; i < 8; i++)
 		context->__stacks.stacks[i] = 255;
 
+	context->instance = mpipe_index;
+
 	return 0;
 
 fast_failed:
 	iounmap((void __force __iomem *)(context->mmio_cfg_base));
 cfg_failed:
 	hv_dev_close(context->fd);
+	context->fd = -1;
 	return -ENODEV;
 }
@@ -496,6 +503,20 @@ static gxio_mpipe_context_t *_gxio_get_link_context(void)
 	return contextp;
 }
 
+int gxio_mpipe_link_instance(const char *link_name)
+{
+	_gxio_mpipe_link_name_t name;
+	gxio_mpipe_context_t *context = _gxio_get_link_context();
+
+	if (!context)
+		return GXIO_ERR_NO_DEVICE;
+
+	strncpy(name.name, link_name, sizeof(name.name));
+	name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+
+	return gxio_mpipe_info_instance_aux(context, name);
+}
+
 int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
 {
 	int rv;
...
@@ -27,11 +27,15 @@
 #include <asm/pgtable.h>
 
+#define GXIO_MPIPE_INFO_OP_INSTANCE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1250)
 #define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251)
 #define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
 #define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
 
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+				 _gxio_mpipe_link_name_t name);
+
 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
 				  unsigned int idx,
 				  _gxio_mpipe_link_name_t * name,
...
@@ -220,6 +220,13 @@ typedef MPIPE_PDESC_t gxio_mpipe_idesc_t;
 */
 typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t;
 
+/*
+ * Max # of mpipe instances. 2 currently.
+ */
+#define GXIO_MPIPE_INSTANCE_MAX  HV_MPIPE_INSTANCE_MAX
+
+#define NR_MPIPE_MAX   GXIO_MPIPE_INSTANCE_MAX
+
 /* Get the "va" field from an "idesc".
 *
 * This is the address at which the ingress hardware copied the first
@@ -311,6 +318,9 @@ typedef struct {
 	/* File descriptor for calling up to Linux (and thus the HV). */
 	int fd;
 
+	/* Corresponding mpipe instance #. */
+	int instance;
+
 	/* The VA at which configuration registers are mapped. */
 	char *mmio_cfg_base;
@@ -1716,6 +1726,24 @@ typedef struct {
 	uint8_t mac;
 } gxio_mpipe_link_t;
 
+/* Translate a link name to the instance number of the mPIPE shim which is
+ * connected to that link.  This call does not verify whether the link is
+ * currently available, and does not reserve any link resources;
+ * gxio_mpipe_link_open() must be called to perform those functions.
+ *
+ * Typically applications will call this function to translate a link name
+ * to an mPIPE instance number; call gxio_mpipe_init(), passing it that
+ * instance number, to initialize the mPIPE shim; and then call
+ * gxio_mpipe_link_open(), passing it the same link name plus the mPIPE
+ * context, to configure the link.
+ *
+ * @param link_name Name of the link; see @ref gxio_mpipe_link_names.
+ * @return The mPIPE instance number which is associated with the named
+ * link, or a negative error code (::GXIO_ERR_NO_DEVICE) if the link does
+ * not exist.
+ */
+extern int gxio_mpipe_link_instance(const char *link_name);
+
 /* Retrieve one of this system's legal link names, and its MAC address.
 *
 * @param index Link name index.  If a system supports N legal link names,
...
@@ -23,6 +23,9 @@
 #include <arch/mpipe_constants.h>
 
+/** Number of mPIPE instances supported */
+#define HV_MPIPE_INSTANCE_MAX   (2)
+
 /** Number of buffer stacks (32). */
 #define HV_MPIPE_NUM_BUFFER_STACKS \
   (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)
...
@@ -133,27 +133,31 @@ struct tile_net_tx_wake {
 /* Info for a specific cpu. */
 struct tile_net_info {
-	/* The NAPI struct. */
-	struct napi_struct napi;
-	/* Packet queue. */
-	gxio_mpipe_iqueue_t iqueue;
 	/* Our cpu. */
 	int my_cpu;
-	/* True if iqueue is valid. */
-	bool has_iqueue;
-	/* NAPI flags. */
-	bool napi_added;
-	bool napi_enabled;
-	/* Number of buffers (by kind) which must still be provided. */
-	unsigned int num_needed_buffers[MAX_KINDS];
 	/* A timer for handling egress completions. */
 	struct hrtimer egress_timer;
 	/* True if "egress_timer" is scheduled. */
 	bool egress_timer_scheduled;
-	/* Comps for each egress channel. */
-	struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
-	/* Transmit wake timer for each egress channel. */
-	struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+	struct info_mpipe {
+		/* Packet queue. */
+		gxio_mpipe_iqueue_t iqueue;
+		/* The NAPI struct. */
+		struct napi_struct napi;
+		/* Number of buffers (by kind) which must still be provided. */
+		unsigned int num_needed_buffers[MAX_KINDS];
+		/* instance id. */
+		int instance;
+		/* True if iqueue is valid. */
+		bool has_iqueue;
+		/* NAPI flags. */
+		bool napi_added;
+		bool napi_enabled;
+		/* Comps for each egress channel. */
+		struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+		/* Transmit wake timer for each egress channel. */
+		struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+	} mpipe[NR_MPIPE_MAX];
 };
 
 /* Info for egress on a particular egress channel. */
@@ -178,17 +182,54 @@ struct tile_net_priv {
 	int loopify_channel;
 	/* The egress channel (channel or loopify_channel). */
 	int echannel;
+	/* mPIPE instance, 0 or 1. */
+	int instance;
 };
 
-/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
-static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
-
-/* Devices currently associated with each channel.
- * NOTE: The array entry can become NULL after ifconfig down, but
- * we do not free the underlying net_device structures, so it is
- * safe to use a pointer after reading it from this array.
- */
-static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+static struct mpipe_data {
+	/* The ingress irq. */
+	int ingress_irq;
+
+	/* The "context" for all devices. */
+	gxio_mpipe_context_t context;
+
+	/* Egress info, indexed by "priv->echannel"
+	 * (lazily created as needed).
+	 */
+	struct tile_net_egress
+	egress_for_echannel[TILE_NET_CHANNELS];
+
+	/* Devices currently associated with each channel.
+	 * NOTE: The array entry can become NULL after ifconfig down, but
+	 * we do not free the underlying net_device structures, so it is
+	 * safe to use a pointer after reading it from this array.
+	 */
+	struct net_device
+	*tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+	/* The actual memory allocated for the buffer stacks. */
+	void *buffer_stack_vas[MAX_KINDS];
+
+	/* The amount of memory allocated for each buffer stack. */
+	size_t buffer_stack_bytes[MAX_KINDS];
+
+	/* The first buffer stack index
+	 * (small = +0, large = +1, jumbo = +2).
+	 */
+	int first_buffer_stack;
+
+	/* The buckets. */
+	int first_bucket;
+	int num_buckets;
+
+} mpipe_data[NR_MPIPE_MAX] = {
+	[0 ... (NR_MPIPE_MAX - 1)] {
+		.ingress_irq = -1,
+		.first_buffer_stack = -1,
+		.first_bucket = -1,
+		.num_buckets = 1
+	}
+};
 
 /* A mutex for "tile_net_devs_for_channel". */
 static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
@@ -196,8 +237,6 @@ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
 /* The per-cpu info. */
 static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
 
-/* The "context" for all devices. */
-static gxio_mpipe_context_t context;
-
 /* The buffer size enums for each buffer stack.
  * See arch/tile/include/gxio/mpipe.h for the set of possible values.
@@ -210,22 +249,6 @@ static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
 	GXIO_MPIPE_BUFFER_SIZE_16384
 };
 
-/* The actual memory allocated for the buffer stacks. */
-static void *buffer_stack_vas[MAX_KINDS];
-
-/* The amount of memory allocated for each buffer stack. */
-static size_t buffer_stack_bytes[MAX_KINDS];
-
-/* The first buffer stack index (small = +0, large = +1, jumbo = +2). */
-static int first_buffer_stack = -1;
-
-/* The buckets. */
-static int first_bucket = -1;
-static int num_buckets = 1;
-
-/* The ingress irq. */
-static int ingress_irq = -1;
-
 /* Text value of tile_net.cpus if passed as a module parameter. */
 static char *network_cpus_string;
@@ -241,6 +264,13 @@ static char *custom_str;
 /* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
 static uint jumbo_num;
 
+/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
+static inline int mpipe_instance(struct net_device *dev)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+	return priv->instance;
+}
+
 /* The "tile_net.cpus" argument specifies the cpus that are dedicated
  * to handle ingress packets.
  *
@@ -314,8 +344,9 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field)
 }
 
 /* Allocate and push a buffer. */
-static bool tile_net_provide_buffer(int kind)
+static bool tile_net_provide_buffer(int instance, int kind)
 {
+	struct mpipe_data *md = &mpipe_data[instance];
 	gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
 	size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
 	const unsigned long buffer_alignment = 128;
@@ -337,7 +368,7 @@ static bool tile_net_provide_buffer(int kind)
 	/* Make sure "skb" and the back-pointer have been flushed. */
 	wmb();
 
-	gxio_mpipe_push_buffer(&context, first_buffer_stack + kind,
+	gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
 			       (void *)va_to_tile_io_addr(skb->data));
 
 	return true;
@@ -363,11 +394,14 @@ static struct sk_buff *mpipe_buf_to_skb(void *va)
 	return skb;
 }
 
-static void tile_net_pop_all_buffers(int stack)
+static void tile_net_pop_all_buffers(int instance, int stack)
 {
+	struct mpipe_data *md = &mpipe_data[instance];
+
 	for (;;) {
 		tile_io_addr_t addr =
-			(tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+			(tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
+							      stack);
 		if (addr == 0)
 			break;
 		dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
@@ -378,17 +412,21 @@ static void tile_net_pop_all_buffers(int stack)
 static void tile_net_provide_needed_buffers(void)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-	int kind;
-
-	for (kind = 0; kind < MAX_KINDS; kind++) {
-		while (info->num_needed_buffers[kind] != 0) {
-			if (!tile_net_provide_buffer(kind)) {
-				/* Add info to the allocation failure dump. */
-				pr_notice("Tile %d still needs some buffers\n",
-					  info->my_cpu);
-				return;
+	int instance, kind;
+
+	for (instance = 0; instance < NR_MPIPE_MAX &&
+		     info->mpipe[instance].has_iqueue; instance++) {
+		for (kind = 0; kind < MAX_KINDS; kind++) {
+			while (info->mpipe[instance].num_needed_buffers[kind]
+			       != 0) {
+				if (!tile_net_provide_buffer(instance, kind)) {
+					pr_notice("Tile %d still needs"
						  " some buffers\n",
						  info->my_cpu);
+					return;
+				}
+				info->mpipe[instance].
+					num_needed_buffers[kind]--;
 			}
-			info->num_needed_buffers[kind]--;
 		}
 	}
 }
@@ -412,6 +450,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
 			 gxio_mpipe_idesc_t *idesc, unsigned long len)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	int instance = mpipe_instance(dev);
 
 	/* Encode the actual packet length. */
 	skb_put(skb, len);
@@ -422,7 +461,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
 	if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	napi_gro_receive(&info->napi, skb);
+	napi_gro_receive(&info->mpipe[instance].napi, skb);
 
 	/* Update stats. */
 	tile_net_stats_add(1, &dev->stats.rx_packets);
@@ -430,18 +469,19 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
 	/* Need a new buffer. */
 	if (idesc->size == buffer_size_enums[0])
-		info->num_needed_buffers[0]++;
+		info->mpipe[instance].num_needed_buffers[0]++;
 	else if (idesc->size == buffer_size_enums[1])
-		info->num_needed_buffers[1]++;
+		info->mpipe[instance].num_needed_buffers[1]++;
 	else
-		info->num_needed_buffers[2]++;
+		info->mpipe[instance].num_needed_buffers[2]++;
 }
 
 /* Handle a packet.  Return true if "processed", false if "filtered". */
-static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-	struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+	struct mpipe_data *md = &mpipe_data[instance];
+	struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
 	uint8_t l2_offset;
 	void *va;
 	void *buf;
@@ -477,7 +517,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
 		if (dev)
 			tile_net_stats_add(1, &dev->stats.rx_dropped);
 drop:
-		gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+		gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
 	} else {
 		struct sk_buff *skb = mpipe_buf_to_skb(va);
@@ -487,7 +527,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
 		tile_net_receive_skb(dev, skb, idesc, len);
 	}
 
-	gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+	gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
 	return !filter;
 }
@@ -508,14 +548,20 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	unsigned int work = 0;
 	gxio_mpipe_idesc_t *idesc;
-	int i, n;
-
-	/* Process packets. */
-	while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+	int instance, i, n;
+	struct mpipe_data *md;
+	struct info_mpipe *info_mpipe =
+		container_of(napi, struct info_mpipe, napi);
+
+	instance = info_mpipe->instance;
+	while ((n = gxio_mpipe_iqueue_try_peek(
+			&info_mpipe->iqueue,
+			&idesc)) > 0) {
 		for (i = 0; i < n; i++) {
 			if (i == TILE_NET_BATCH)
 				goto done;
-			if (tile_net_handle_packet(idesc + i)) {
+			if (tile_net_handle_packet(instance,
						   idesc + i)) {
 				if (++work >= budget)
 					goto done;
 			}
@@ -523,14 +569,16 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
 	}
 
 	/* There are no packets left. */
-	napi_complete(&info->napi);
+	napi_complete(&info_mpipe->napi);
 
+	md = &mpipe_data[instance];
 	/* Re-enable hypervisor interrupts. */
-	gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+	gxio_mpipe_enable_notif_ring_interrupt(
+		&md->context, info->mpipe[instance].iqueue.ring);
 
 	/* HACK: Avoid the "rotting packet" problem. */
-	if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
-		napi_schedule(&info->napi);
+	if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
+		napi_schedule(&info_mpipe->napi);
 
 	/* ISSUE: Handle completions? */
@@ -540,11 +588,11 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
 	return work;
 }
 
-/* Handle an ingress interrupt on the current cpu. */
-static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+/* Handle an ingress interrupt from an instance on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-	napi_schedule(&info->napi);
+	napi_schedule(&info->mpipe[(uint64_t)id].napi);
 	return IRQ_HANDLED;
 }
@@ -586,7 +634,9 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
 {
 	struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
 	struct tile_net_priv *priv = netdev_priv(dev);
-	struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel];
+	int instance = priv->instance;
+	struct tile_net_tx_wake *tx_wake =
+		&info->mpipe[instance].tx_wake[priv->echannel];
 
 	hrtimer_start(&tx_wake->timer,
 		      ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
@@ -624,7 +674,7 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	unsigned long irqflags;
 	bool pending = false;
-	int i;
+	int i, instance;
 
 	local_irq_save(irqflags);
@@ -632,13 +682,19 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	info->egress_timer_scheduled = false;
 
 	/* Free all possible comps for this tile. */
-	for (i = 0; i < TILE_NET_CHANNELS; i++) {
-		struct tile_net_egress *egress = &egress_for_echannel[i];
-		struct tile_net_comps *comps = info->comps_for_echannel[i];
-		if (comps->comp_last >= comps->comp_next)
-			continue;
-		tile_net_free_comps(egress->equeue, comps, -1, true);
-		pending = pending || (comps->comp_last < comps->comp_next);
+	for (instance = 0; instance < NR_MPIPE_MAX &&
+		     info->mpipe[instance].has_iqueue; instance++) {
+		for (i = 0; i < TILE_NET_CHANNELS; i++) {
+			struct tile_net_egress *egress =
+				&mpipe_data[instance].egress_for_echannel[i];
+			struct tile_net_comps *comps =
+				info->mpipe[instance].comps_for_echannel[i];
+			if (!egress || comps->comp_last >= comps->comp_next)
+				continue;
+			tile_net_free_comps(egress->equeue, comps, -1, true);
+			pending = pending ||
+				(comps->comp_last < comps->comp_next);
+		}
 	}
 
 	/* Reschedule timer if needed. */
@@ -650,13 +706,15 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	return HRTIMER_NORESTART;
 }
 
-/* Helper function for "tile_net_update()". */
-static void manage_ingress_irq(void *enable)
+/* Helper functions for "tile_net_update()". */
+static void enable_ingress_irq(void *irq)
 {
-	if (enable)
-		enable_percpu_irq(ingress_irq, 0);
-	else
-		disable_percpu_irq(ingress_irq);
+	enable_percpu_irq((long)irq, 0);
+}
+
+static void disable_ingress_irq(void *irq)
+{
+	disable_percpu_irq((long)irq);
 }
 
 /* Helper function for tile_net_open() and tile_net_stop().
@@ -666,19 +724,22 @@ static int tile_net_update(struct net_device *dev)
 {
 	static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
 	bool saw_channel = false;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	int channel;
 	int rc;
 	int cpu;
 
-	gxio_mpipe_rules_init(&rules, &context);
+	saw_channel = false;
+	gxio_mpipe_rules_init(&rules, &md->context);
 
 	for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
-		if (tile_net_devs_for_channel[channel] == NULL)
+		if (md->tile_net_devs_for_channel[channel] == NULL)
 			continue;
 		if (!saw_channel) {
 			saw_channel = true;
-			gxio_mpipe_rules_begin(&rules, first_bucket,
-					       num_buckets, NULL);
+			gxio_mpipe_rules_begin(&rules, md->first_bucket,
					       md->num_buckets, NULL);
 			gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
 		}
 		gxio_mpipe_rules_add_channel(&rules, channel);
@@ -689,7 +750,8 @@ static int tile_net_update(struct net_device *dev)
 	 */
 	rc = gxio_mpipe_rules_commit(&rules);
 	if (rc != 0) {
-		netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+		netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
+			    instance, rc);
 		return -EIO;
 	}
@@ -697,35 +759,38 @@ static int tile_net_update(struct net_device *dev)
 	 * We use on_each_cpu to handle the IPI mask or unmask.
 	 */
 	if (!saw_channel)
-		on_each_cpu(manage_ingress_irq, (void *)0, 1);
+		on_each_cpu(disable_ingress_irq,
+			    (void *)(long)(md->ingress_irq), 1);
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-		if (!info->has_iqueue)
+
+		if (!info->mpipe[instance].has_iqueue)
 			continue;
 		if (saw_channel) {
-			if (!info->napi_added) {
-				netif_napi_add(dev, &info->napi,
+			if (!info->mpipe[instance].napi_added) {
+				netif_napi_add(dev, &info->mpipe[instance].napi,
 					       tile_net_poll, TILE_NET_WEIGHT);
-				info->napi_added = true;
+				info->mpipe[instance].napi_added = true;
 			}
-			if (!info->napi_enabled) {
-				napi_enable(&info->napi);
-				info->napi_enabled = true;
+			if (!info->mpipe[instance].napi_enabled) {
+				napi_enable(&info->mpipe[instance].napi);
+				info->mpipe[instance].napi_enabled = true;
 			}
 		} else {
-			if (info->napi_enabled) {
-				napi_disable(&info->napi);
-				info->napi_enabled = false;
+			if (info->mpipe[instance].napi_enabled) {
+				napi_disable(&info->mpipe[instance].napi);
+				info->mpipe[instance].napi_enabled = false;
 			}
 			/* FIXME: Drain the iqueue. */
 		}
 	}
 	if (saw_channel)
-		on_each_cpu(manage_ingress_irq, (void *)1, 1);
+		on_each_cpu(enable_ingress_irq,
+			    (void *)(long)(md->ingress_irq), 1);
 
 	/* HACK: Allow packets to flow in the simulator. */
 	if (saw_channel)
-		sim_enable_mpipe_links(0, -1);
+		sim_enable_mpipe_links(instance, -1);
 
 	return 0;
 }
@@ -735,46 +800,52 @@ static int create_buffer_stack(struct net_device *dev,
 			       int kind, size_t num_buffers)
 {
 	pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
-	int stack_idx = first_buffer_stack + kind;
+	int stack_idx = md->first_buffer_stack + kind;
 	void *va;
 	int i, rc;
 
 	/* Round up to 64KB and then use alloc_pages() so we get the
 	 * required 64KB alignment.
 	 */
-	buffer_stack_bytes[kind] = ALIGN(needed, 64 * 1024);
+	md->buffer_stack_bytes[kind] =
+		ALIGN(needed, 64 * 1024);
 
-	va = alloc_pages_exact(buffer_stack_bytes[kind], GFP_KERNEL);
+	va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
 	if (va == NULL) {
 		netdev_err(dev,
 			   "Could not alloc %zd bytes for buffer stack %d\n",
-			   buffer_stack_bytes[kind], kind);
+			   md->buffer_stack_bytes[kind], kind);
 		return -ENOMEM;
 	}
 
 	/* Initialize the buffer stack. */
-	rc = gxio_mpipe_init_buffer_stack(&context, stack_idx,
-					  buffer_size_enums[kind],
-					  va, buffer_stack_bytes[kind], 0);
+	rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
+					  buffer_size_enums[kind], va,
+					  md->buffer_stack_bytes[kind], 0);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
-		free_pages_exact(va, buffer_stack_bytes[kind]);
+		netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
+			   instance, rc);
+		free_pages_exact(va, md->buffer_stack_bytes[kind]);
 		return rc;
 	}
 
-	buffer_stack_vas[kind] = va;
+	md->buffer_stack_vas[kind] = va;
 
-	rc = gxio_mpipe_register_client_memory(&context, stack_idx,
+	rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
 					       hash_pte, 0);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_register_client_memory: %d\n", rc);
+		netdev_err(dev,
+			   "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
 
 	/* Provide initial buffers. */
 	for (i = 0; i < num_buffers; i++) {
-		if (!tile_net_provide_buffer(kind)) {
+		if (!tile_net_provide_buffer(instance, kind)) {
 			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
 			return -ENOMEM;
 		}
@@ -793,14 +864,18 @@ static int init_buffer_stacks(struct net_device *dev,
 	int num_kinds = MAX_KINDS - (jumbo_num == 0);
 	size_t num_buffers;
 	int rc;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Allocate the buffer stacks. */
-	rc = gxio_mpipe_alloc_buffer_stacks(&context, num_kinds, 0, 0);
+	rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
 	if (rc < 0) {
-		netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks: %d\n", rc);
+		netdev_err(dev,
+			   "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
-	first_buffer_stack = rc;
+	md->first_buffer_stack = rc;
 
 	/* Enough small/large buffers to (normally) avoid buffer errors. */
 	num_buffers =
@@ -829,6 +904,8 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 {
 	struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
 	int order, i, rc;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	struct page *page;
 	void *addr;
@@ -843,7 +920,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 	addr = pfn_to_kaddr(page_to_pfn(page));
 	memset(addr, 0, COMPS_SIZE);
 	for (i = 0; i < TILE_NET_CHANNELS; i++)
-		info->comps_for_echannel[i] =
+		info->mpipe[instance].comps_for_echannel[i] =
 			addr + i * sizeof(struct tile_net_comps);
 
 	/* If this is a network cpu, create an iqueue. */
@@ -857,14 +934,15 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 			return -ENOMEM;
 		}
 		addr = pfn_to_kaddr(page_to_pfn(page));
-		rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
-					    addr, NOTIF_RING_SIZE, 0);
+		rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
+					    &md->context, ring++, addr,
+					    NOTIF_RING_SIZE, 0);
 		if (rc < 0) {
 			netdev_err(dev,
 				   "gxio_mpipe_iqueue_init failed: %d\n", rc);
 			return rc;
 		}
-		info->has_iqueue = true;
+		info->mpipe[instance].has_iqueue = true;
 	}
 
 	return ring;
@@ -877,40 +955,41 @@ static int init_notif_group_and_buckets(struct net_device *dev,
 					int ring, int network_cpus_count)
 {
 	int group, rc;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Allocate one NotifGroup. */
-	rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+	rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
 	if (rc < 0) {
-		netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
-			   rc);
+		netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
 	group = rc;
 
 	/* Initialize global num_buckets value. */
 	if (network_cpus_count > 4)
-		num_buckets = 256;
+		md->num_buckets = 256;
 	else if (network_cpus_count > 1)
-		num_buckets = 16;
+		md->num_buckets = 16;
 
 	/* Allocate some buckets, and set global first_bucket value. */
-	rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+	rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
 	if (rc < 0) {
-		netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+		netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
			   instance, rc);
 		return rc;
 	}
-	first_bucket = rc;
+	md->first_bucket = rc;
 
 	/* Init group and buckets. */
 	rc = gxio_mpipe_init_notif_group_and_buckets(
-		&context, group, ring, network_cpus_count,
-		first_bucket, num_buckets,
+		&md->context, group, ring, network_cpus_count,
+		md->first_bucket, md->num_buckets,
 		GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
 	if (rc != 0) {
-		netdev_err(
-			dev,
-			"gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
-			rc);
+		netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
			   "mpipe[%d] %d\n", instance, rc);
 		return rc;
 	}
@@ -924,30 +1003,39 @@ static int tile_net_setup_interrupts(struct net_device *dev)
 */
 static int tile_net_setup_interrupts(struct net_device *dev)
 {
-	int cpu, rc;
+	int cpu, rc, irq;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
+
+	irq = md->ingress_irq;
+	if (irq < 0) {
+		irq = create_irq();
+		if (irq < 0) {
+			netdev_err(dev,
+				   "create_irq failed: mpipe[%d] %d\n",
+				   instance, irq);
+			return irq;
+		}
+		tile_irq_activate(irq, TILE_IRQ_PERCPU);
 
-	rc = create_irq();
-	if (rc < 0) {
-		netdev_err(dev, "create_irq failed: %d\n", rc);
-		return rc;
-	}
-	ingress_irq = rc;
-	tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
-	rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
-			 0, "tile_net", NULL);
-	if (rc != 0) {
-		netdev_err(dev, "request_irq failed: %d\n", rc);
-		destroy_irq(ingress_irq);
-		ingress_irq = -1;
-		return rc;
+		rc = request_irq(irq, tile_net_handle_ingress_irq,
+				 0, "tile_net", (void *)((uint64_t)instance));
+
+		if (rc != 0) {
+			netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
+				   instance, rc);
+			destroy_irq(irq);
+			return rc;
+		}
+		md->ingress_irq = irq;
 	}
 
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-		if (info->has_iqueue) {
-			gxio_mpipe_request_notif_ring_interrupt(
-				&context, cpu_x(cpu), cpu_y(cpu),
-				KERNEL_PL, ingress_irq, info->iqueue.ring);
+		if (info->mpipe[instance].has_iqueue) {
+			gxio_mpipe_request_notif_ring_interrupt(&md->context,
				cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
				info->mpipe[instance].iqueue.ring);
 		}
 	}
@@ -955,40 +1043,45 @@ static int tile_net_setup_interrupts(struct net_device *dev)
 }
 
 /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
-static void tile_net_init_mpipe_fail(void)
+static void tile_net_init_mpipe_fail(int instance)
 {
 	int kind, cpu;
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Do cleanups that require the mpipe context first. */
 	for (kind = 0; kind < MAX_KINDS; kind++) {
-		if (buffer_stack_vas[kind] != NULL) {
-			tile_net_pop_all_buffers(first_buffer_stack + kind);
+		if (md->buffer_stack_vas[kind] != NULL) {
+			tile_net_pop_all_buffers(instance,
+						 md->first_buffer_stack +
+						 kind);
 		}
 	}
 
 	/* Destroy mpipe context so the hardware no longer owns any memory. */
-	gxio_mpipe_destroy(&context);
+	gxio_mpipe_destroy(&md->context);
 
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-		free_pages((unsigned long)(info->comps_for_echannel[0]),
-			   get_order(COMPS_SIZE));
-		info->comps_for_echannel[0] = NULL;
-		free_pages((unsigned long)(info->iqueue.idescs),
+
+		free_pages(
+			(unsigned long)(
+				info->mpipe[instance].comps_for_echannel[0]),
+			get_order(COMPS_SIZE));
+		info->mpipe[instance].comps_for_echannel[0] = NULL;
+		free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
 			   get_order(NOTIF_RING_SIZE));
-		info->iqueue.idescs = NULL;
+		info->mpipe[instance].iqueue.idescs = NULL;
 	}
 
 	for (kind = 0; kind < MAX_KINDS; kind++) {
-		if (buffer_stack_vas[kind] != NULL) {
-			free_pages_exact(buffer_stack_vas[kind],
-					 buffer_stack_bytes[kind]);
-			buffer_stack_vas[kind] = NULL;
+		if (md->buffer_stack_vas[kind] != NULL) {
+			free_pages_exact(md->buffer_stack_vas[kind],
+					 md->buffer_stack_bytes[kind]);
+			md->buffer_stack_vas[kind] = NULL;
 		}
 	}
 
-	first_buffer_stack = -1;
-	first_bucket = -1;
+	md->first_buffer_stack = -1;
+	md->first_bucket = -1;
 }
 
 /* The first time any tilegx network device is opened, we initialize
@@ -1005,6 +1098,8 @@ static int tile_net_init_mpipe(struct net_device *dev)
 	int rc;
 	int cpu;
 	int first_ring, ring;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	int network_cpus_count = cpus_weight(network_cpus_map);
 
 	if (!hash_default) {
@@ -1012,9 +1107,10 @@ static int tile_net_init_mpipe(struct net_device *dev)
 		return -EIO;
 	}
 
-	rc = gxio_mpipe_init(&context, 0);
+	rc = gxio_mpipe_init(&md->context, instance);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+		netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
+			   instance, rc);
 		return -EIO;
 	}
@@ -1024,7 +1120,8 @@ static int tile_net_init_mpipe(struct net_device *dev)
 		goto fail;
 
 	/* Allocate one NotifRing for each network cpu. */
-	rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+	rc = gxio_mpipe_alloc_notif_rings(&md->context,
+					  network_cpus_count, 0, 0);
 	if (rc < 0) {
 		netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
 			   rc);
@@ -1054,7 +1151,7 @@ static int tile_net_init_mpipe(struct net_device *dev)
 	return 0;
 
 fail:
-	tile_net_init_mpipe_fail();
+	tile_net_init_mpipe_fail(instance);
 	return rc;
 }
@@ -1072,9 +1169,11 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
 	int headers_order, edescs_order, equeue_order;
 	size_t edescs_size;
 	int rc = -ENOMEM;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Only initialize once. */
-	if (egress_for_echannel[echannel].equeue != NULL)
+	if (md->egress_for_echannel[echannel].equeue != NULL)
 		return 0;
 
 	/* Allocate memory for the "headers". */
@@ -1113,20 +1212,21 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
 	/* Allocate an edma ring (using a one entry "free list"). */
 	if (ering < 0) {
-		rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
+		rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
 		if (rc < 0) {
-			netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: %d\n",
-				    rc);
+			netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
+				    "mpipe[%d] %d\n", instance, rc);
 			goto fail_equeue;
 		}
 		ering = rc;
 	}
 
 	/* Initialize the equeue. */
-	rc = gxio_mpipe_equeue_init(equeue, &context, ering, echannel,
+	rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
 				    edescs, edescs_size, 0);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+		netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
+			   instance, rc);
 		goto fail_equeue;
 	}
@@ -1143,8 +1243,8 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
 	}
 
 	/* Done. */
-	egress_for_echannel[echannel].equeue = equeue;
-	egress_for_echannel[echannel].headers = headers;
+	md->egress_for_echannel[echannel].equeue = equeue;
+	md->egress_for_echannel[echannel].headers = headers;
 	return 0;
 
 fail_equeue:
@@ -1164,9 +1264,12 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
 static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
 			      const char *link_name)
 {
-	int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
+	int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
+
 	if (rc < 0) {
-		netdev_err(dev, "Failed to open '%s'\n", link_name);
+		netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
			   link_name, instance, rc);
 		return rc;
 	}
 	if (jumbo_num != 0) {
@@ -1193,12 +1296,21 @@ static int tile_net_open(struct net_device *dev)
 static int tile_net_open(struct net_device *dev)
 {
 	struct tile_net_priv *priv = netdev_priv(dev);
-	int cpu, rc;
+	int cpu, rc, instance;
 
 	mutex_lock(&tile_net_devs_for_channel_mutex);
 
-	/* Do one-time initialization the first time any device is opened. */
-	if (ingress_irq < 0) {
+	/* Get the instance info. */
+	rc = gxio_mpipe_link_instance(dev->name);
+	if (rc < 0 || rc >= NR_MPIPE_MAX)
+		return -EIO;
+
+	priv->instance = rc;
+	instance = rc;
+	if (!mpipe_data[rc].context.mmio_fast_base) {
+		/* Do one-time initialization per instance the first time
+		 * any device is opened.
+		 */
 		rc = tile_net_init_mpipe(dev);
 		if (rc != 0)
 			goto fail;
@@ -1229,7 +1341,7 @@ static int tile_net_open(struct net_device *dev)
 	if (rc != 0)
 		goto fail;
 
-	tile_net_devs_for_channel[priv->channel] = dev;
+	mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;
 
 	rc = tile_net_update(dev);
 	if (rc != 0)
@@ -1241,7 +1353,7 @@ static int tile_net_open(struct net_device *dev)
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
 		struct tile_net_tx_wake *tx_wake =
-			&info->tx_wake[priv->echannel];
+			&info->mpipe[instance].tx_wake[priv->echannel];
 
 		hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
 			     HRTIMER_MODE_REL);
@@ -1267,7 +1379,7 @@ static int tile_net_open(struct net_device *dev)
 		priv->channel = -1;
 	}
 	priv->echannel = -1;
-	tile_net_devs_for_channel[priv->channel] = NULL;
+	mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL;
 	mutex_unlock(&tile_net_devs_for_channel_mutex);
 
 	/* Don't return raw gxio error codes to generic Linux. */
@@ -1279,18 +1391,20 @@ static int tile_net_stop(struct net_device *dev)
 {
 	struct tile_net_priv *priv = netdev_priv(dev);
 	int cpu;
+	int instance = priv->instance;
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
 		struct tile_net_tx_wake *tx_wake =
-			&info->tx_wake[priv->echannel];
+			&info->mpipe[instance].tx_wake[priv->echannel];
 
 		hrtimer_cancel(&tx_wake->timer);
 		netif_stop_subqueue(dev, cpu);
 	}
 
 	mutex_lock(&tile_net_devs_for_channel_mutex);
-	tile_net_devs_for_channel[priv->channel] = NULL;
+	md->tile_net_devs_for_channel[priv->channel] = NULL;
 	(void)tile_net_update(dev);
 	if (priv->loopify_channel >= 0) {
 		if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
@@ -1500,6 +1614,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 		       struct sk_buff *skb, unsigned char *headers, s64 slot)
 {
 	struct skb_shared_info *sh = skb_shinfo(skb);
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	unsigned int data_len = skb->len - sh_len;
 	unsigned int p_len = sh->gso_size;
@@ -1522,8 +1638,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 	edesc_head.xfer_size = sh_len;
 
 	/* This is only used to specify the TLB. */
-	edesc_head.stack_idx = first_buffer_stack;
-	edesc_body.stack_idx = first_buffer_stack;
+	edesc_head.stack_idx = md->first_buffer_stack;
+	edesc_body.stack_idx = md->first_buffer_stack;
 
 	/* Egress all the edescs. */
 	for (segment = 0; segment < sh->gso_segs; segment++) {
@@ -1598,8 +1714,11 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	struct tile_net_priv *priv = netdev_priv(dev);
 	int channel = priv->echannel;
-	struct tile_net_egress *egress = &egress_for_echannel[channel];
-	struct tile_net_comps *comps = info->comps_for_echannel[channel];
+	int instance = priv->instance;
+	struct mpipe_data *md = &mpipe_data[instance];
+	struct tile_net_egress *egress = &md->egress_for_echannel[channel];
+	struct tile_net_comps *comps =
+		info->mpipe[instance].comps_for_echannel[channel];
 	gxio_mpipe_equeue_t *equeue = egress->equeue;
 	unsigned long irqflags;
 	int num_edescs;
@@ -1663,10 +1782,13 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	struct tile_net_priv *priv = netdev_priv(dev);
-	struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+	int instance = priv->instance;
+	struct mpipe_data *md = &mpipe_data[instance];
+	struct tile_net_egress *egress =
+		&md->egress_for_echannel[priv->echannel];
 	gxio_mpipe_equeue_t *equeue = egress->equeue;
 	struct tile_net_comps *comps =
-		info->comps_for_echannel[priv->echannel];
+		info->mpipe[instance].comps_for_echannel[priv->echannel];
 	unsigned int len = skb->len;
 	unsigned char *data = skb->data;
 	unsigned int num_edescs;
@@ -1683,7 +1805,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 	num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
 
 	/* This is only used to specify the TLB. */
-	edesc.stack_idx = first_buffer_stack;
+	edesc.stack_idx = md->first_buffer_stack;
 
 	/* Prepare the edescs. */
 	for (i = 0; i < num_edescs; i++) {
@@ -1790,9 +1912,13 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
 */
 static void tile_net_netpoll(struct net_device *dev)
 {
-	disable_percpu_irq(ingress_irq);
-	tile_net_handle_ingress_irq(ingress_irq, NULL);
-	enable_percpu_irq(ingress_irq, 0);
+	int instance = mpipe_instance(dev);
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct mpipe_data *md = &mpipe_data[instance];
+
+	disable_percpu_irq(md->ingress_irq);
+	napi_schedule(&info->mpipe[instance].napi);
+	enable_percpu_irq(md->ingress_irq, 0);
 }
 #endif
@@ -1895,9 +2021,12 @@ static void tile_net_init_module_percpu(void *unused)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	int my_cpu = smp_processor_id();
+	int instance;
 
-	info->has_iqueue = false;
-
+	for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
+		info->mpipe[instance].has_iqueue = false;
+		info->mpipe[instance].instance = instance;
+	}
 	info->my_cpu = my_cpu;
 
 	/* Initialize the egress timer. */
@@ -1914,6 +2043,8 @@ static int __init tile_net_init_module(void)
 	pr_info("Tilera Network Driver\n");
 
+	BUILD_BUG_ON(NR_MPIPE_MAX != 2);
+
 	mutex_init(&tile_net_devs_for_channel_mutex);
 
 	/* Initialize each CPU. */
...