Commit 9d60f0ea authored by David S. Miller

Merge branch 'nfp-update-to-control-structures'

Jakub Kicinski says:

====================
nfp: update to control structures

This series prepares NFP control structures for crypto offloads.
So far we mostly dealt with configuration requests under rtnl lock.
This will no longer be the case with crypto.  Additionally we will
try to reuse the BPF control message format, so we move common code
out of BPF.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1deeb640 bcf0cafa
...@@ -15,6 +15,7 @@ nfp-objs := \ ...@@ -15,6 +15,7 @@ nfp-objs := \
nfpcore/nfp_resource.o \ nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \ nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \ nfpcore/nfp_target.o \
ccm.o \
nfp_asm.o \ nfp_asm.o \
nfp_app.o \ nfp_app.o \
nfp_app_nic.o \ nfp_app_nic.o \
......
...@@ -261,10 +261,15 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm) ...@@ -261,10 +261,15 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed) int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
{ {
const u32 cmd = NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET;
struct nfp_net *nn = alink->vnic; struct nfp_net *nn = alink->vnic;
unsigned int i; unsigned int i;
int err; int err;
err = nfp_net_mbox_lock(nn, alink->abm->prio_map_len);
if (err)
return err;
/* Write data_len and wipe reserved */ /* Write data_len and wipe reserved */
nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN, nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
alink->abm->prio_map_len); alink->abm->prio_map_len);
...@@ -273,8 +278,7 @@ int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed) ...@@ -273,8 +278,7 @@ int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i, nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
packed[i / sizeof(u32)]); packed[i / sizeof(u32)]);
err = nfp_net_reconfig_mbox(nn, err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
if (err) if (err)
nfp_err(alink->abm->app->cpp, nfp_err(alink->abm->app->cpp,
"setting DSCP -> VQ map failed with error %d\n", err); "setting DSCP -> VQ map failed with error %d\n", err);
......
...@@ -6,48 +6,13 @@ ...@@ -6,48 +6,13 @@
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/wait.h>
#include "../ccm.h"
#include "../nfp_app.h" #include "../nfp_app.h"
#include "../nfp_net.h" #include "../nfp_net.h"
#include "fw.h" #include "fw.h"
#include "main.h" #include "main.h"
#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
u16 used_tags;
used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}
static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
/* All FW communication for BPF is request-reply. To make sure we
* don't reuse the message ID too early after timeout - limit the
* number of requests in flight.
*/
if (nfp_bpf_all_tags_busy(bpf)) {
cmsg_warn(bpf, "all FW request contexts busy!\n");
return -EAGAIN;
}
WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
return bpf->tag_alloc_next++;
}
static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
bpf->tag_alloc_last != bpf->tag_alloc_next)
bpf->tag_alloc_last++;
}
static struct sk_buff * static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size) nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{ {
...@@ -87,149 +52,6 @@ nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n) ...@@ -87,149 +52,6 @@ nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
return size; return size;
} }
static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
{
struct cmsg_hdr *hdr;
hdr = (struct cmsg_hdr *)skb->data;
return hdr->type;
}
static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
struct cmsg_hdr *hdr;
hdr = (struct cmsg_hdr *)skb->data;
return be16_to_cpu(hdr->tag);
}
static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
unsigned int msg_tag;
struct sk_buff *skb;
skb_queue_walk(&bpf->cmsg_replies, skb) {
msg_tag = nfp_bpf_cmsg_get_tag(skb);
if (msg_tag == tag) {
nfp_bpf_free_tag(bpf, tag);
__skb_unlink(skb, &bpf->cmsg_replies);
return skb;
}
}
return NULL;
}
static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
struct sk_buff *skb;
nfp_ctrl_lock(bpf->app->ctrl);
skb = __nfp_bpf_reply(bpf, tag);
nfp_ctrl_unlock(bpf->app->ctrl);
return skb;
}
static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
struct sk_buff *skb;
nfp_ctrl_lock(bpf->app->ctrl);
skb = __nfp_bpf_reply(bpf, tag);
if (!skb)
nfp_bpf_free_tag(bpf, tag);
nfp_ctrl_unlock(bpf->app->ctrl);
return skb;
}
static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
int tag)
{
struct sk_buff *skb;
int i, err;
for (i = 0; i < 50; i++) {
udelay(4);
skb = nfp_bpf_reply(bpf, tag);
if (skb)
return skb;
}
err = wait_event_interruptible_timeout(bpf->cmsg_wq,
skb = nfp_bpf_reply(bpf, tag),
msecs_to_jiffies(5000));
/* We didn't get a response - try last time and atomically drop
* the tag even if no response is matched.
*/
if (!skb)
skb = nfp_bpf_reply_drop_tag(bpf, tag);
if (err < 0) {
cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
err == ERESTARTSYS ? "interrupted" : "error",
type, err);
return ERR_PTR(err);
}
if (!skb) {
cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
type);
return ERR_PTR(-ETIMEDOUT);
}
return skb;
}
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
struct cmsg_hdr *hdr;
int tag;
nfp_ctrl_lock(bpf->app->ctrl);
tag = nfp_bpf_alloc_tag(bpf);
if (tag < 0) {
nfp_ctrl_unlock(bpf->app->ctrl);
dev_kfree_skb_any(skb);
return ERR_PTR(tag);
}
hdr = (void *)skb->data;
hdr->ver = CMSG_MAP_ABI_VERSION;
hdr->type = type;
hdr->tag = cpu_to_be16(tag);
__nfp_app_ctrl_tx(bpf->app, skb);
nfp_ctrl_unlock(bpf->app->ctrl);
skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
if (IS_ERR(skb))
return skb;
hdr = (struct cmsg_hdr *)skb->data;
if (hdr->type != __CMSG_REPLY(type)) {
cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
hdr->type, __CMSG_REPLY(type));
goto err_free;
}
/* 0 reply_size means caller will do the validation */
if (reply_size && skb->len != reply_size) {
cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
type, skb->len, reply_size);
goto err_free;
}
return skb;
err_free:
dev_kfree_skb_any(skb);
return ERR_PTR(-EIO);
}
static int static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf, nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
struct cmsg_reply_map_simple *reply) struct cmsg_reply_map_simple *reply)
...@@ -275,8 +97,8 @@ nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map) ...@@ -275,8 +97,8 @@ nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
req->map_type = cpu_to_be32(map->map_type); req->map_type = cpu_to_be32(map->map_type);
req->map_flags = 0; req->map_flags = 0;
skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC, skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
sizeof(*reply)); sizeof(*reply));
if (IS_ERR(skb)) if (IS_ERR(skb))
return PTR_ERR(skb); return PTR_ERR(skb);
...@@ -310,8 +132,8 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map) ...@@ -310,8 +132,8 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
req = (void *)skb->data; req = (void *)skb->data;
req->tid = cpu_to_be32(nfp_map->tid); req->tid = cpu_to_be32(nfp_map->tid);
skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE, skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
sizeof(*reply)); sizeof(*reply));
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
cmsg_warn(bpf, "leaking map - I/O error\n"); cmsg_warn(bpf, "leaking map - I/O error\n");
return; return;
...@@ -354,8 +176,7 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply, ...@@ -354,8 +176,7 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
} }
static int static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
enum nfp_bpf_cmsg_type op,
u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value) u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{ {
struct nfp_bpf_map *nfp_map = offmap->dev_priv; struct nfp_bpf_map *nfp_map = offmap->dev_priv;
...@@ -386,8 +207,8 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, ...@@ -386,8 +207,8 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value, memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
map->value_size); map->value_size);
skb = nfp_bpf_cmsg_communicate(bpf, skb, op, skb = nfp_ccm_communicate(&bpf->ccm, skb, op,
nfp_bpf_cmsg_map_reply_size(bpf, 1)); nfp_bpf_cmsg_map_reply_size(bpf, 1));
if (IS_ERR(skb)) if (IS_ERR(skb))
return PTR_ERR(skb); return PTR_ERR(skb);
...@@ -415,34 +236,34 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, ...@@ -415,34 +236,34 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags) void *key, void *value, u64 flags)
{ {
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE, return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
key, value, flags, NULL, NULL); key, value, flags, NULL, NULL);
} }
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key) int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{ {
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE, return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
key, NULL, 0, NULL, NULL); key, NULL, 0, NULL, NULL);
} }
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value) void *key, void *value)
{ {
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP, return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
key, NULL, 0, NULL, value); key, NULL, 0, NULL, value);
} }
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
void *next_key) void *next_key)
{ {
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST, return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
NULL, NULL, 0, next_key, NULL); NULL, NULL, 0, next_key, NULL);
} }
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key) void *key, void *next_key)
{ {
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT, return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
key, NULL, 0, next_key, NULL); key, NULL, 0, next_key, NULL);
} }
...@@ -456,54 +277,35 @@ unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf) ...@@ -456,54 +277,35 @@ unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{ {
struct nfp_app_bpf *bpf = app->priv; struct nfp_app_bpf *bpf = app->priv;
unsigned int tag;
if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) { if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len); cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
goto err_free; dev_kfree_skb_any(skb);
return;
} }
if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) { if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
if (!nfp_bpf_event_output(bpf, skb->data, skb->len)) if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
dev_consume_skb_any(skb); dev_consume_skb_any(skb);
else else
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return;
} }
nfp_ctrl_lock(bpf->app->ctrl); nfp_ccm_rx(&bpf->ccm, skb);
tag = nfp_bpf_cmsg_get_tag(skb);
if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
tag);
goto err_unlock;
}
__skb_queue_tail(&bpf->cmsg_replies, skb);
wake_up_interruptible_all(&bpf->cmsg_wq);
nfp_ctrl_unlock(bpf->app->ctrl);
return;
err_unlock:
nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
dev_kfree_skb_any(skb);
} }
void void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len) nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{ {
const struct nfp_ccm_hdr *hdr = data;
struct nfp_app_bpf *bpf = app->priv; struct nfp_app_bpf *bpf = app->priv;
const struct cmsg_hdr *hdr = data;
if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) { if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", len); cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
return; return;
} }
if (hdr->type == CMSG_TYPE_BPF_EVENT) if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
nfp_bpf_event_output(bpf, data, len); nfp_bpf_event_output(bpf, data, len);
else else
cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n", cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/types.h> #include <linux/types.h>
#include "../ccm.h"
/* Kernel's enum bpf_reg_type is not uABI so people may change it breaking /* Kernel's enum bpf_reg_type is not uABI so people may change it breaking
* our FW ABI. In that case we will do translation in the driver. * our FW ABI. In that case we will do translation in the driver.
...@@ -52,22 +53,6 @@ struct nfp_bpf_cap_tlv_maps { ...@@ -52,22 +53,6 @@ struct nfp_bpf_cap_tlv_maps {
/* /*
* Types defined for map related control messages * Types defined for map related control messages
*/ */
#define CMSG_MAP_ABI_VERSION 1
enum nfp_bpf_cmsg_type {
CMSG_TYPE_MAP_ALLOC = 1,
CMSG_TYPE_MAP_FREE = 2,
CMSG_TYPE_MAP_LOOKUP = 3,
CMSG_TYPE_MAP_UPDATE = 4,
CMSG_TYPE_MAP_DELETE = 5,
CMSG_TYPE_MAP_GETNEXT = 6,
CMSG_TYPE_MAP_GETFIRST = 7,
CMSG_TYPE_BPF_EVENT = 8,
__CMSG_TYPE_MAP_MAX,
};
#define CMSG_TYPE_MAP_REPLY_BIT 7
#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
/* BPF ABIv2 fixed-length control message fields */ /* BPF ABIv2 fixed-length control message fields */
#define CMSG_MAP_KEY_LW 16 #define CMSG_MAP_KEY_LW 16
...@@ -84,19 +69,13 @@ enum nfp_bpf_cmsg_status { ...@@ -84,19 +69,13 @@ enum nfp_bpf_cmsg_status {
CMSG_RC_ERR_MAP_E2BIG = 7, CMSG_RC_ERR_MAP_E2BIG = 7,
}; };
struct cmsg_hdr {
u8 type;
u8 ver;
__be16 tag;
};
struct cmsg_reply_map_simple { struct cmsg_reply_map_simple {
struct cmsg_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 rc; __be32 rc;
}; };
struct cmsg_req_map_alloc_tbl { struct cmsg_req_map_alloc_tbl {
struct cmsg_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 key_size; /* in bytes */ __be32 key_size; /* in bytes */
__be32 value_size; /* in bytes */ __be32 value_size; /* in bytes */
__be32 max_entries; __be32 max_entries;
...@@ -110,7 +89,7 @@ struct cmsg_reply_map_alloc_tbl { ...@@ -110,7 +89,7 @@ struct cmsg_reply_map_alloc_tbl {
}; };
struct cmsg_req_map_free_tbl { struct cmsg_req_map_free_tbl {
struct cmsg_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 tid; __be32 tid;
}; };
...@@ -120,7 +99,7 @@ struct cmsg_reply_map_free_tbl { ...@@ -120,7 +99,7 @@ struct cmsg_reply_map_free_tbl {
}; };
struct cmsg_req_map_op { struct cmsg_req_map_op {
struct cmsg_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 tid; __be32 tid;
__be32 count; __be32 count;
__be32 flags; __be32 flags;
...@@ -135,7 +114,7 @@ struct cmsg_reply_map_op { ...@@ -135,7 +114,7 @@ struct cmsg_reply_map_op {
}; };
struct cmsg_bpf_event { struct cmsg_bpf_event {
struct cmsg_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 cpu_id; __be32 cpu_id;
__be64 map_ptr; __be64 map_ptr;
__be32 data_size; __be32 data_size;
......
...@@ -442,14 +442,16 @@ static int nfp_bpf_init(struct nfp_app *app) ...@@ -442,14 +442,16 @@ static int nfp_bpf_init(struct nfp_app *app)
bpf->app = app; bpf->app = app;
app->priv = bpf; app->priv = bpf;
skb_queue_head_init(&bpf->cmsg_replies);
init_waitqueue_head(&bpf->cmsg_wq);
INIT_LIST_HEAD(&bpf->map_list); INIT_LIST_HEAD(&bpf->map_list);
err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params); err = nfp_ccm_init(&bpf->ccm, app);
if (err) if (err)
goto err_free_bpf; goto err_free_bpf;
err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
if (err)
goto err_clean_ccm;
nfp_bpf_init_capabilities(bpf); nfp_bpf_init_capabilities(bpf);
err = nfp_bpf_parse_capabilities(app); err = nfp_bpf_parse_capabilities(app);
...@@ -474,6 +476,8 @@ static int nfp_bpf_init(struct nfp_app *app) ...@@ -474,6 +476,8 @@ static int nfp_bpf_init(struct nfp_app *app)
err_free_neutral_maps: err_free_neutral_maps:
rhashtable_destroy(&bpf->maps_neutral); rhashtable_destroy(&bpf->maps_neutral);
err_clean_ccm:
nfp_ccm_clean(&bpf->ccm);
err_free_bpf: err_free_bpf:
kfree(bpf); kfree(bpf);
return err; return err;
...@@ -484,7 +488,7 @@ static void nfp_bpf_clean(struct nfp_app *app) ...@@ -484,7 +488,7 @@ static void nfp_bpf_clean(struct nfp_app *app)
struct nfp_app_bpf *bpf = app->priv; struct nfp_app_bpf *bpf = app->priv;
bpf_offload_dev_destroy(bpf->bpf_dev); bpf_offload_dev_destroy(bpf->bpf_dev);
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies)); nfp_ccm_clean(&bpf->ccm);
WARN_ON(!list_empty(&bpf->map_list)); WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use); WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
rhashtable_free_and_destroy(&bpf->maps_neutral, rhashtable_free_and_destroy(&bpf->maps_neutral,
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/wait.h> #include <linux/wait.h>
#include "../ccm.h"
#include "../nfp_asm.h" #include "../nfp_asm.h"
#include "fw.h" #include "fw.h"
...@@ -84,16 +85,10 @@ enum pkt_vec { ...@@ -84,16 +85,10 @@ enum pkt_vec {
/** /**
* struct nfp_app_bpf - bpf app priv structure * struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app * @app: backpointer to the app
* @ccm: common control message handler data
* *
* @bpf_dev: BPF offload device handle * @bpf_dev: BPF offload device handle
* *
* @tag_allocator: bitmap of control message tags in use
* @tag_alloc_next: next tag bit to allocate
* @tag_alloc_last: next tag bit to be freed
*
* @cmsg_replies: received cmsg replies waiting to be consumed
* @cmsg_wq: work queue for waiting for cmsg replies
*
* @cmsg_key_sz: size of key in cmsg element array * @cmsg_key_sz: size of key in cmsg element array
* @cmsg_val_sz: size of value in cmsg element array * @cmsg_val_sz: size of value in cmsg element array
* *
...@@ -132,16 +127,10 @@ enum pkt_vec { ...@@ -132,16 +127,10 @@ enum pkt_vec {
*/ */
struct nfp_app_bpf { struct nfp_app_bpf {
struct nfp_app *app; struct nfp_app *app;
struct nfp_ccm ccm;
struct bpf_offload_dev *bpf_dev; struct bpf_offload_dev *bpf_dev;
DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
u16 tag_alloc_next;
u16 tag_alloc_last;
struct sk_buff_head cmsg_replies;
struct wait_queue_head cmsg_wq;
unsigned int cmsg_key_sz; unsigned int cmsg_key_sz;
unsigned int cmsg_val_sz; unsigned int cmsg_val_sz;
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_mirred.h>
#include "main.h" #include "main.h"
#include "../ccm.h"
#include "../nfp_app.h" #include "../nfp_app.h"
#include "../nfp_net_ctrl.h" #include "../nfp_net_ctrl.h"
#include "../nfp_net.h" #include "../nfp_net.h"
...@@ -452,7 +453,7 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, ...@@ -452,7 +453,7 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL; return -EINVAL;
if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION) if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL; return -EINVAL;
rcu_read_lock(); rcu_read_lock();
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
#include <linux/bitops.h>
#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net.h"
#define NFP_CCM_TYPE_REPLY_BIT 7
#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
#define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg)
#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
{
	/* Tags come from a wrapping 16-bit counter; the number of
	 * outstanding requests is the distance between next and last.
	 */
	u16 in_flight = ccm->tag_alloc_next - ccm->tag_alloc_last;

	return in_flight > NFP_CCM_TAG_ALLOC_SPAN;
}
static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
{
/* CCM is for FW communication which is request-reply. To make sure
* we don't reuse the message ID too early after timeout - limit the
* number of requests in flight.
*/
if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
ccm_warn(ccm->app, "all FW request contexts busy!\n");
return -EAGAIN;
}
WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
return ccm->tag_alloc_next++;
}
static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));

	/* Advance the oldest-outstanding marker past any tags which
	 * have already been freed out of order.
	 */
	for (; ccm->tag_alloc_last != ccm->tag_alloc_next;
	     ccm->tag_alloc_last++)
		if (test_bit(ccm->tag_alloc_last, ccm->tag_allocator))
			break;
}
/* Find and dequeue the reply matching @tag, releasing the tag on a hit.
 * Caller must hold the control channel lock (nfp_ctrl_lock()).
 * Returns the reply skb, or NULL if no matching reply has arrived yet.
 */
static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&ccm->replies, skb) {
		msg_tag = nfp_ccm_get_tag(skb);
		if (msg_tag == tag) {
			nfp_ccm_free_tag(ccm, tag);
			/* Unlinking mid-walk is safe - we return at once. */
			__skb_unlink(skb, &ccm->replies);
			return skb;
		}
	}

	return NULL;
}
/* Locked wrapper around __nfp_ccm_reply(). */
static struct sk_buff *
nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
{
	struct sk_buff *reply;

	nfp_ctrl_lock(app->ctrl);
	reply = __nfp_ccm_reply(ccm, tag);
	nfp_ctrl_unlock(app->ctrl);

	return reply;
}
/* Like nfp_ccm_reply(), but if no reply matched, atomically release the
 * tag so a late reply with this tag is dropped instead of matched.
 */
static struct sk_buff *
nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
{
	struct sk_buff *reply;

	nfp_ctrl_lock(app->ctrl);
	reply = __nfp_ccm_reply(ccm, tag);
	if (!reply)
		nfp_ccm_free_tag(ccm, tag);
	nfp_ctrl_unlock(app->ctrl);

	return reply;
}
static struct sk_buff *
nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
enum nfp_ccm_type type, int tag)
{
struct sk_buff *skb;
int i, err;
for (i = 0; i < 50; i++) {
udelay(4);
skb = nfp_ccm_reply(ccm, app, tag);
if (skb)
return skb;
}
err = wait_event_interruptible_timeout(ccm->wq,
skb = nfp_ccm_reply(ccm, app,
tag),
msecs_to_jiffies(5000));
/* We didn't get a response - try last time and atomically drop
* the tag even if no response is matched.
*/
if (!skb)
skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
if (err < 0) {
ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
err == ERESTARTSYS ? "interrupted" : "error",
type, err);
return ERR_PTR(err);
}
if (!skb) {
ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
return ERR_PTR(-ETIMEDOUT);
}
return skb;
}
/**
 * nfp_ccm_communicate() - send a control message and wait for the reply
 * @ccm:	CCM instance to send on
 * @skb:	request message; its CCM header is filled in here, ownership
 *		is taken (freed on error paths)
 * @type:	request message type
 * @reply_size:	exact expected reply length, or 0 if the caller validates
 *		the reply size itself
 *
 * Return: reply skb on success, ERR_PTR() on failure.
 */
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
		    enum nfp_ccm_type type, unsigned int reply_size)
{
	struct nfp_app *app = ccm->app;
	struct nfp_ccm_hdr *hdr;
	int reply_type, tag;

	/* Tag allocation and TX happen under the control channel lock so
	 * the reply cannot race with the request being sent.
	 */
	nfp_ctrl_lock(app->ctrl);
	tag = nfp_ccm_alloc_tag(ccm);
	if (tag < 0) {
		nfp_ctrl_unlock(app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = NFP_CCM_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(app, skb);

	nfp_ctrl_unlock(app->ctrl);

	skb = nfp_ccm_wait_reply(ccm, app, type, tag);
	if (IS_ERR(skb))
		return skb;

	/* A valid reply carries the request type with the reply bit set. */
	reply_type = nfp_ccm_get_type(skb);
	if (reply_type != __NFP_CCM_REPLY(type)) {
		ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			 reply_type, __NFP_CCM_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			 type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}
/**
 * nfp_ccm_rx() - handle an incoming control message from the FW
 * @ccm:	CCM instance
 * @skb:	received message; consumed here (queued for a waiter or freed)
 *
 * Match the message tag against outstanding requests and queue it for
 * the waiter; drop messages nobody is waiting for.
 */
void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
{
	struct nfp_app *app = ccm->app;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
		ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	nfp_ctrl_lock(app->ctrl);

	tag = nfp_ccm_get_tag(skb);
	/* Only queue replies for tags that are actually outstanding. */
	if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
		ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
			tag);
		goto err_unlock;
	}

	__skb_queue_tail(&ccm->replies, skb);
	wake_up_interruptible_all(&ccm->wq);

	nfp_ctrl_unlock(app->ctrl);
	return;

err_unlock:
	nfp_ctrl_unlock(app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}
/* Initialize a CCM instance; tag allocator state is left as provided by
 * the caller's allocation.  Always returns 0.
 */
int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
{
	ccm->app = app;

	skb_queue_head_init(&ccm->replies);
	init_waitqueue_head(&ccm->wq);

	return 0;
}
/* Tear down a CCM instance; warns if any replies were never consumed. */
void nfp_ccm_clean(struct nfp_ccm *ccm)
{
	WARN_ON(!skb_queue_empty(&ccm->replies));
}
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
#ifndef NFP_CCM_H
#define NFP_CCM_H 1
#include <linux/bitmap.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
struct nfp_app;
/* Firmware ABI */
/* Control message types.  Values are part of the FW ABI - do not
 * renumber.  BPF map operations carry the map request/reply payloads
 * defined in bpf/fw.h.
 */
enum nfp_ccm_type {
	NFP_CCM_TYPE_BPF_MAP_ALLOC = 1,
	NFP_CCM_TYPE_BPF_MAP_FREE = 2,
	NFP_CCM_TYPE_BPF_MAP_LOOKUP = 3,
	NFP_CCM_TYPE_BPF_MAP_UPDATE = 4,
	NFP_CCM_TYPE_BPF_MAP_DELETE = 5,
	NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6,
	NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7,
	NFP_CCM_TYPE_BPF_BPF_EVENT = 8,
	__NFP_CCM_TYPE_MAX,
};
/* ABI version written into every message header. */
#define NFP_CCM_ABI_VERSION 1

/* Header prepended to every control message; @tag is big-endian. */
struct nfp_ccm_hdr {
	u8 type;
	u8 ver;
	__be16 tag;
};
static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
{
struct nfp_ccm_hdr *hdr;
hdr = (struct nfp_ccm_hdr *)skb->data;
return hdr->type;
}
static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
{
struct nfp_ccm_hdr *hdr;
hdr = (struct nfp_ccm_hdr *)skb->data;
return be16_to_cpu(hdr->tag);
}
/* Implementation */
/**
 * struct nfp_ccm - common control message handling
 * @app: backpointer to the app this CCM instance belongs to
 *
 * @tag_allocator: bitmap of control message tags in use
 * @tag_alloc_next: next tag bit to allocate
 * @tag_alloc_last: next tag bit to be freed
 *
 * @replies: received cmsg replies waiting to be consumed
 * @wq: work queue for waiting for cmsg replies
 */
struct nfp_ccm {
	struct nfp_app *app;
	DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
	u16 tag_alloc_next;
	u16 tag_alloc_last;
	struct sk_buff_head replies;
	struct wait_queue_head wq;
};
int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app);
void nfp_ccm_clean(struct nfp_ccm *ccm);
void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
enum nfp_ccm_type type, unsigned int reply_size);
#endif
...@@ -539,12 +539,17 @@ struct nfp_net_dp { ...@@ -539,12 +539,17 @@ struct nfp_net_dp {
* @shared_handler: Handler for shared interrupts * @shared_handler: Handler for shared interrupts
* @shared_name: Name for shared interrupt * @shared_name: Name for shared interrupt
* @me_freq_mhz: ME clock_freq (MHz) * @me_freq_mhz: ME clock_freq (MHz)
* @reconfig_lock: Protects HW reconfiguration request regs/machinery * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
* @reconfig_sync_present and HW reconfiguration request
* regs/machinery from async requests (sync must take
* @bar_lock)
* @reconfig_posted: Pending reconfig bits coming from async sources * @reconfig_posted: Pending reconfig bits coming from async sources
* @reconfig_timer_active: Timer for reading reconfiguration results is pending * @reconfig_timer_active: Timer for reading reconfiguration results is pending
* @reconfig_sync_present: Some thread is performing synchronous reconfig * @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results * @reconfig_timer: Timer for async reading of reconfig results
* @reconfig_in_progress_update: Update FW is processing now (debug only) * @reconfig_in_progress_update: Update FW is processing now (debug only)
* @bar_lock: vNIC config BAR access lock, protects: update,
* mailbox area
* @link_up: Is the link up? * @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading * @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
...@@ -615,6 +620,8 @@ struct nfp_net { ...@@ -615,6 +620,8 @@ struct nfp_net {
struct timer_list reconfig_timer; struct timer_list reconfig_timer;
u32 reconfig_in_progress_update; u32 reconfig_in_progress_update;
struct mutex bar_lock;
u32 rx_coalesce_usecs; u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames; u32 rx_coalesce_max_frames;
u32 tx_coalesce_usecs; u32 tx_coalesce_usecs;
...@@ -839,6 +846,16 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn) ...@@ -839,6 +846,16 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn)
spin_unlock_bh(&nn->r_vecs[0].lock); spin_unlock_bh(&nn->r_vecs[0].lock);
} }
static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
{
mutex_lock(&nn->bar_lock);
}
static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
{
mutex_unlock(&nn->bar_lock);
}
/* Globals */ /* Globals */
extern const char nfp_driver_version[]; extern const char nfp_driver_version[];
...@@ -871,7 +888,9 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn); ...@@ -871,7 +888,9 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn); void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn); void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd); int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
unsigned int unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/ip.h> #include <linux/ip.h>
#include <linux/ipv6.h> #include <linux/ipv6.h>
#include <linux/lockdep.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/overflow.h> #include <linux/overflow.h>
#include <linux/page_ref.h> #include <linux/page_ref.h>
...@@ -137,20 +138,37 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check) ...@@ -137,20 +138,37 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
return false; return false;
} }
static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline) static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{ {
bool timed_out = false; bool timed_out = false;
int i;
/* Poll update field, waiting for NFP to ack the config.
* Do an opportunistic wait-busy loop, afterward sleep.
*/
for (i = 0; i < 50; i++) {
if (nfp_net_reconfig_check_done(nn, false))
return false;
udelay(4);
}
/* Poll update field, waiting for NFP to ack the config */
while (!nfp_net_reconfig_check_done(nn, timed_out)) { while (!nfp_net_reconfig_check_done(nn, timed_out)) {
msleep(1); usleep_range(250, 500);
timed_out = time_is_before_eq_jiffies(deadline); timed_out = time_is_before_eq_jiffies(deadline);
} }
return timed_out;
}
static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
if (__nfp_net_reconfig_wait(nn, deadline))
return -EIO;
if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR) if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
return -EIO; return -EIO;
return timed_out ? -EIO : 0; return 0;
} }
static void nfp_net_reconfig_timer(struct timer_list *t) static void nfp_net_reconfig_timer(struct timer_list *t)
...@@ -243,7 +261,7 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn) ...@@ -243,7 +261,7 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
} }
/** /**
* nfp_net_reconfig() - Reconfigure the firmware * __nfp_net_reconfig() - Reconfigure the firmware
* @nn: NFP Net device to reconfigure * @nn: NFP Net device to reconfigure
* @update: The value for the update field in the BAR config * @update: The value for the update field in the BAR config
* *
...@@ -253,10 +271,12 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn) ...@@ -253,10 +271,12 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
* *
* Return: Negative errno on error, 0 on success * Return: Negative errno on error, 0 on success
*/ */
int nfp_net_reconfig(struct nfp_net *nn, u32 update) static int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{ {
int ret; int ret;
lockdep_assert_held(&nn->bar_lock);
nfp_net_reconfig_sync_enter(nn); nfp_net_reconfig_sync_enter(nn);
nfp_net_reconfig_start(nn, update); nfp_net_reconfig_start(nn, update);
...@@ -274,8 +294,31 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) ...@@ -274,8 +294,31 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
return ret; return ret;
} }
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
int ret;
nn_ctrl_bar_lock(nn);
ret = __nfp_net_reconfig(nn, update);
nn_ctrl_bar_unlock(nn);
return ret;
}
int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
{
if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
nn_err(nn, "mailbox too small for %u of data (%u)\n",
data_size, nn->tlv_caps.mbox_len);
return -EIO;
}
nn_ctrl_bar_lock(nn);
return 0;
}
/** /**
* nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
* @nn: NFP Net device to reconfigure * @nn: NFP Net device to reconfigure
* @mbox_cmd: The value for the mailbox command * @mbox_cmd: The value for the mailbox command
* *
...@@ -283,19 +326,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) ...@@ -283,19 +326,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
* *
* Return: Negative errno on error, 0 on success * Return: Negative errno on error, 0 on success
*/ */
int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd) int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
{ {
u32 mbox = nn->tlv_caps.mbox_off; u32 mbox = nn->tlv_caps.mbox_off;
int ret; int ret;
if (!nfp_net_has_mbox(&nn->tlv_caps)) { lockdep_assert_held(&nn->bar_lock);
nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
return -EIO;
}
nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd); nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX); ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
if (ret) { if (ret) {
nn_err(nn, "Mailbox update error\n"); nn_err(nn, "Mailbox update error\n");
return ret; return ret;
...@@ -304,6 +343,15 @@ int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd) ...@@ -304,6 +343,15 @@ int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET); return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
} }
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
{
int ret;
ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
nn_ctrl_bar_unlock(nn);
return ret;
}
/* Interrupt configuration and handling /* Interrupt configuration and handling
*/ */
...@@ -3111,7 +3159,9 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -3111,7 +3159,9 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
static int static int
nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ {
const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
struct nfp_net *nn = netdev_priv(netdev); struct nfp_net *nn = netdev_priv(netdev);
int err;
/* Priority tagged packets with vlan id 0 are processed by the /* Priority tagged packets with vlan id 0 are processed by the
* NFP as untagged packets * NFP as untagged packets
...@@ -3119,17 +3169,23 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) ...@@ -3119,17 +3169,23 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid) if (!vid)
return 0; return 0;
err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
if (err)
return err;
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid); nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO, nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
ETH_P_8021Q); ETH_P_8021Q);
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD); return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
} }
static int static int
nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ {
const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
struct nfp_net *nn = netdev_priv(netdev); struct nfp_net *nn = netdev_priv(netdev);
int err;
/* Priority tagged packets with vlan id 0 are processed by the /* Priority tagged packets with vlan id 0 are processed by the
* NFP as untagged packets * NFP as untagged packets
...@@ -3137,11 +3193,15 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) ...@@ -3137,11 +3193,15 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid) if (!vid)
return 0; return 0;
err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
if (err)
return err;
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid); nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO, nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
ETH_P_8021Q); ETH_P_8021Q);
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
} }
static void nfp_net_stat64(struct net_device *netdev, static void nfp_net_stat64(struct net_device *netdev,
...@@ -3633,6 +3693,8 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev, ...@@ -3633,6 +3693,8 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
mutex_init(&nn->bar_lock);
spin_lock_init(&nn->reconfig_lock); spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock); spin_lock_init(&nn->link_status_lock);
...@@ -3660,6 +3722,9 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev, ...@@ -3660,6 +3722,9 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
void nfp_net_free(struct nfp_net *nn) void nfp_net_free(struct nfp_net *nn)
{ {
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
mutex_destroy(&nn->bar_lock);
if (nn->dp.netdev) if (nn->dp.netdev)
free_netdev(nn->dp.netdev); free_netdev(nn->dp.netdev);
else else
...@@ -3921,9 +3986,6 @@ int nfp_net_init(struct nfp_net *nn) ...@@ -3921,9 +3986,6 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
} }
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
/* Stash the re-configuration queue away. First odd queue in TX Bar */ /* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
...@@ -3936,6 +3998,9 @@ int nfp_net_init(struct nfp_net *nn) ...@@ -3936,6 +3998,9 @@ int nfp_net_init(struct nfp_net *nn)
if (err) if (err)
return err; return err;
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
nfp_net_vecs_init(nn); nfp_net_vecs_init(nn);
if (!nn->dp.netdev) if (!nn->dp.netdev)
......
...@@ -389,7 +389,6 @@ ...@@ -389,7 +389,6 @@
#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0 #define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4 #define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8 #define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
#define NFP_NET_CFG_MBOX_SIMPLE_LEN 12
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
...@@ -495,10 +494,4 @@ struct nfp_net_tlv_caps { ...@@ -495,10 +494,4 @@ struct nfp_net_tlv_caps {
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
struct nfp_net_tlv_caps *caps); struct nfp_net_tlv_caps *caps);
static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps)
{
return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN;
}
#endif /* _NFP_NET_CTRL_H_ */ #endif /* _NFP_NET_CTRL_H_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment