Commit 015cdf68 authored by David S. Miller

Merge branch 'cxgb4'

Hariprasad Shenai says:

====================
This patch series provides miscellaneous fixes for Chelsio T4/T5 adapters
related to server entries and server filter entries.

It also fixes a bug in ULDs (Upper Level Drivers) such as iw_cxgb4, which
calculate wrong tuple values on T5 adapters. A new API, cxgb4_select_ntuple,
is exported so that Upper Level Drivers like iw_cxgb4 can calculate tuple
values correctly.

The patch series is created against David Miller's 'net' tree and includes
patches for the cxgb4 and iw_cxgb4 drivers.

Patch 8/8 (RDMA-cxgb4-Use-cxgb4_select_ntuple-to-correctly-calc.patch)
has a build dependency on Patch 5/8
(cxgb4-Add-API-to-correctly-calculate-tuple-fields.patch).

Also, Patch 6/8 (RDMA-cxgb4-Calculate-the-filter-server-TID-properly.patch)
has a functional dependency on Patch 3/8
(cxgb4-Assign-filter-server-TIDs-properly.patch).

We would like to request this patch series to get merged via David Miller's
'net' tree.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know in case of any review comments.

V2 changes:
 - Removed the earlier patch which added the sftids_in_use counter, since the
   counter was not actually used anywhere in this patch series.
   Thanks to David Miller for spotting this.
   We will submit a more complete patch which uses the sftids_in_use counter
   later on.
 - Fixed a 'checkpatch.pl --strict' warning on Patch 5/8
   (cxgb4-Add-API-to-correctly-calculate-tuple-fields.patch).
 - Removed some unused #defines from Patch 5/8
   (cxgb4-Add-API-to-correctly-calculate-tuple-fields.patch).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
parents 6eb3c282 41b4f86c
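
A quick orientation before the diff: iw_cxgb4's private select_ntuple() hard-coded
the Compressed Filter Tuple layout for two known filter modes, which is what
produced the wrong tuple values on T5. The exported cxgb4_select_ntuple() instead
takes a port net_device and an L2T entry and derives each field's shift from the
cached TP parameters. A minimal sketch of the new call shape, with names taken
from the diff below (illustrative only, not a complete call site):

	/* Sketch: 'ep' is an iw_cxgb4 endpoint as in the cm.c hunks below. */
	u64 ntuple = cxgb4_select_ntuple(ep->com.dev->rdev.lldi.ports[0],
					 ep->l2t);
	req->params = cpu_to_be32(ntuple);	/* T4: tuple fits in 32 bits */
	/* T5 carries it in a 64-bit field: cpu_to_be64(V_FILTER_TUPLE(ntuple)) */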
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
-	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits */
-#define FILTER_SEL_WIDTH_TAG_P_FC \
-	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
-				  struct l2t_entry *l2t)
-{
-	unsigned int ntuple = 0;
-	u32 viid;
-
-	switch (dev->rdev.lldi.filt_mode) {
-	/* default filter mode */
-	case HW_TPL_FR_MT_PR_IV_P_FC:
-		if (l2t->vlan == VLAN_NONE)
-			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
-		else {
-			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
-			ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
-		}
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	case HW_TPL_FR_MT_PR_OV_P_FC: {
-		viid = cxgb4_port_viid(l2t->neigh->dev);
-
-		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
-		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
-		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	}
-	default:
-		break;
-	}
-
-	return ntuple;
-}
-
 static int send_connect(struct c4iw_ep *ep)
 {
 	struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
 		req->local_ip = la->sin_addr.s_addr;
 		req->peer_ip = ra->sin_addr.s_addr;
 		req->opt0 = cpu_to_be64(opt0);
-		req->params = cpu_to_be32(select_ntuple(ep->com.dev,
-					ep->dst, ep->l2t));
+		req->params = cpu_to_be32(cxgb4_select_ntuple(
+					ep->com.dev->rdev.lldi.ports[0],
+					ep->l2t));
 		req->opt2 = cpu_to_be32(opt2);
 	} else {
 		req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
 		req6->peer_ip_lo = *((__be64 *)
 				(ra6->sin6_addr.s6_addr + 8));
 		req6->opt0 = cpu_to_be64(opt0);
-		req6->params = cpu_to_be32(
-				select_ntuple(ep->com.dev, ep->dst,
-					      ep->l2t));
+		req6->params = cpu_to_be32(cxgb4_select_ntuple(
+					ep->com.dev->rdev.lldi.ports[0],
+					ep->l2t));
 		req6->opt2 = cpu_to_be32(opt2);
 	}
 } else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
 		t5_req->peer_ip = ra->sin_addr.s_addr;
 		t5_req->opt0 = cpu_to_be64(opt0);
 		t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
-					select_ntuple(ep->com.dev,
-					ep->dst, ep->l2t)));
+					cxgb4_select_ntuple(
+					ep->com.dev->rdev.lldi.ports[0],
+					ep->l2t)));
 		t5_req->opt2 = cpu_to_be32(opt2);
 	} else {
 		t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
 				(ra6->sin6_addr.s6_addr + 8));
 		t5_req6->opt0 = cpu_to_be64(opt0);
 		t5_req6->params = (__force __be64)cpu_to_be32(
-			select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+			cxgb4_select_ntuple(
+				ep->com.dev->rdev.lldi.ports[0],
+				ep->l2t));
 		t5_req6->opt2 = cpu_to_be32(opt2);
 	}
 }
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	memset(req, 0, sizeof(*req));
 	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
 	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
-	req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
-				     ep->l2t));
+	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+				     ep->com.dev->rdev.lldi.ports[0],
+				     ep->l2t));
 	sin = (struct sockaddr_in *)&ep->com.local_addr;
 	req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	/*
 	 * Allocate a server TID.
 	 */
-	if (dev->rdev.lldi.enable_fw_ofld_conn)
+	if (dev->rdev.lldi.enable_fw_ofld_conn &&
+	    ep->com.local_addr.ss_family == AF_INET)
 		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
 					     cm_id->local_addr.ss_family, ep);
 	else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	/*
 	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
 	 */
-	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
-					- dev->rdev.lldi.tids->sftid_base
-					+ dev->rdev.lldi.tids->nstids;
+	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
 
 	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
 	if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	window = (__force u16) htons((__force u16)tcph->window);
 
 	/* Calculate filter portion for LE region. */
-	filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
+						    dev->rdev.lldi.ports[0],
+						    e));
 
 	/*
 	 * Synthesize the cpl_pass_accept_req. We have everything except the
......
@@ -228,6 +228,25 @@ struct tp_params {
 	uint32_t dack_re;            /* DACK timer resolution */
 	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */
+
+	u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
+	u32 ingress_config;             /* cached TP_INGRESS_CONFIG */
+
+	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
+	 * subset of the set of fields which may be present in the Compressed
+	 * Filter Tuple portion of filters and TCP TCB connections.  The
+	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+	 * Since a variable number of fields may or may not be present, their
+	 * shifted field positions within the Compressed Filter Tuple may
+	 * vary, or not even be present if the field isn't selected in
+	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
+	 * places we store their offsets here, or a -1 if the field isn't
+	 * present.
+	 */
+	int vlan_shift;
+	int vnic_shift;
+	int port_shift;
+	int protocol_shift;
 };
 
 struct vpd_params {
@@ -926,6 +945,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
 	       const u8 *fw_data, unsigned int fw_size,
 	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
 int t4_prep_adapter(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
......
@@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
 	if (stid >= 0) {
 		t->stid_tab[stid].data = data;
 		stid += t->stid_base;
-		t->stids_in_use++;
+		/* IPv6 requires max of 520 bits or 16 cells in TCAM
+		 * This is equivalent to 4 TIDs. With CLIP enabled it
+		 * needs 2 TIDs.
+		 */
+		if (family == PF_INET)
+			t->stids_in_use++;
+		else
+			t->stids_in_use += 4;
 	}
 	spin_unlock_bh(&t->stid_lock);
 	return stid;
@@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
 	}
 	if (stid >= 0) {
 		t->stid_tab[stid].data = data;
-		stid += t->stid_base;
+		stid -= t->nstids;
+		stid += t->sftid_base;
 		t->stids_in_use++;
 	}
 	spin_unlock_bh(&t->stid_lock);
@@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
  */
 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
 {
-	stid -= t->stid_base;
+	/* Is it a server filter TID? */
+	if (t->nsftids && (stid >= t->sftid_base)) {
+		stid -= t->sftid_base;
+		stid += t->nstids;
+	} else {
+		stid -= t->stid_base;
+	}
+
 	spin_lock_bh(&t->stid_lock);
 	if (family == PF_INET)
 		__clear_bit(stid, t->stid_bmap);
 	else
 		bitmap_release_region(t->stid_bmap, stid, 2);
 	t->stid_tab[stid].data = NULL;
-	t->stids_in_use--;
+	if (family == PF_INET)
+		t->stids_in_use--;
+	else
+		t->stids_in_use -= 4;
 	spin_unlock_bh(&t->stid_lock);
 }
 EXPORT_SYMBOL(cxgb4_free_stid);
@@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t)
 	size_t size;
 	unsigned int stid_bmap_size;
 	unsigned int natids = t->natids;
+	struct adapter *adap = container_of(t, struct adapter, tids);
 
 	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
 	size = t->ntids * sizeof(*t->tid_tab) +
@@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t)
 		t->afree = t->atid_tab;
 	}
 	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+	/* Reserve stid 0 for T4/T5 adapters */
+	if (!t->stid_base &&
+	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+		__set_bit(0, t->stid_bmap);
+
 	return 0;
 }
@@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
 			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
 			(adap->fn * 4));
-	lli.filt_mode = adap->filter_mode;
+	lli.filt_mode = adap->params.tp.vlan_pri_map;
 	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
 	for (i = 0; i < NCHAN; i++)
 		lli.tx_modq[i] = i;
@@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
 	adap = netdev2adap(dev);
 
 	/* Adjust stid to correct filter index */
-	stid -= adap->tids.nstids;
+	stid -= adap->tids.sftid_base;
 	stid += adap->tids.nftids;
 
 	/* Check to make sure the filter requested is writable ...
@@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
 			f->fs.val.lip[i] = val[i];
 			f->fs.mask.lip[i] = ~0;
 		}
-		if (adap->filter_mode & F_PORT) {
+		if (adap->params.tp.vlan_pri_map & F_PORT) {
 			f->fs.val.iport = port;
 			f->fs.mask.iport = mask;
 		}
 	}
 
+	if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+		f->fs.val.proto = IPPROTO_TCP;
+		f->fs.mask.proto = ~0;
+	}
+
 	f->fs.dirsteer = 1;
 	f->fs.iq = queue;
 	/* Mark filter as locked */
@@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
 	adap = netdev2adap(dev);
 
 	/* Adjust stid to correct filter index */
-	stid -= adap->tids.nstids;
+	stid -= adap->tids.sftid_base;
 	stid += adap->tids.nftids;
 
 	f = &adap->tids.ftid_tab[stid];
@@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
 	enum dev_state state;
 	u32 params[7], val[7];
 	struct fw_caps_config_cmd caps_cmd;
-	int reset = 1, j;
+	int reset = 1;
 
 	/*
 	 * Contact FW, advertising Master capability (and potentially forcing
@@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
 	/*
 	 * These are finalized by FW initialization, load their values now.
 	 */
-	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
-	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
-	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
 		     adap->params.b_wnd);
-
-	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
-	for (j = 0; j < NCHAN; j++)
-		adap->params.tp.tx_modq[j] = j;
-
-	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-			 &adap->filter_mode, 1,
-			 TP_VLAN_PRI_MAP);
+	t4_init_tp_params(adap);
 
 	adap->flags |= FW_OK;
 	return 0;
......
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
 
 static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
 {
-	stid -= t->stid_base;
+	/* Is it a server filter TID? */
+	if (t->nsftids && (stid >= t->sftid_base)) {
+		stid -= t->sftid_base;
+		stid += t->nstids;
+	} else {
+		stid -= t->stid_base;
+	}
+
 	return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
 }
......
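
A concrete (hypothetical) example of the TID remapping above: with nstids = 2048,
stid_base = 832 and sftid_base = 2880, a plain server TID of 900 indexes
stid_tab[900 - 832] = stid_tab[68], while a server filter TID of 2882 indexes
stid_tab[(2882 - 2880) + 2048] = stid_tab[2050]. In other words, server filter
TIDs occupy the nsftids slots immediately after the nstids region of stid_tab;
the same rebasing on sftid_base (rather than nstids) is what
cxgb4_create_server_filter() and cxgb4_remove_server_filter() now use before
indexing ftid_tab.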
@@ -45,6 +45,7 @@
 #include "l2t.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
+#include "t4_regs.h"
 
 #define VLAN_NONE 0xfff
@@ -411,6 +412,40 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
 }
 EXPORT_SYMBOL(cxgb4_l2t_get);
 
+u64 cxgb4_select_ntuple(struct net_device *dev,
+			const struct l2t_entry *l2t)
+{
+	struct adapter *adap = netdev2adap(dev);
+	struct tp_params *tp = &adap->params.tp;
+	u64 ntuple = 0;
+
+	/* Initialize each of the fields which we care about which are present
+	 * in the Compressed Filter Tuple.
+	 */
+	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
+		ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+
+	if (tp->port_shift >= 0)
+		ntuple |= (u64)l2t->lport << tp->port_shift;
+
+	if (tp->protocol_shift >= 0)
+		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
+
+	if (tp->vnic_shift >= 0) {
+		u32 viid = cxgb4_port_viid(dev);
+		u32 vf = FW_VIID_VIN_GET(viid);
+		u32 pf = FW_VIID_PFN_GET(viid);
+		u32 vld = FW_VIID_VIVLD_GET(viid);
+
+		ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
+				V_FT_VNID_ID_PF(pf) |
+				V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+	}
+
+	return ntuple;
+}
+EXPORT_SYMBOL(cxgb4_select_ntuple);
+
 /*
  * Called when address resolution fails for an L2T entry to handle packets
  * on the arpq head. If a packet specifies a failure handler it is invoked,
......
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
 struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
 				const struct net_device *physdev,
 				unsigned int priority);
-
+u64 cxgb4_select_ntuple(struct net_device *dev,
+			const struct l2t_entry *l2t);
 void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
 struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
 int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
......
@@ -3808,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter)
 	return 0;
 }
 
+/**
+ *	t4_init_tp_params - initialize adap->params.tp
+ *	@adap: the adapter
+ *
+ *	Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+	int chan;
+	u32 v;
+
+	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+
+	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+	for (chan = 0; chan < NCHAN; chan++)
+		adap->params.tp.tx_modq[chan] = chan;
+
+	/* Cache the adapter's Compressed Filter Mode and global Ingress
+	 * Configuration.
+	 */
+	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+			 &adap->params.tp.vlan_pri_map, 1,
+			 TP_VLAN_PRI_MAP);
+	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+			 &adap->params.tp.ingress_config, 1,
+			 TP_INGRESS_CONFIG);
+
+	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+	 * shift positions of several elements of the Compressed Filter Tuple
+	 * for this adapter which we need frequently ...
+	 */
+	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+							       F_PROTOCOL);
+
+	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+	 * represents the presence of an Outer VLAN instead of a VNIC ID.
+	 */
+	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+		adap->params.tp.vnic_shift = -1;
+
+	return 0;
+}
+
+/**
+ *	t4_filter_field_shift - calculate filter field shift
+ *	@adap: the adapter
+ *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ *	Return the shift position of a filter field within the Compressed
+ *	Filter Tuple.  The filter field is specified via its selection bit
+ *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+{
+	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+	unsigned int sel;
+	int field_shift;
+
+	if ((filter_mode & filter_sel) == 0)
+		return -1;
+
+	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+		switch (filter_mode & sel) {
+		case F_FCOE:
+			field_shift += W_FT_FCOE;
+			break;
+		case F_PORT:
+			field_shift += W_FT_PORT;
+			break;
+		case F_VNIC_ID:
+			field_shift += W_FT_VNIC_ID;
+			break;
+		case F_VLAN:
+			field_shift += W_FT_VLAN;
+			break;
+		case F_TOS:
+			field_shift += W_FT_TOS;
+			break;
+		case F_PROTOCOL:
+			field_shift += W_FT_PROTOCOL;
+			break;
+		case F_ETHERTYPE:
+			field_shift += W_FT_ETHERTYPE;
+			break;
+		case F_MACMATCH:
+			field_shift += W_FT_MACMATCH;
+			break;
+		case F_MPSHITTYPE:
+			field_shift += W_FT_MPSHITTYPE;
+			break;
+		case F_FRAGMENTATION:
+			field_shift += W_FT_FRAGMENTATION;
+			break;
+		}
+	}
+	return field_shift;
+}
+
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
 {
 	u8 addr[6];
......
@@ -1171,10 +1171,50 @@
 
 #define A_TP_TX_SCHED_PCMD 0x25
 
+#define S_VNIC    11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC    V_VNIC(1U)
+
+#define S_FRAGMENTATION    9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION    V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE    8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE    V_MPSHITTYPE(1U)
+
+#define S_MACMATCH    7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH    V_MACMATCH(1U)
+
+#define S_ETHERTYPE    6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE    V_ETHERTYPE(1U)
+
+#define S_PROTOCOL    5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL    V_PROTOCOL(1U)
+
+#define S_TOS    4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS    V_TOS(1U)
+
+#define S_VLAN    3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN    V_VLAN(1U)
+
+#define S_VNIC_ID    2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID    V_VNIC_ID(1U)
+
 #define S_PORT    1
 #define V_PORT(x) ((x) << S_PORT)
 #define F_PORT    V_PORT(1U)
 
+#define S_FCOE    0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE    V_FCOE(1U)
+
 #define NUM_MPS_CLS_SRAM_L_INSTANCES 336
 #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
@@ -1213,4 +1253,37 @@
 #define V_CHIPID(x) ((x) << S_CHIPID)
 #define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
 
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters.  Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present.  These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define W_FT_FCOE                       1
+#define W_FT_PORT                       3
+#define W_FT_VNIC_ID                    17
+#define W_FT_VLAN                       17
+#define W_FT_TOS                        8
+#define W_FT_PROTOCOL                   8
+#define W_FT_ETHERTYPE                  16
+#define W_FT_MACMATCH                   9
+#define W_FT_MPSHITTYPE                 3
+#define W_FT_FRAGMENTATION              1
+
+/* Some of the Compressed Filter Tuple fields have internal structure.  These
+ * bit shifts/masks describe those structures.  All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple.
+ */
+#define S_FT_VLAN_VLD                   16
+#define V_FT_VLAN_VLD(x)                ((x) << S_FT_VLAN_VLD)
+#define F_FT_VLAN_VLD                   V_FT_VLAN_VLD(1U)
+
+#define S_FT_VNID_ID_VF                 0
+#define V_FT_VNID_ID_VF(x)              ((x) << S_FT_VNID_ID_VF)
+
+#define S_FT_VNID_ID_PF                 7
+#define V_FT_VNID_ID_PF(x)              ((x) << S_FT_VNID_ID_PF)
+
+#define S_FT_VNID_ID_VLD                16
+#define V_FT_VNID_ID_VLD(x)             ((x) << S_FT_VNID_ID_VLD)
+
 #endif /* __T4_REGS_H */
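
A worked example of t4_filter_field_shift() using the W_FT_* widths above:
suppose (hypothetically) the cached TP_VLAN_PRI_MAP selects F_FCOE, F_PORT,
F_VLAN and F_PROTOCOL. Fields are packed into the Compressed Filter Tuple from
bit 0 upward in selection-bit order, so each field's shift is the sum of the
widths of the selected fields below it:

	port_shift     = W_FT_FCOE = 1
	vlan_shift     = W_FT_FCOE + W_FT_PORT = 1 + 3 = 4
	protocol_shift = W_FT_FCOE + W_FT_PORT + W_FT_VLAN = 1 + 3 + 17 = 21
	vnic_shift     = -1 (F_VNIC_ID not selected in this mode)

cxgb4_select_ntuple() then builds the tuple for this mode as, e.g.,
(F_FT_VLAN_VLD | vlan) << 4 | lport << 1 | IPPROTO_TCP << 21.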