Commit db3397b9 authored by David S. Miller

Merge branch 'cxgb4-next'

Hariprasad Shenai says:

====================
cxgb4/cxgb4vf: Adds support for Chelsio T6 adapter

This patch series adds the following:
NIC driver support for the T6 adapter
vNIC driver support for the T6 adapter

This patch series has been created against the net-next tree and includes
patches for the cxgb4 and cxgb4vf drivers.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know if you have any review comments.

Thanks

V2:
 Fixed a compilation issue when CHELSIO_T4_FCOE is set
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d8951125 41fc2e41
......@@ -224,7 +224,6 @@ struct sge_params {
};
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
unsigned int la_mask; /* what events are recorded by TP LA */
unsigned short tx_modq_map; /* TX modulation scheduler queue to */
......@@ -273,6 +272,7 @@ struct pci_params {
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
#define CHELSIO_T6 0x6
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
......@@ -284,6 +284,10 @@ enum chip_type {
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
T6_FIRST_REV = T6_A0,
T6_LAST_REV = T6_A0,
};
struct devlog_params {
......@@ -292,6 +296,15 @@ struct devlog_params {
u32 size; /* size of log */
};
/* Stores chip specific parameters */
struct arch_specific_params {
u8 nchan;
u16 mps_rplc_size;
u16 vfcount;
u32 sge_fl_db;
u16 mps_tcam_size;
};
struct adapter_params {
struct sge_params sge;
struct tp_params tp;
......@@ -317,6 +330,7 @@ struct adapter_params {
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
enum chip_type chip; /* chip code */
struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
unsigned char bypass;
......@@ -850,6 +864,11 @@ enum {
VLAN_REWRITE
};
static inline int is_t6(enum chip_type chip)
{
return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6;
}
static inline int is_t5(enum chip_type chip)
{
return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
......
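A note for readers following the new is_t6() helper above: params.chip packs the chip version and silicon revision into a single code via CHELSIO_CHIP_CODE(), so version checks reduce to a shift and a mask. Below is a minimal standalone sketch of that scheme; the 4-bit version/revision split mirrors the driver's existing T4/T5 convention and is an assumption here, since the macro definitions are not part of this hunk.

#include <stdio.h>

/* Assumed layout: chip version in bits [7:4], silicon revision in bits [3:0]. */
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code)           (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code)           ((code) & 0xf)

#define CHELSIO_T6 0x6

static int is_t6(unsigned int chip)
{
	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6;
}

int main(void)
{
	unsigned int t6_a0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0);

	/* Prints: chip 0x60: version 6 rev 0, is_t6 1 */
	printf("chip 0x%02x: version %u rev %u, is_t6 %d\n",
	       t6_a0, CHELSIO_CHIP_VERSION(t6_a0),
	       CHELSIO_CHIP_RELEASE(t6_a0), is_t6(t6_a0));
	return 0;
}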
......@@ -1084,41 +1084,89 @@ static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
static int mps_tcam_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF"
" VF Replication "
"P0 P1 P2 P3 ML\n");
else {
struct adapter *adap = seq->private;
unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
if (v == SEQ_START_TOKEN) {
if (adap->params.arch.mps_rplc_size > 128)
seq_puts(seq, "Idx Ethernet address Mask "
"Vld Ports PF VF "
"Replication "
" P0 P1 P2 P3 ML\n");
else
seq_puts(seq, "Idx Ethernet address Mask "
"Vld Ports PF VF Replication"
" P0 P1 P2 P3 ML\n");
} else {
u64 mask;
u8 addr[ETH_ALEN];
struct adapter *adap = seq->private;
bool replicate;
unsigned int idx = (uintptr_t)v - 2;
u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
u32 rplc[4] = {0, 0, 0, 0};
u64 tcamy, tcamx, val;
u32 cls_lo, cls_hi, ctl;
u32 rplc[8] = {0};
if (chip_ver > CHELSIO_T5) {
/* CtlCmdType - 0: Read, 1: Write
* CtlTcamSel - 0: TCAM0, 1: TCAM1
* CtlXYBitSel- 0: Y bit, 1: X bit
*/
/* Read tcamy */
ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
if (idx < 256)
ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
else
ctl |= CTLTCAMINDEX_V(idx - 256) |
CTLTCAMSEL_V(1);
t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
tcamy = DMACH_G(val) << 32;
tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
/* Read tcamx. Change the control param */
ctl |= CTLXYBITSEL_V(1);
t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
tcamx = DMACH_G(val) << 32;
tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
} else {
tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
}
cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
if (tcamx & tcamy) {
seq_printf(seq, "%3u -\n", idx);
goto out;
}
if (cls_lo & REPLICATE_F) {
rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0;
if (chip_ver > CHELSIO_T5)
replicate = (cls_lo & T6_REPLICATE_F);
else
replicate = (cls_lo & REPLICATE_F);
if (replicate) {
struct fw_ldst_cmd ldst_cmd;
int ret;
struct fw_ldst_mps_rplc mps_rplc;
u32 ldst_addrspc;
memset(&ldst_cmd, 0, sizeof(ldst_cmd));
ldst_addrspc =
FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS);
ldst_cmd.op_to_addrspace =
htonl(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_READ_F |
FW_LDST_CMD_ADDRSPACE_V(
FW_LDST_ADDRSPC_MPS));
ldst_addrspc);
ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
ldst_cmd.u.mps.fid_ctl =
ldst_cmd.u.mps.rplc.fid_idx =
htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
FW_LDST_CMD_CTL_V(idx));
FW_LDST_CMD_IDX_V(idx));
ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
sizeof(ldst_cmd), &ldst_cmd);
if (ret)
......@@ -1126,26 +1174,65 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
"replication map for idx %d: %d\n",
idx, -ret);
else {
rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
mps_rplc = ldst_cmd.u.mps.rplc;
rplc[0] = ntohl(mps_rplc.rplc31_0);
rplc[1] = ntohl(mps_rplc.rplc63_32);
rplc[2] = ntohl(mps_rplc.rplc95_64);
rplc[3] = ntohl(mps_rplc.rplc127_96);
if (adap->params.arch.mps_rplc_size > 128) {
rplc[4] = ntohl(mps_rplc.rplc159_128);
rplc[5] = ntohl(mps_rplc.rplc191_160);
rplc[6] = ntohl(mps_rplc.rplc223_192);
rplc[7] = ntohl(mps_rplc.rplc255_224);
}
}
}
tcamxy2valmask(tcamx, tcamy, addr, &mask);
seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
"%3c %#x%4u%4d",
idx, addr[0], addr[1], addr[2], addr[3], addr[4],
addr[5], (unsigned long long)mask,
(cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
if (chip_ver > CHELSIO_T5)
seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
"%012llx%3c %#x%4u%4d",
idx, addr[0], addr[1], addr[2], addr[3],
addr[4], addr[5], (unsigned long long)mask,
(cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
PORTMAP_G(cls_hi),
T6_PF_G(cls_lo),
(cls_lo & T6_VF_VALID_F) ?
T6_VF_G(cls_lo) : -1);
else
seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
"%012llx%3c %#x%4u%4d",
idx, addr[0], addr[1], addr[2], addr[3],
addr[4], addr[5], (unsigned long long)mask,
(cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
PORTMAP_G(cls_hi),
PF_G(cls_lo),
(cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
if (cls_lo & REPLICATE_F)
if (replicate) {
if (adap->params.arch.mps_rplc_size > 128)
seq_printf(seq, " %08x %08x %08x %08x "
"%08x %08x %08x %08x",
rplc[7], rplc[6], rplc[5], rplc[4],
rplc[3], rplc[2], rplc[1], rplc[0]);
else
seq_printf(seq, " %08x %08x %08x %08x",
rplc[3], rplc[2], rplc[1], rplc[0]);
} else {
if (adap->params.arch.mps_rplc_size > 128)
seq_printf(seq, "%72c", ' ');
else
seq_printf(seq, "%36c", ' ');
}
if (chip_ver > CHELSIO_T5)
seq_printf(seq, "%4u%3u%3u%3u %#x\n",
T6_SRAM_PRIO0_G(cls_lo),
T6_SRAM_PRIO1_G(cls_lo),
T6_SRAM_PRIO2_G(cls_lo),
T6_SRAM_PRIO3_G(cls_lo),
(cls_lo >> T6_MULTILISTEN0_S) & 0xf);
else
seq_printf(seq, "%4u%3u%3u%3u %#x\n",
SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
......@@ -1416,6 +1503,9 @@ static int rss_config_show(struct seq_file *seq, void *v)
seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf));
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf));
else
seq_printf(seq, " VfWrAddr: %3d\n",
T6_VFWRADDR_G(rssconf));
seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]);
seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F));
seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F));
......@@ -1634,14 +1724,14 @@ static int rss_vf_config_open(struct inode *inode, struct file *file)
struct adapter *adapter = inode->i_private;
struct seq_tab *p;
struct rss_vf_conf *vfconf;
int vf;
int vf, vfcount = adapter->params.arch.vfcount;
p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show);
if (!p)
return -ENOMEM;
vfconf = (struct rss_vf_conf *)p->data;
for (vf = 0; vf < 128; vf++) {
for (vf = 0; vf < vfcount; vf++) {
t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh);
}
......@@ -2033,7 +2123,7 @@ void add_debugfs_files(struct adapter *adap,
int t4_setup_debugfs(struct adapter *adap)
{
int i;
u32 size;
u32 size = 0;
struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
......@@ -2104,12 +2194,7 @@ int t4_setup_debugfs(struct adapter *adap)
size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
}
if (is_t4(adap->params.chip)) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
if (i & EXT_MEM_ENABLE_F)
add_debugfs_mem(adap, "mc", MEM_MC,
EXT_MEM_SIZE_G(size));
} else {
if (is_t5(adap->params.chip)) {
if (i & EXT_MEM0_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
add_debugfs_mem(adap, "mc0", MEM_MC0,
......@@ -2120,6 +2205,11 @@ int t4_setup_debugfs(struct adapter *adap)
add_debugfs_mem(adap, "mc1", MEM_MC1,
EXT_MEM1_SIZE_G(size));
}
} else {
if (i & EXT_MEM_ENABLE_F)
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
add_debugfs_mem(adap, "mc", MEM_MC,
EXT_MEM_SIZE_G(size));
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
......
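On T6 the mps_tcam_show() hunk above reads the MAC TCAM indirectly through MPS_CLS_TCAM_DATA2_CTL rather than the per-entry TCAM_Y_L/TCAM_X_L registers, because the 512 entries are split across two TCAMs selected by CtlTcamSel. The following small sketch shows how that control word is composed for an arbitrary entry, mirroring the driver code above and reusing the CTL* shift values from the t4_regs.h hunk later in this series; it is illustrative only.

#include <stdio.h>

/* Field shifts as added in the t4_regs.h hunk of this series. */
#define CTLCMDTYPE_S      31
#define CTLCMDTYPE_V(x)   ((x) << CTLCMDTYPE_S)
#define CTLTCAMSEL_S      25
#define CTLTCAMSEL_V(x)   ((x) << CTLTCAMSEL_S)
#define CTLTCAMINDEX_S    17
#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S)
#define CTLXYBITSEL_S     16
#define CTLXYBITSEL_V(x)  ((x) << CTLXYBITSEL_S)

/* Build the DATA2_CTL word for reading the Y (xybit == 0) or X (xybit == 1)
 * half of TCAM entry idx: entries 0-255 live in TCAM0, 256-511 in TCAM1.
 */
static unsigned int mps_tcam_read_ctl(unsigned int idx, unsigned int xybit)
{
	unsigned int ctl = CTLCMDTYPE_V(0U) | CTLXYBITSEL_V(xybit); /* 0 = read */

	if (idx < 256)
		ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0U);
	else
		ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1U);
	return ctl;
}

int main(void)
{
	/* Entry 300 is index 44 of TCAM1. */
	printf("DATA2_CTL for entry 300, Y half: 0x%08x\n",
	       mps_tcam_read_ctl(300, 0));
	return 0;
}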
......@@ -135,8 +135,10 @@ struct filter_entry {
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
......@@ -1721,7 +1723,7 @@ static int tid_init(struct tid_info *t)
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
/* Reserve stid 0 for T4/T5 adapters */
if (!t->stid_base &&
(is_t4(adap->params.chip) || is_t5(adap->params.chip)))
(CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
__set_bit(0, t->stid_bmap);
return 0;
......@@ -2108,10 +2110,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
if (offset < mc0_end) {
memtype = MEM_MC0;
memaddr = offset - edc1_end;
} else if (is_t4(adap->params.chip)) {
/* T4 only has a single memory channel */
goto err;
} else {
} else if (is_t5(adap->params.chip)) {
size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
mc1_size = EXT_MEM1_SIZE_G(size) << 20;
mc1_end = mc0_end + mc1_size;
......@@ -2122,6 +2121,9 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
/* offset beyond the end of any memory */
goto err;
}
} else {
/* T4/T6 only has a single memory channel */
goto err;
}
}
......@@ -2286,9 +2288,13 @@ static void process_db_full(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
else
t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
......@@ -2350,7 +2356,7 @@ static void process_db_drop(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
} else {
} else if (is_t5(adap->params.chip)) {
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
u16 pidx_inc = dropped_db & 0x1fff;
......@@ -2371,6 +2377,7 @@ static void process_db_drop(struct work_struct *work)
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
}
if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
......@@ -3390,6 +3397,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
case CHELSIO_T5:
fw_config_file = FW5_CFNAME;
break;
case CHELSIO_T6:
fw_config_file = FW6_CFNAME;
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
adapter->pdev->device);
......@@ -3586,7 +3596,24 @@ static struct fw_info fw_info_array[] = {
.intfver_iscsi = FW_INTFVER(T5, ISCSI),
.intfver_fcoe = FW_INTFVER(T5, FCOE),
},
}, {
.chip = CHELSIO_T6,
.fs_name = FW6_CFNAME,
.fw_mod_name = FW6_FNAME,
.fw_hdr = {
.chip = FW_HDR_CHIP_T6,
.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
.intfver_nic = FW_INTFVER(T6, NIC),
.intfver_vnic = FW_INTFVER(T6, VNIC),
.intfver_ofld = FW_INTFVER(T6, OFLD),
.intfver_ri = FW_INTFVER(T6, RI),
.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
.intfver_iscsi = FW_INTFVER(T6, ISCSI),
.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
.intfver_fcoe = FW_INTFVER(T6, FCOE),
},
}
};
static struct fw_info *find_fw_info(int chip)
......
......@@ -522,14 +522,13 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
u32 val;
if (q->pend_cred >= 8) {
u32 val = adap->params.arch.sge_fl_db;
if (is_t4(adap->params.chip))
val = PIDX_V(q->pend_cred / 8);
val |= PIDX_V(q->pend_cred / 8);
else
val = PIDX_T5_V(q->pend_cred / 8) |
DBTYPE_F;
val |= DBPRIO_F;
val |= PIDX_T5_V(q->pend_cred / 8);
/* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them.
......@@ -1034,7 +1033,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
static u64 hwcsum(const struct sk_buff *skb)
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
......@@ -1065,11 +1064,16 @@ static u64 hwcsum(const struct sk_buff *skb)
goto nocsum;
}
if (likely(csum_type >= TX_CSUM_TCPIP))
return TXPKT_CSUM_TYPE_V(csum_type) |
TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) |
TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN);
else {
if (likely(csum_type >= TX_CSUM_TCPIP)) {
u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
else
hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
} else {
int start = skb_transport_offset(skb);
return TXPKT_CSUM_TYPE_V(csum_type) |
......@@ -1237,9 +1241,15 @@ out_free: dev_kfree_skb_any(skb);
else
lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
cpl = (void *)(lso + 1);
cntrl = TXPKT_CSUM_TYPE_V(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
TXPKT_IPHDR_LEN_V(l3hdr_len) |
TXPKT_ETHHDR_LEN_V(eth_xtra_len);
if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
else
cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
TXPKT_IPHDR_LEN_V(l3hdr_len);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
......@@ -1248,7 +1258,8 @@ out_free: dev_kfree_skb_any(skb);
FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F;
cntrl = hwcsum(adap->params.chip, skb) |
TXPKT_IPCSUM_DIS_F;
q->tx_cso++;
}
}
......@@ -2440,6 +2451,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
if (fl) {
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
/* Allocate the ring for the hardware free list (with space
* for its status page) along with the associated software
* descriptor ring. The free list size needs to be a multiple
......@@ -2468,7 +2481,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
FW_IQ_CMD_FL0CONGEN_F);
c.fl0dcaen_to_fl0cidxfthresh =
htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
FW_IQ_CMD_FL0FBMAX_V(FETCHBURSTMAX_512B_X));
FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
FETCHBURSTMAX_512B_X :
FETCHBURSTMAX_256B_X));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
......
......@@ -150,7 +150,12 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
*/
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
u32 req = ENABLE_F | FUNCTION_V(adap->pf) | REGISTER_V(reg);
u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
req |= ENABLE_F;
else
req |= T6_ENABLE_F;
if (is_t4(adap->params.chip))
req |= LOCALCFG_F;
......@@ -381,9 +386,8 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
/* Offset into the region of memory which is being accessed
* MEM_EDC0 = 0
* MEM_EDC1 = 1
* MEM_MC = 2 -- T4
* MEM_MC0 = 2 -- For T5
* MEM_MC1 = 3 -- For T5
* MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
* MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
*/
edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
if (mtype != MEM_MC1)
......@@ -634,6 +638,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
return T4_REGMAP_SIZE;
case CHELSIO_T5:
case CHELSIO_T6:
return T5_REGMAP_SIZE;
}
......@@ -1316,6 +1321,344 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x51300, 0x51308,
};
static const unsigned int t6_reg_ranges[] = {
0x1008, 0x114c,
0x1180, 0x11b4,
0x11fc, 0x1250,
0x1280, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
0x3060, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
0x35ec, 0x35ec,
0x3600, 0x5624,
0x56cc, 0x575c,
0x580c, 0x5814,
0x5890, 0x58bc,
0x5940, 0x595c,
0x5980, 0x598c,
0x59b0, 0x59dc,
0x59fc, 0x5a18,
0x5a60, 0x5a6c,
0x5a80, 0x5a9c,
0x5b94, 0x5bfc,
0x5c10, 0x5ec0,
0x5ec8, 0x5ec8,
0x6000, 0x6040,
0x6058, 0x6154,
0x7700, 0x7798,
0x77c0, 0x7880,
0x78cc, 0x78fc,
0x7b00, 0x7c54,
0x7d00, 0x7efc,
0x8dc0, 0x8de0,
0x8df8, 0x8e84,
0x8ea0, 0x8f88,
0x8fb8, 0x911c,
0x9400, 0x9470,
0x9600, 0x971c,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
0x9c00, 0x9c6c,
0x9c80, 0x9cec,
0x9d00, 0x9d6c,
0x9d80, 0x9dec,
0x9e00, 0x9e6c,
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0xa020,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0xf008,
0x11000, 0x11014,
0x11048, 0x11110,
0x11118, 0x1117c,
0x11190, 0x11260,
0x11300, 0x1130c,
0x12000, 0x1205c,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
0x19150, 0x191b0,
0x191d0, 0x191e8,
0x19238, 0x192b8,
0x193f8, 0x19474,
0x19490, 0x194cc,
0x194f0, 0x194f8,
0x19c00, 0x19c80,
0x19c94, 0x19cbc,
0x19ce4, 0x19d28,
0x19d50, 0x19d78,
0x19d94, 0x19dc8,
0x19df0, 0x19e10,
0x19e50, 0x19e6c,
0x19ea0, 0x19f34,
0x19f40, 0x19f50,
0x19f90, 0x19fac,
0x19fc4, 0x19fe4,
0x1a000, 0x1a06c,
0x1a0b0, 0x1a120,
0x1a128, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
0x1e040, 0x1e04c,
0x1e284, 0x1e290,
0x1e2c0, 0x1e2c0,
0x1e2e0, 0x1e2e0,
0x1e300, 0x1e384,
0x1e3c0, 0x1e3c8,
0x1e408, 0x1e40c,
0x1e440, 0x1e44c,
0x1e684, 0x1e690,
0x1e6c0, 0x1e6c0,
0x1e6e0, 0x1e6e0,
0x1e700, 0x1e784,
0x1e7c0, 0x1e7c8,
0x1e808, 0x1e80c,
0x1e840, 0x1e84c,
0x1ea84, 0x1ea90,
0x1eac0, 0x1eac0,
0x1eae0, 0x1eae0,
0x1eb00, 0x1eb84,
0x1ebc0, 0x1ebc8,
0x1ec08, 0x1ec0c,
0x1ec40, 0x1ec4c,
0x1ee84, 0x1ee90,
0x1eec0, 0x1eec0,
0x1eee0, 0x1eee0,
0x1ef00, 0x1ef84,
0x1efc0, 0x1efc8,
0x1f008, 0x1f00c,
0x1f040, 0x1f04c,
0x1f284, 0x1f290,
0x1f2c0, 0x1f2c0,
0x1f2e0, 0x1f2e0,
0x1f300, 0x1f384,
0x1f3c0, 0x1f3c8,
0x1f408, 0x1f40c,
0x1f440, 0x1f44c,
0x1f684, 0x1f690,
0x1f6c0, 0x1f6c0,
0x1f6e0, 0x1f6e0,
0x1f700, 0x1f784,
0x1f7c0, 0x1f7c8,
0x1f808, 0x1f80c,
0x1f840, 0x1f84c,
0x1fa84, 0x1fa90,
0x1fac0, 0x1fac0,
0x1fae0, 0x1fae0,
0x1fb00, 0x1fb84,
0x1fbc0, 0x1fbc8,
0x1fc08, 0x1fc0c,
0x1fc40, 0x1fc4c,
0x1fe84, 0x1fe90,
0x1fec0, 0x1fec0,
0x1fee0, 0x1fee0,
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
0x30000, 0x30070,
0x30100, 0x3015c,
0x30190, 0x301d0,
0x30200, 0x30318,
0x30400, 0x3052c,
0x30540, 0x3061c,
0x30800, 0x3088c,
0x308c0, 0x30908,
0x30910, 0x309b8,
0x30a00, 0x30a04,
0x30a0c, 0x30a2c,
0x30a44, 0x30a50,
0x30a74, 0x30c24,
0x30d00, 0x30d3c,
0x30d44, 0x30d7c,
0x30de0, 0x30de0,
0x30e00, 0x30ed4,
0x30f00, 0x30fa4,
0x30fc0, 0x30fc4,
0x31000, 0x31004,
0x31080, 0x310fc,
0x31208, 0x31220,
0x3123c, 0x31254,
0x31300, 0x31300,
0x31308, 0x3131c,
0x31338, 0x3133c,
0x31380, 0x31380,
0x31388, 0x313a8,
0x313b4, 0x313b4,
0x31400, 0x31420,
0x31438, 0x3143c,
0x31480, 0x31480,
0x314a8, 0x314a8,
0x314b0, 0x314b4,
0x314c8, 0x314d4,
0x31a40, 0x31a4c,
0x31af0, 0x31b20,
0x31b38, 0x31b3c,
0x31b80, 0x31b80,
0x31ba8, 0x31ba8,
0x31bb0, 0x31bb4,
0x31bc8, 0x31bd4,
0x32140, 0x3218c,
0x321f0, 0x32200,
0x32218, 0x32218,
0x32400, 0x32400,
0x32408, 0x3241c,
0x32618, 0x32620,
0x32664, 0x32664,
0x326a8, 0x326a8,
0x326ec, 0x326ec,
0x32a00, 0x32abc,
0x32b00, 0x32b78,
0x32c00, 0x32c00,
0x32c08, 0x32c3c,
0x32e00, 0x32e2c,
0x32f00, 0x32f2c,
0x33000, 0x330ac,
0x330c0, 0x331ac,
0x331c0, 0x332c4,
0x332e4, 0x333c4,
0x333e4, 0x334ac,
0x334c0, 0x335ac,
0x335c0, 0x336c4,
0x336e4, 0x337c4,
0x337e4, 0x337fc,
0x33814, 0x33814,
0x33854, 0x33868,
0x33880, 0x3388c,
0x338c0, 0x338d0,
0x338e8, 0x338ec,
0x33900, 0x339ac,
0x339c0, 0x33ac4,
0x33ae4, 0x33b10,
0x33b24, 0x33b50,
0x33bf0, 0x33c10,
0x33c24, 0x33c50,
0x33cf0, 0x33cfc,
0x34000, 0x34070,
0x34100, 0x3415c,
0x34190, 0x341d0,
0x34200, 0x34318,
0x34400, 0x3452c,
0x34540, 0x3461c,
0x34800, 0x3488c,
0x348c0, 0x34908,
0x34910, 0x349b8,
0x34a00, 0x34a04,
0x34a0c, 0x34a2c,
0x34a44, 0x34a50,
0x34a74, 0x34c24,
0x34d00, 0x34d3c,
0x34d44, 0x34d7c,
0x34de0, 0x34de0,
0x34e00, 0x34ed4,
0x34f00, 0x34fa4,
0x34fc0, 0x34fc4,
0x35000, 0x35004,
0x35080, 0x350fc,
0x35208, 0x35220,
0x3523c, 0x35254,
0x35300, 0x35300,
0x35308, 0x3531c,
0x35338, 0x3533c,
0x35380, 0x35380,
0x35388, 0x353a8,
0x353b4, 0x353b4,
0x35400, 0x35420,
0x35438, 0x3543c,
0x35480, 0x35480,
0x354a8, 0x354a8,
0x354b0, 0x354b4,
0x354c8, 0x354d4,
0x35a40, 0x35a4c,
0x35af0, 0x35b20,
0x35b38, 0x35b3c,
0x35b80, 0x35b80,
0x35ba8, 0x35ba8,
0x35bb0, 0x35bb4,
0x35bc8, 0x35bd4,
0x36140, 0x3618c,
0x361f0, 0x36200,
0x36218, 0x36218,
0x36400, 0x36400,
0x36408, 0x3641c,
0x36618, 0x36620,
0x36664, 0x36664,
0x366a8, 0x366a8,
0x366ec, 0x366ec,
0x36a00, 0x36abc,
0x36b00, 0x36b78,
0x36c00, 0x36c00,
0x36c08, 0x36c3c,
0x36e00, 0x36e2c,
0x36f00, 0x36f2c,
0x37000, 0x370ac,
0x370c0, 0x371ac,
0x371c0, 0x372c4,
0x372e4, 0x373c4,
0x373e4, 0x374ac,
0x374c0, 0x375ac,
0x375c0, 0x376c4,
0x376e4, 0x377c4,
0x377e4, 0x377fc,
0x37814, 0x37814,
0x37854, 0x37868,
0x37880, 0x3788c,
0x378c0, 0x378d0,
0x378e8, 0x378ec,
0x37900, 0x379ac,
0x379c0, 0x37ac4,
0x37ae4, 0x37b10,
0x37b24, 0x37b50,
0x37bf0, 0x37c10,
0x37c24, 0x37c50,
0x37cf0, 0x37cfc,
0x40040, 0x40040,
0x40080, 0x40084,
0x40100, 0x40100,
0x40140, 0x401bc,
0x40200, 0x40214,
0x40228, 0x40228,
0x40240, 0x40258,
0x40280, 0x40280,
0x40304, 0x40304,
0x40330, 0x4033c,
0x41304, 0x413dc,
0x41400, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x4407c,
0x440c0, 0x4427c,
0x442c0, 0x4447c,
0x444c0, 0x4467c,
0x446c0, 0x4487c,
0x448c0, 0x44a7c,
0x44ac0, 0x44c7c,
0x44cc0, 0x44e7c,
0x44ec0, 0x4507c,
0x450c0, 0x451fc,
0x45800, 0x45868,
0x45880, 0x45884,
0x458a0, 0x458b0,
0x45a00, 0x45a68,
0x45a80, 0x45a84,
0x45aa0, 0x45ab0,
0x460c0, 0x460e4,
0x47000, 0x4708c,
0x47200, 0x47250,
0x47400, 0x47420,
0x47600, 0x47618,
0x47800, 0x4782c,
0x50000, 0x500cc,
0x50400, 0x50400,
0x50800, 0x508cc,
0x50c00, 0x50c00,
0x51000, 0x510b0,
0x51300, 0x51324,
};
u32 *buf_end = (u32 *)((char *)buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
......@@ -1335,6 +1678,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
break;
case CHELSIO_T6:
reg_ranges = t6_reg_ranges;
reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
break;
default:
dev_err(adap->pdev_dev,
"Unsupported chip version %d\n", chip_version);
......@@ -1948,7 +2296,8 @@ static bool t4_fw_matches_chip(const struct adapter *adap,
* which will keep us "honest" in the future ...
*/
if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
(is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
(is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
(is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
return true;
dev_err(adap->pdev_dev,
......@@ -2488,6 +2837,7 @@ static void tp_intr_handler(struct adapter *adapter)
static void sge_intr_handler(struct adapter *adapter)
{
u64 v;
u32 err;
static const struct intr_info sge_intr_info[] = {
{ ERR_CPL_EXCEED_IQE_SIZE_F,
......@@ -2496,8 +2846,6 @@ static void sge_intr_handler(struct adapter *adapter)
"SGE GTS CIDX increment too large", -1, 0 },
{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
......@@ -2510,13 +2858,19 @@ static void sge_intr_handler(struct adapter *adapter)
0 },
{ ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 },
{ ERR_EGR_CTXT_PRIO_F,
"SGE too many priority egress contexts", -1, 0 },
{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0 }
};
static struct intr_info t4t5_sge_intr_info[] = {
{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
{ ERR_EGR_CTXT_PRIO_F,
"SGE too many priority egress contexts", -1, 0 },
{ 0 }
};
v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
if (v) {
......@@ -2526,8 +2880,23 @@ static void sge_intr_handler(struct adapter *adapter)
t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
}
if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
v != 0)
v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
t4t5_sge_intr_info);
err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
if (err & ERROR_QID_VALID_F) {
dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
ERROR_QID_G(err));
if (err & UNCAPTURED_ERROR_F)
dev_err(adapter->pdev_dev,
"SGE UNCAPTURED_ERROR set (clearing)\n");
t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
UNCAPTURED_ERROR_F);
}
if (v != 0)
t4_fatal_err(adapter);
}
......@@ -2700,6 +3069,7 @@ static void cplsw_intr_handler(struct adapter *adapter)
*/
static void le_intr_handler(struct adapter *adap)
{
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
static const struct intr_info le_intr_info[] = {
{ LIPMISS_F, "LE LIP miss", -1, 0 },
{ LIP0_F, "LE 0 LIP error", -1, 0 },
......@@ -2709,7 +3079,18 @@ static void le_intr_handler(struct adapter *adap)
{ 0 }
};
if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
static struct intr_info t6_le_intr_info[] = {
{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
{ TCAMINTPERR_F, "LE parity error", -1, 1 },
{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
{ 0 }
};
if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
(chip <= CHELSIO_T5) ?
le_intr_info : t6_le_intr_info))
t4_fatal_err(adap);
}
......@@ -2978,7 +3359,7 @@ int t4_slow_intr_handler(struct adapter *adapter)
pcie_intr_handler(adapter);
if (cause & MC_F)
mem_intr_handler(adapter, MEM_MC);
if (!is_t4(adapter->params.chip) && (cause & MC1_S))
if (is_t5(adapter->params.chip) && (cause & MC1_F))
mem_intr_handler(adapter, MEM_MC1);
if (cause & EDC0_F)
mem_intr_handler(adapter, MEM_EDC0);
......@@ -3024,17 +3405,18 @@ int t4_slow_intr_handler(struct adapter *adapter)
*/
void t4_intr_enable(struct adapter *adapter)
{
u32 val = 0;
u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
EGRESS_SIZE_ERR_F);
DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
......@@ -3248,11 +3630,29 @@ void t4_read_rss_key(struct adapter *adap, u32 *key)
*/
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
{
u8 rss_key_addr_cnt = 16;
u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
* allows access to key addresses 16-63 by using KeyWrAddrX
* as index[5:4](upper 2) into key table
*/
if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
(vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
rss_key_addr_cnt = 32;
t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
TP_RSS_SECRET_KEY0_A);
if (idx >= 0 && idx < 16)
if (idx >= 0 && idx < rss_key_addr_cnt) {
if (rss_key_addr_cnt > 16)
t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
KEYWRADDRX_V(idx >> 4) |
T6_VFWRADDR_V(idx) | KEYWREN_F);
else
t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
KEYWRADDR_V(idx) | KEYWREN_F);
}
}
/**
......@@ -3286,8 +3686,13 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
{
u32 vrt, mask, data;
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
mask = VFWRADDR_V(VFWRADDR_M);
data = VFWRADDR_V(index);
} else {
mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
data = T6_VFWRADDR_V(index);
}
/* Request that the index'th VF Table values be read into VFL/VFH.
*/
......@@ -4798,45 +5203,71 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
int i, ret;
int offset, ret = 0;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p;
unsigned int max_naddr = is_t4(adap->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
unsigned int nfilters = 0;
unsigned int max_naddr = adap->params.arch.mps_tcam_size;
unsigned int rem = naddr;
if (naddr > 7)
if (naddr > max_naddr)
return -EINVAL;
for (offset = 0; offset < naddr ; /**/) {
unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
rem : ARRAY_SIZE(c.u.exact));
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[fw_naddr]), 16);
struct fw_vi_mac_exact *p;
int i;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
(free ? FW_CMD_EXEC_F : 0) |
FW_CMD_REQUEST_F |
FW_CMD_WRITE_F |
FW_CMD_EXEC_V(free) |
FW_VI_MAC_CMD_VIID_V(viid));
c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
FW_CMD_LEN16_V((naddr + 2) / 2));
c.freemacs_to_len16 =
cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
FW_CMD_LEN16_V(len16));
for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
p->valid_to_idx =
cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
FW_VI_MAC_CMD_IDX_V(
FW_VI_MAC_ADD_MAC));
memcpy(p->macaddr, addr[offset + i],
sizeof(p->macaddr));
}
/* It's okay if we run out of space in our MAC address arena.
* Some of the addresses we submit may get stored so we need
* to run through the reply to see what the results were ...
*/
ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
if (ret)
return ret;
if (ret && ret != -FW_ENOMEM)
break;
for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
u16 index = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
u16 index = FW_VI_MAC_CMD_IDX_G(
be16_to_cpu(p->valid_to_idx));
if (idx)
idx[i] = index >= max_naddr ? 0xffff : index;
idx[offset + i] = (index >= max_naddr ?
0xffff : index);
if (index < max_naddr)
ret++;
nfilters++;
else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[i]));
*hash |= (1ULL <<
hash_mac_addr(addr[offset + i]));
}
free = false;
offset += fw_naddr;
rem -= fw_naddr;
}
if (ret == 0 || ret == -FW_ENOMEM)
ret = nfilters;
return ret;
}
......@@ -4865,9 +5296,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
unsigned int max_mac_addr = is_t4(adap->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
......@@ -5276,9 +5705,30 @@ int t4_prep_adapter(struct adapter *adapter)
switch (ver) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
adapter->params.arch.sge_fl_db = DBPRIO_F;
adapter->params.arch.mps_tcam_size =
NUM_MPS_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 128;
adapter->params.arch.nchan = NCHAN;
adapter->params.arch.vfcount = 128;
break;
case CHELSIO_T5:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
adapter->params.arch.mps_tcam_size =
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 128;
adapter->params.arch.nchan = NCHAN;
adapter->params.arch.vfcount = 128;
break;
case CHELSIO_T6:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
adapter->params.arch.sge_fl_db = 0;
adapter->params.arch.mps_tcam_size =
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
adapter->params.arch.mps_rplc_size = 256;
adapter->params.arch.nchan = 2;
adapter->params.arch.vfcount = 256;
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
......
......@@ -686,6 +686,9 @@ struct cpl_tx_pkt {
#define TXPKT_ETHHDR_LEN_S 34
#define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S)
#define T6_TXPKT_ETHHDR_LEN_S 32
#define T6_TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << T6_TXPKT_ETHHDR_LEN_S)
#define TXPKT_CSUM_TYPE_S 40
#define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S)
......
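These defines exist because the Ethernet-header-length field of the CPL_TX_PKT control word starts at bit 34 on T4/T5 but at bit 32 on T6 (per the _S values above), which is why hwcsum() and the TSO path in sge.c now choose between TXPKT_ETHHDR_LEN_V and T6_TXPKT_ETHHDR_LEN_V by chip version. A standalone sketch of that selection, reusing the shifts from this hunk:

#include <stdio.h>

#define TXPKT_ETHHDR_LEN_S       34
#define TXPKT_ETHHDR_LEN_V(x)    ((unsigned long long)(x) << TXPKT_ETHHDR_LEN_S)
#define T6_TXPKT_ETHHDR_LEN_S    32
#define T6_TXPKT_ETHHDR_LEN_V(x) ((unsigned long long)(x) << T6_TXPKT_ETHHDR_LEN_S)

#define CHELSIO_T5 0x5

/* Mirrors the chip check hwcsum()/t4_eth_xmit() now perform before packing
 * the extra L2 header length into the Tx control word.
 */
static unsigned long long pack_ethhdr_len(int chip_version, int eth_xtra_len)
{
	if (chip_version <= CHELSIO_T5)
		return TXPKT_ETHHDR_LEN_V(eth_xtra_len);
	return T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
}

int main(void)
{
	/* A VLAN tag adds 4 bytes of L2 header beyond ETH_HLEN. */
	printf("T5: 0x%llx  T6: 0x%llx\n",
	       pack_ethhdr_len(0x5, 4), pack_ethhdr_len(0x6, 4));
	return 0;
}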
......@@ -418,6 +418,20 @@
#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
#define SGE_ERROR_STATS_A 0x1100
#define UNCAPTURED_ERROR_S 18
#define UNCAPTURED_ERROR_V(x) ((x) << UNCAPTURED_ERROR_S)
#define UNCAPTURED_ERROR_F UNCAPTURED_ERROR_V(1U)
#define ERROR_QID_VALID_S 17
#define ERROR_QID_VALID_V(x) ((x) << ERROR_QID_VALID_S)
#define ERROR_QID_VALID_F ERROR_QID_VALID_V(1U)
#define ERROR_QID_S 0
#define ERROR_QID_M 0x1ffffU
#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M)
#define HP_INT_THRESH_S 28
#define HP_INT_THRESH_M 0xfU
#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
......@@ -705,6 +719,10 @@
#define REGISTER_S 0
#define REGISTER_V(x) ((x) << REGISTER_S)
#define T6_ENABLE_S 31
#define T6_ENABLE_V(x) ((x) << T6_ENABLE_S)
#define T6_ENABLE_F T6_ENABLE_V(1U)
#define PFNUM_S 0
#define PFNUM_V(x) ((x) << PFNUM_S)
......@@ -2054,6 +2072,11 @@
#define VFLKPIDX_M 0xffU
#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
#define T6_VFWRADDR_S 8
#define T6_VFWRADDR_M 0xffU
#define T6_VFWRADDR_V(x) ((x) << T6_VFWRADDR_S)
#define T6_VFWRADDR_G(x) (((x) >> T6_VFWRADDR_S) & T6_VFWRADDR_M)
#define TP_RSS_CONFIG_CNG_A 0x7e04
#define TP_RSS_SECRET_KEY0_A 0x40
#define TP_RSS_PF0_CONFIG_A 0x30
......@@ -2175,7 +2198,28 @@
#define MPS_RX_PERR_INT_CAUSE_A 0x11074
#define MPS_CLS_TCAM_Y_L_A 0xf000
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
#define DMACH_S 0
#define DMACH_M 0xffffU
#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
#define MPS_CLS_TCAM_X_L_A 0xf008
#define MPS_CLS_TCAM_DATA2_CTL_A 0xf008
#define CTLCMDTYPE_S 31
#define CTLCMDTYPE_V(x) ((x) << CTLCMDTYPE_S)
#define CTLCMDTYPE_F CTLCMDTYPE_V(1U)
#define CTLTCAMSEL_S 25
#define CTLTCAMSEL_V(x) ((x) << CTLTCAMSEL_S)
#define CTLTCAMINDEX_S 17
#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S)
#define CTLXYBITSEL_S 16
#define CTLXYBITSEL_V(x) ((x) << CTLXYBITSEL_S)
#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
......@@ -2184,6 +2228,45 @@
#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
#define MPS_CLS_SRAM_L_A 0xe000
#define T6_MULTILISTEN0_S 26
#define T6_SRAM_PRIO3_S 23
#define T6_SRAM_PRIO3_M 0x7U
#define T6_SRAM_PRIO3_G(x) (((x) >> T6_SRAM_PRIO3_S) & T6_SRAM_PRIO3_M)
#define T6_SRAM_PRIO2_S 20
#define T6_SRAM_PRIO2_M 0x7U
#define T6_SRAM_PRIO2_G(x) (((x) >> T6_SRAM_PRIO2_S) & T6_SRAM_PRIO2_M)
#define T6_SRAM_PRIO1_S 17
#define T6_SRAM_PRIO1_M 0x7U
#define T6_SRAM_PRIO1_G(x) (((x) >> T6_SRAM_PRIO1_S) & T6_SRAM_PRIO1_M)
#define T6_SRAM_PRIO0_S 14
#define T6_SRAM_PRIO0_M 0x7U
#define T6_SRAM_PRIO0_G(x) (((x) >> T6_SRAM_PRIO0_S) & T6_SRAM_PRIO0_M)
#define T6_SRAM_VLD_S 13
#define T6_SRAM_VLD_V(x) ((x) << T6_SRAM_VLD_S)
#define T6_SRAM_VLD_F T6_SRAM_VLD_V(1U)
#define T6_REPLICATE_S 12
#define T6_REPLICATE_V(x) ((x) << T6_REPLICATE_S)
#define T6_REPLICATE_F T6_REPLICATE_V(1U)
#define T6_PF_S 9
#define T6_PF_M 0x7U
#define T6_PF_G(x) (((x) >> T6_PF_S) & T6_PF_M)
#define T6_VF_VALID_S 8
#define T6_VF_VALID_V(x) ((x) << T6_VF_VALID_S)
#define T6_VF_VALID_F T6_VF_VALID_V(1U)
#define T6_VF_S 0
#define T6_VF_M 0xffU
#define T6_VF_G(x) (((x) >> T6_VF_S) & T6_VF_M)
#define MPS_CLS_SRAM_H_A 0xe004
#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
......@@ -2433,6 +2516,8 @@
#define CIM_F CIM_V(1U)
#define MC1_S 31
#define MC1_V(x) ((x) << MC1_S)
#define MC1_F MC1_V(1U)
#define PL_INT_ENABLE_A 0x19410
#define PL_INT_MAP0_A 0x19414
......@@ -2463,6 +2548,18 @@
#define REV_V(x) ((x) << REV_S)
#define REV_G(x) (((x) >> REV_S) & REV_M)
#define T6_UNKNOWNCMD_S 3
#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
#define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U)
#define T6_LIP0_S 2
#define T6_LIP0_V(x) ((x) << T6_LIP0_S)
#define T6_LIP0_F T6_LIP0_V(1U)
#define T6_LIPMISS_S 1
#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
#define T6_LIPMISS_F T6_LIPMISS_V(1U)
#define LE_DB_INT_CAUSE_A 0x19c3c
#define REQQPARERR_S 16
......@@ -2485,6 +2582,14 @@
#define LIP0_V(x) ((x) << LIP0_S)
#define LIP0_F LIP0_V(1U)
#define TCAMINTPERR_S 13
#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
#define TCAMINTPERR_F TCAMINTPERR_V(1U)
#define SSRAMINTPERR_S 10
#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S)
#define SSRAMINTPERR_F SSRAMINTPERR_V(1U)
#define NCSI_INT_CAUSE_A 0x1a0d8
#define CIM_DM_PRTY_ERR_S 8
......
......@@ -63,6 +63,7 @@
#define FETCHBURSTMIN_64B_X 2
#define FETCHBURSTMAX_256B_X 2
#define FETCHBURSTMAX_512B_X 3
#define HOSTFCMODE_STATUS_PAGE_X 2
......
......@@ -788,15 +788,27 @@ struct fw_ldst_cmd {
__be16 vctl;
__be16 rval;
} mdio;
struct fw_ldst_mps {
__be16 fid_ctl;
union fw_ldst_mps {
struct fw_ldst_mps_rplc {
__be16 fid_idx;
__be16 rplcpf_pkd;
__be32 rplc255_224;
__be32 rplc223_192;
__be32 rplc191_160;
__be32 rplc159_128;
__be32 rplc127_96;
__be32 rplc95_64;
__be32 rplc63_32;
__be32 rplc31_0;
} rplc;
struct fw_ldst_mps_atrb {
__be16 fid_mpsid;
__be16 r2[3];
__be32 r3[2];
__be32 r4;
__be32 atrb;
__be16 vlan[16];
} atrb;
} mps;
struct fw_ldst_func {
u8 access_ctl;
......@@ -831,8 +843,8 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_FID_S 15
#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S)
#define FW_LDST_CMD_CTL_S 0
#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S)
#define FW_LDST_CMD_IDX_S 0
#define FW_LDST_CMD_IDX_V(x) ((x) << FW_LDST_CMD_IDX_S)
#define FW_LDST_CMD_RPLCPF_S 0
#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S)
......@@ -2536,13 +2548,8 @@ enum fw_port_mod_sub_type {
FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
};
/* port stats */
#define FW_NUM_PORT_STATS 50
#define FW_NUM_PORT_TX_STATS 23
#define FW_NUM_PORT_RX_STATS 27
enum fw_port_stats_tx_index {
FW_STAT_TX_PORT_BYTES_IX,
FW_STAT_TX_PORT_BYTES_IX = 0,
FW_STAT_TX_PORT_FRAMES_IX,
FW_STAT_TX_PORT_BCAST_IX,
FW_STAT_TX_PORT_MCAST_IX,
......@@ -2564,11 +2571,12 @@ enum fw_port_stats_tx_index {
FW_STAT_TX_PORT_PPP4_IX,
FW_STAT_TX_PORT_PPP5_IX,
FW_STAT_TX_PORT_PPP6_IX,
FW_STAT_TX_PORT_PPP7_IX
FW_STAT_TX_PORT_PPP7_IX,
FW_NUM_PORT_TX_STATS
};
enum fw_port_stat_rx_index {
FW_STAT_RX_PORT_BYTES_IX,
FW_STAT_RX_PORT_BYTES_IX = 0,
FW_STAT_RX_PORT_FRAMES_IX,
FW_STAT_RX_PORT_BCAST_IX,
FW_STAT_RX_PORT_MCAST_IX,
......@@ -2594,9 +2602,14 @@ enum fw_port_stat_rx_index {
FW_STAT_RX_PORT_PPP5_IX,
FW_STAT_RX_PORT_PPP6_IX,
FW_STAT_RX_PORT_PPP7_IX,
FW_STAT_RX_PORT_LESS_64B_IX
FW_STAT_RX_PORT_LESS_64B_IX,
FW_STAT_RX_PORT_MAC_ERROR_IX,
FW_NUM_PORT_RX_STATS
};
/* port stats */
#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS)
struct fw_port_stats_cmd {
__be32 op_to_portid;
__be32 retval_len16;
......@@ -3025,7 +3038,8 @@ struct fw_hdr {
enum fw_hdr_chip {
FW_HDR_CHIP_T4,
FW_HDR_CHIP_T5
FW_HDR_CHIP_T5,
FW_HDR_CHIP_T6
};
#define FW_HDR_FW_VER_MAJOR_S 24
......
......@@ -45,4 +45,9 @@
#define T5FW_VERSION_MICRO 0x20
#define T5FW_VERSION_BUILD 0x00
#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x0D
#define T6FW_VERSION_MICRO 0x2D
#define T6FW_VERSION_BUILD 0x00
#endif
......@@ -524,7 +524,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
*/
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
u32 val;
u32 val = adapter->params.arch.sge_fl_db;
/* The SGE keeps track of its Producer and Consumer Indices in terms
* of Egress Queue Units so we can only tell it about integral numbers
......@@ -532,11 +532,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
if (is_t4(adapter->params.chip))
val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
else
val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
DBTYPE_F;
val |= DBPRIO_F;
val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
/* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them.
......@@ -1084,7 +1082,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
static u64 hwcsum(const struct sk_buff *skb)
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
......@@ -1116,11 +1114,16 @@ static u64 hwcsum(const struct sk_buff *skb)
goto nocsum;
}
if (likely(csum_type >= TX_CSUM_TCPIP))
return TXPKT_CSUM_TYPE_V(csum_type) |
TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) |
TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN);
else {
if (likely(csum_type >= TX_CSUM_TCPIP)) {
u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
if (chip <= CHELSIO_T5)
hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
else
hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
} else {
int start = skb_transport_offset(skb);
return TXPKT_CSUM_TYPE_V(csum_type) |
......@@ -1308,10 +1311,15 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* accounting.
*/
cpl = (void *)(lso + 1);
cntrl = (TXPKT_CSUM_TYPE_V(v6 ?
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
else
cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
TXPKT_IPHDR_LEN_V(l3hdr_len) |
TXPKT_ETHHDR_LEN_V(eth_xtra_len));
TXPKT_IPHDR_LEN_V(l3hdr_len);
txq->tso++;
txq->tx_cso += ssi->gso_segs;
} else {
......@@ -1328,7 +1336,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F;
cntrl = hwcsum(adapter->params.chip, skb) |
TXPKT_IPCSUM_DIS_F;
txq->tx_cso++;
} else
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
......@@ -2247,6 +2256,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
if (fl) {
enum chip_type chip =
CHELSIO_CHIP_VERSION(adapter->params.chip);
/*
* Allocate the ring for the hardware free list (with space
* for its status page) along with the associated software
......@@ -2286,7 +2297,9 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
FETCHBURSTMAX_512B_X :
FETCHBURSTMAX_256B_X));
cmd.fl0size = cpu_to_be16(flsz);
cmd.fl0addr = cpu_to_be64(fl->addr);
}
......
......@@ -51,6 +51,7 @@
*/
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
#define CHELSIO_T6 0x6
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
......@@ -156,6 +157,12 @@ struct vpd_params {
u32 cclk; /* Core Clock (KHz) */
};
/* Stores chip specific parameters */
struct arch_specific_params {
u32 sge_fl_db;
u16 mps_tcam_size;
};
/*
* Global Receive Side Scaling (RSS) parameters in host-native format.
*/
......@@ -215,6 +222,7 @@ struct adapter_params {
struct vpd_params vpd; /* Vital Product Data */
struct rss_params rss; /* Receive Side Scaling */
struct vf_resources vfres; /* Virtual Function Resource limits */
struct arch_specific_params arch; /* chip specific params */
enum chip_type chip; /* chip code */
u8 nports; /* # of Ethernet "ports" */
};
......
......@@ -1191,9 +1191,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
unsigned int max_naddr = is_t4(adapter->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
if (naddr > max_naddr)
return -EINVAL;
......@@ -1285,9 +1283,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
unsigned int max_naddr = is_t4(adapter->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
/*
* If this is a new allocation, determine whether it should be
......@@ -1310,7 +1306,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
if (ret == 0) {
p = &rpl.u.exact[0];
ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
if (ret >= max_naddr)
if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
......@@ -1590,11 +1586,25 @@ int t4vf_prep_adapter(struct adapter *adapter)
switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
adapter->params.arch.sge_fl_db = DBPRIO_F;
adapter->params.arch.mps_tcam_size =
NUM_MPS_CLS_SRAM_L_INSTANCES;
break;
case CHELSIO_T5:
chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
adapter->params.arch.mps_tcam_size =
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
break;
case CHELSIO_T6:
chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
adapter->params.arch.sge_fl_db = 0;
adapter->params.arch.mps_tcam_size =
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
break;
}
......
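As a closing cross-reference, the per-chip constants that t4_prep_adapter() and t4vf_prep_adapter() load into params.arch in this series read naturally as a table. The sketch below collects the values from the hunks above; the DBPRIO_F/DBTYPE_F/NCHAN/NUM_MPS_*_INSTANCES definitions are local placeholders so the sketch compiles on its own and should not be taken as the authoritative header values.

#include <stdio.h>

/* Placeholder definitions; the driver takes the real ones from its headers. */
#define DBPRIO_F (1U << 14)
#define DBTYPE_F (1U << 13)
#define NCHAN    4
#define NUM_MPS_CLS_SRAM_L_INSTANCES    336
#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512

struct arch_specific_params {
	unsigned char  nchan;
	unsigned short mps_rplc_size;
	unsigned short vfcount;
	unsigned int   sge_fl_db;
	unsigned short mps_tcam_size;
};

/* Per-chip values as set in t4_prep_adapter() in the t4_hw.c hunk above. */
static const struct arch_specific_params arch_tbl[] = {
	/* T4 */ { NCHAN, 128, 128, DBPRIO_F,            NUM_MPS_CLS_SRAM_L_INSTANCES },
	/* T5 */ { NCHAN, 128, 128, DBPRIO_F | DBTYPE_F, NUM_MPS_T5_CLS_SRAM_L_INSTANCES },
	/* T6 */ { 2,     256, 256, 0,                   NUM_MPS_T5_CLS_SRAM_L_INSTANCES },
};

int main(void)
{
	static const char * const names[] = { "T4", "T5", "T6" };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%s: nchan=%u rplc=%u vfcount=%u sge_fl_db=0x%x tcam=%u\n",
		       names[i], arch_tbl[i].nchan, arch_tbl[i].mps_rplc_size,
		       arch_tbl[i].vfcount, arch_tbl[i].sge_fl_db,
		       arch_tbl[i].mps_tcam_size);
	return 0;
}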