Commit dcef7070 authored by David S. Miller

Merge branch 'cxgb4-more-debug-info'

Hariprasad Shenai says:

====================
Add some more debug info

This patch series adds the following:
Add more info to the sge_qinfo dump.
Differentiate tids and stids between the different regions, and add a
debugfs entry to dump all the tid info.

This patch series has been created against the net-next tree and includes
patches for the cxgb4 driver.

We have included all the maintainers of the respective drivers. Kindly review
the change and let us know if you have any review comments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 76550786 a4011fd4
@@ -1943,13 +1943,13 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
 {
 	struct adapter *adap = seq->private;
 	int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
-	int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+	int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
 	int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
 	int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
 	int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
 	int i, r = (uintptr_t)v - 1;
-	int toe_idx = r - eth_entries;
-	int rdma_idx = toe_idx - toe_entries;
+	int iscsi_idx = r - eth_entries;
+	int rdma_idx = iscsi_idx - iscsi_entries;
 	int ciq_idx = rdma_idx - rdma_entries;
 	int ctrl_idx = ciq_idx - ciq_entries;
 	int fq_idx = ctrl_idx - ctrl_entries;
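Note on the index arithmetic above: the seq_file record index r is walked down through the regions in display order (Ethernet, iSCSI, RDMA, RDMA CIQ, control, FW event queue). Each region contributes DIV_ROUND_UP(nqueues, 4) records because queue sets are printed four per row, and the first *_idx that is still below its region's entry count picks the branch taken later in the function. A minimal standalone sketch of the same cascade (not driver code; the queue counts are invented):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int ethqsets = 10, ofldqsets = 6, rdmaqs = 4;	/* invented counts */
	int eth_entries = DIV_ROUND_UP(ethqsets, 4);	/* 3 rows */
	int iscsi_entries = DIV_ROUND_UP(ofldqsets, 4);	/* 2 rows */
	int rdma_entries = DIV_ROUND_UP(rdmaqs, 4);	/* 1 row  */
	int r;

	for (r = 0; r < eth_entries + iscsi_entries + rdma_entries; r++) {
		int iscsi_idx = r - eth_entries;
		int rdma_idx = iscsi_idx - iscsi_entries;

		if (r < eth_entries)
			printf("r=%d -> Ethernet row %d\n", r, r);
		else if (iscsi_idx < iscsi_entries)
			printf("r=%d -> iSCSI row %d\n", r, iscsi_idx);
		else
			printf("r=%d -> RDMA row %d\n", r, rdma_idx);
	}
	return 0;
}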
@@ -1965,8 +1965,12 @@ do { \
 	seq_putc(seq, '\n'); \
 } while (0)
 #define S(s, v) S3("s", s, v)
+#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
 #define T(s, v) S3("u", s, tx[i].v)
+#define TL(s, v) T3("lu", s, v)
+#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
 #define R(s, v) S3("u", s, rx[i].v)
+#define RL(s, v) R3("lu", s, v)
 
 	if (r < eth_entries) {
 		int base_qset = r * 4;
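How the macro family fits together: S3 (its opening lines are outside this hunk) prints a row label and then one value per queue in the current group of up to four, using i and n from the enclosing function; T and R fix the value expression to tx[i].v and rx[i].v. The new T3/R3 variants parameterize the printf format specifier so that TL/RL can print the unsigned long statistics counters added below. A self-contained user-space sketch of the same pattern (the shape of S3 is assumed from its visible tail; the demo structs are invented):

#include <stdio.h>

struct demo_txq { unsigned long tso, tx_cso; };
struct demo_rxq { unsigned long pkts, rx_drops; };

int main(void)
{
	struct demo_txq tx[4] = { {10, 3}, {20, 6}, {30, 9}, {40, 12} };
	struct demo_rxq rx[4] = { {100, 0}, {200, 1}, {300, 2}, {400, 3} };
	int i, n = 4;

#define S3(fmt_spec, s, v) \
do { \
	printf("%-12s", s); \
	for (i = 0; i < n; ++i) \
		printf(" %16" fmt_spec, v); \
	printf("\n"); \
} while (0)
#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
#define TL(s, v) T3("lu", s, v)
#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
#define RL(s, v) R3("lu", s, v)

	TL("TSO:", tso);	/* one column per queue in the group */
	TL("TxCSO:", tx_cso);
	RL("RxPackets:", pkts);
	RL("RxDrops:", rx_drops);
	return 0;
}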
@@ -2005,12 +2009,30 @@ do { \
 		R("FL avail:", fl.avail);
 		R("FL PIDX:", fl.pidx);
 		R("FL CIDX:", fl.cidx);
-	} else if (toe_idx < toe_entries) {
-		const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
-		const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
-		int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxCSO:", stats.rx_cso);
+		RL("VLANxtract:", stats.vlan_ex);
+		RL("LROmerged:", stats.lro_merged);
+		RL("LROpackets:", stats.lro_pkts);
+		RL("RxDrops:", stats.rx_drops);
+		TL("TSO:", tso);
+		TL("TxCSO:", tx_cso);
+		TL("VLANins:", vlan_ins);
+		TL("TxQFull:", q.stops);
+		TL("TxQRestarts:", q.restarts);
+		TL("TxMapErr:", mapping_err);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
+	} else if (iscsi_idx < iscsi_entries) {
+		const struct sge_ofld_rxq *rx =
+			&adap->sge.ofldrxq[iscsi_idx * 4];
+		const struct sge_ofld_txq *tx =
+			&adap->sge.ofldtxq[iscsi_idx * 4];
+		int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
 
-		S("QType:", "TOE");
+		S("QType:", "iSCSI");
 		T("TxQ ID:", q.cntxt_id);
 		T("TxQ size:", q.size);
 		T("TxQ inuse:", q.in_use);
@@ -2030,6 +2052,13 @@ do { \
 		R("FL avail:", fl.avail);
 		R("FL PIDX:", fl.pidx);
 		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxImmPkts:", stats.imm);
+		RL("RxNoMem:", stats.nomem);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
 	} else if (rdma_idx < rdma_entries) {
 		const struct sge_ofld_rxq *rx =
 			&adap->sge.rdmarxq[rdma_idx * 4];
@@ -2052,6 +2081,13 @@ do { \
 		R("FL avail:", fl.avail);
 		R("FL PIDX:", fl.pidx);
 		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxImmPkts:", stats.imm);
+		RL("RxNoMem:", stats.nomem);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
 	} else if (ciq_idx < ciq_entries) {
 		const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
 		int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
@@ -2067,6 +2103,9 @@ do { \
 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
 		S3("u", "Intr pktcnt:",
 		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+		RL("RxAN:", stats.an);
+		RL("RxNoMem:", stats.nomem);
 	} else if (ctrl_idx < ctrl_entries) {
 		const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
 		int n = min(4, adap->params.nports - 4 * ctrl_idx);
@@ -2077,6 +2116,8 @@ do { \
 		T("TxQ inuse:", q.in_use);
 		T("TxQ CIDX:", q.cidx);
 		T("TxQ PIDX:", q.pidx);
+		TL("TxQFull:", q.stops);
+		TL("TxQRestarts:", q.restarts);
 	} else if (fq_idx == 0) {
 		const struct sge_rspq *evtq = &adap->sge.fw_evtq;
@@ -2092,8 +2133,12 @@ do { \
 		   adap->sge.counter_val[evtq->pktcnt_idx]);
 	}
 #undef R
+#undef RL
 #undef T
+#undef TL
 #undef S
+#undef R3
+#undef T3
 #undef S3
 	return 0;
 }
@@ -2212,6 +2257,73 @@ static const struct file_operations mem_debugfs_fops = {
 	.llseek  = default_llseek,
 };
 
+static int tid_info_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adap = seq->private;
+	const struct tid_info *t = &adap->tids;
+	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+		unsigned int sb;
+
+		if (chip <= CHELSIO_T5)
+			sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
+		else
+			sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
+
+		if (sb) {
+			seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u/%u\n",
+				   atomic_read(&t->tids_in_use),
+				   atomic_read(&t->hash_tids_in_use));
+		} else if (adap->flags & FW_OFLD_CONN) {
+			seq_printf(seq, "TID range: %u..%u/%u..%u",
+				   t->aftid_base,
+				   t->aftid_end,
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u/%u\n",
+				   atomic_read(&t->tids_in_use),
+				   atomic_read(&t->hash_tids_in_use));
+		} else {
+			seq_printf(seq, "TID range: %u..%u",
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u\n",
+				   atomic_read(&t->hash_tids_in_use));
+		}
+	} else if (t->ntids) {
+		seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+		seq_printf(seq, ", in use: %u\n",
+			   atomic_read(&t->tids_in_use));
+	}
+
+	if (t->nstids)
+		seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+			   (!t->stid_base &&
+			   (chip <= CHELSIO_T5)) ?
+			   t->stid_base + 1 : t->stid_base,
+			   t->stid_base + t->nstids - 1, t->stids_in_use);
+	if (t->natids)
+		seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
+			   t->natids - 1, t->atids_in_use);
+	seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
+		   t->ftid_base + t->nftids - 1);
+	if (t->nsftids)
+		seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
+			   t->sftid_base, t->sftid_base + t->nsftids - 2,
+			   t->sftids_in_use);
+	if (t->ntids)
+		seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
+			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
+			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
+	return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+
 static void add_debugfs_mem(struct adapter *adap, const char *name,
 			    unsigned int idx, unsigned int size_mb)
 {
 
@@ -2625,6 +2737,7 @@ int t4_setup_debugfs(struct adapter *adap)
 #if IS_ENABLED(CONFIG_IPV6)
 	{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
 #endif
+	{ "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
 	{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
 	{ "meminfo", &meminfo_fops, S_IRUSR, 0 },
 };
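DEFINE_SIMPLE_DEBUGFS_FILE(tid_info) is what supplies the tid_info_debugfs_fops referenced in the table above. Its definition sits outside this diff; judging from the standard seq_file single_open() pattern it presumably expands along these lines (a sketch, not the verbatim macro):

/* Assumed expansion: a single_open() wrapper so tid_info_show() renders
 * the whole file in one pass, plus the matching file_operations.
 */
static int tid_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, tid_info_show, inode->i_private);
}

static const struct file_operations tid_info_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = tid_info_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};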
...
@@ -1548,7 +1548,7 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
 		t->stid_tab[stid].data = data;
 		stid -= t->nstids;
 		stid += t->sftid_base;
-		t->stids_in_use++;
+		t->sftids_in_use++;
 	}
 	spin_unlock_bh(&t->stid_lock);
 	return stid;
@@ -1573,10 +1573,14 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
 	else
 		bitmap_release_region(t->stid_bmap, stid, 2);
 	t->stid_tab[stid].data = NULL;
-	if (family == PF_INET)
-		t->stids_in_use--;
-	else
-		t->stids_in_use -= 4;
+	if (stid < t->nstids) {
+		if (family == PF_INET)
+			t->stids_in_use--;
+		else
+			t->stids_in_use -= 4;
+	} else {
+		t->sftids_in_use--;
+	}
 	spin_unlock_bh(&t->stid_lock);
 }
 EXPORT_SYMBOL(cxgb4_free_stid);
@@ -1654,20 +1658,25 @@ static void process_tid_release_list(struct work_struct *work)
  */
 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
 {
-	void *old;
 	struct sk_buff *skb;
 	struct adapter *adap = container_of(t, struct adapter, tids);
 
-	old = t->tid_tab[tid];
+	WARN_ON(tid >= t->ntids);
+
+	if (t->tid_tab[tid]) {
+		t->tid_tab[tid] = NULL;
+		if (t->hash_base && (tid >= t->hash_base))
+			atomic_dec(&t->hash_tids_in_use);
+		else
+			atomic_dec(&t->tids_in_use);
+	}
+
 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
 	if (likely(skb)) {
-		t->tid_tab[tid] = NULL;
 		mk_tid_release(skb, chan, tid);
 		t4_ofld_send(adap, skb);
 	} else
 		cxgb4_queue_tid_release(t, chan, tid);
-	if (old)
-		atomic_dec(&t->tids_in_use);
 }
 EXPORT_SYMBOL(cxgb4_remove_tid);
@@ -1702,9 +1711,11 @@ static int tid_init(struct tid_info *t)
 	spin_lock_init(&t->atid_lock);
 
 	t->stids_in_use = 0;
+	t->sftids_in_use = 0;
 	t->afree = NULL;
 	t->atids_in_use = 0;
 	atomic_set(&t->tids_in_use, 0);
+	atomic_set(&t->hash_tids_in_use, 0);
 
 	/* Setup the free list for atid_tab and clear the stid bitmap. */
 	if (natids) {
@@ -4814,6 +4825,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		adapter->params.offload = 0;
 	}
 
+	if (is_offload(adapter)) {
+		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+			u32 hash_base, hash_reg;
+
+			if (chip <= CHELSIO_T5) {
+				hash_reg = LE_DB_TID_HASHBASE_A;
+				hash_base = t4_read_reg(adapter, hash_reg);
+				adapter->tids.hash_base = hash_base / 4;
+			} else {
+				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
+				hash_base = t4_read_reg(adapter, hash_reg);
+				adapter->tids.hash_base = hash_base;
+			}
+		}
+	}
+
 	/* See what interrupts we'll be using */
 	if (msi > 1 && enable_msix(adapter) == 0)
 		adapter->flags |= USING_MSIX;
...
@@ -96,6 +96,7 @@ struct tid_info {
 	unsigned long *stid_bmap;
 	unsigned int nstids;
 	unsigned int stid_base;
+	unsigned int hash_base;
 
 	union aopen_entry *atid_tab;
 	unsigned int natids;
@@ -116,8 +117,12 @@ struct tid_info {
 
 	spinlock_t stid_lock;
 	unsigned int stids_in_use;
+	unsigned int sftids_in_use;
 
+	/* TIDs in the TCAM */
 	atomic_t tids_in_use;
+	/* TIDs in the HASH */
+	atomic_t hash_tids_in_use;
 };
 
 static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -147,7 +152,10 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
 				    unsigned int tid)
 {
 	t->tid_tab[tid] = data;
-	atomic_inc(&t->tids_in_use);
+	if (t->hash_base && (tid >= t->hash_base))
+		atomic_inc(&t->hash_tids_in_use);
+	else
+		atomic_inc(&t->tids_in_use);
 }
 
 int cxgb4_alloc_atid(struct tid_info *t, void *data);
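The accounting split being introduced: when hash filtering is enabled, tids below hash_base live in the LE TCAM and are counted in tids_in_use, while tids at or above hash_base come from the hash region and are counted in hash_tids_in_use; cxgb4_remove_tid decrements whichever counter cxgb4_insert_tid incremented. A toy user-space model of that invariant (not driver code; the base value is invented):

#include <stdio.h>

struct demo_tids {
	unsigned int hash_base;	/* 0 means no hash region */
	int tids_in_use;	/* TIDs in the TCAM */
	int hash_tids_in_use;	/* TIDs in the hash table */
};

static void demo_insert_tid(struct demo_tids *t, unsigned int tid)
{
	if (t->hash_base && tid >= t->hash_base)
		t->hash_tids_in_use++;
	else
		t->tids_in_use++;
}

static void demo_remove_tid(struct demo_tids *t, unsigned int tid)
{
	if (t->hash_base && tid >= t->hash_base)
		t->hash_tids_in_use--;
	else
		t->tids_in_use--;
}

int main(void)
{
	struct demo_tids t = { .hash_base = 1024 };	/* invented base */

	demo_insert_tid(&t, 5);		/* TCAM-region tid */
	demo_insert_tid(&t, 4096);	/* hash-region tid */
	demo_remove_tid(&t, 5);
	printf("tcam=%d hash=%d\n", t.tids_in_use, t.hash_tids_in_use);
	return 0;
}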
...
@@ -1424,18 +1424,17 @@ static void restart_ctrlq(unsigned long data)
 		struct fw_wr_hdr *wr;
 		unsigned int ndesc = skb->priority;	/* previously saved */
 
-		/*
-		 * Write descriptors and free skbs outside the lock to limit
+		written += ndesc;
+		/* Write descriptors and free skbs outside the lock to limit
 		 * wait times. q->full is still set so new skbs will be queued.
 		 */
+		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+		txq_advance(&q->q, ndesc);
 		spin_unlock(&q->sendq.lock);
 
-		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
 		inline_tx_skb(skb, &q->q, wr);
 		kfree_skb(skb);
-		written += ndesc;
-		txq_advance(&q->q, ndesc);
 
 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
 			unsigned long old = q->q.stops;
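The point of the reordering above: the pidx snapshot and txq_advance() now happen while sendq.lock is still held, so the descriptor slots are reserved before the lock is dropped and a concurrent producer cannot be handed the same ring space; only the slower descriptor write and skb free run unlocked, as the comment promises. A toy sketch of this reserve-under-lock, fill-after-unlock pattern (a pthread mutex standing in for the spinlock; not driver code):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct toy_ring {
	pthread_mutex_t lock;
	unsigned int pidx;
	char desc[8][32];
};

static void toy_send(struct toy_ring *r, const char *payload)
{
	char *slot;

	pthread_mutex_lock(&r->lock);
	slot = r->desc[r->pidx];	/* snapshot the producer index */
	r->pidx = (r->pidx + 1) % 8;	/* advance while still locked */
	pthread_mutex_unlock(&r->lock);

	/* Slot is reserved; filling it needs no lock. */
	strncpy(slot, payload, sizeof(r->desc[0]) - 1);
}

int main(void)
{
	struct toy_ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };

	toy_send(&r, "wr0");
	toy_send(&r, "wr1");
	printf("%s %s\n", r.desc[0], r.desc[1]);	/* wr0 wr1 */
	return 0;
}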
...
@@ -2732,10 +2732,15 @@
 #define T6_LIPMISS_F    T6_LIPMISS_V(1U)
 
 #define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_SERVER_INDEX_A 0x19c18
+#define LE_DB_SRVR_START_INDEX_A 0x19c18
+#define LE_DB_ACT_CNT_IPV4_A 0x19c20
+#define LE_DB_ACT_CNT_IPV6_A 0x19c24
 #define LE_DB_HASH_TID_BASE_A 0x19c30
 #define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
 #define LE_DB_INT_CAUSE_A 0x19c3c
 #define LE_DB_TID_HASHBASE_A 0x19df8
+#define T6_LE_DB_HASH_TID_BASE_A 0x19df8
 
 #define HASHEN_S    20
 #define HASHEN_V(x) ((x) << HASHEN_S)
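These defines follow the driver's usual _S/_V/_F field convention: FOO_S is the bit offset, FOO_V(x) shifts a value into the field, and FOO_F is the single-bit mask, so HASHEN_F equals 1U << 20 and the HASHEN_F tests in this series read bit 20 of LE_DB_CONFIG_A. A tiny standalone check (the register value is an invented example):

#include <assert.h>

#define HASHEN_S    20
#define HASHEN_V(x) ((x) << HASHEN_S)
#define HASHEN_F    HASHEN_V(1U)

int main(void)
{
	unsigned int le_db_config = 0x00100000;	/* invented raw value */

	assert(HASHEN_F == (1U << 20));
	assert(le_db_config & HASHEN_F);	/* hash lookup enabled */
	return 0;
}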