Commit 7d7632ad authored by Mike Marciniszyn, committed by Roland Dreier

IB/qib: Modify software pma counters to use percpu variables

The counters, unicast_xmit, unicast_rcv, multicast_xmit, multicast_rcv
are now maintained as percpu variables.

The mad code is modified to add a z_ latch so that the percpu counters
monotonically increase with appropriate adjustments in the reset,
read logic to maintain the z_ latch.

This patch also corrects the fact that unicast_xmit wasn't handled
at all for UC and RC QPs.
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 1ed88dd7
...@@ -1186,7 +1186,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *); ...@@ -1186,7 +1186,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *);
void qib_set_ctxtcnt(struct qib_devdata *); void qib_set_ctxtcnt(struct qib_devdata *);
int qib_create_ctxts(struct qib_devdata *dd); int qib_create_ctxts(struct qib_devdata *dd);
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int); struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8); int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *); void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *); u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
......
...@@ -3265,7 +3265,9 @@ static int init_6120_variables(struct qib_devdata *dd) ...@@ -3265,7 +3265,9 @@ static int init_6120_variables(struct qib_devdata *dd)
dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated); dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
qib_init_pportdata(ppd, dd, 0, 1); ret = qib_init_pportdata(ppd, dd, 0, 1);
if (ret)
goto bail;
ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
ppd->link_speed_supported = QIB_IB_SDR; ppd->link_speed_supported = QIB_IB_SDR;
ppd->link_width_enabled = IB_WIDTH_4X; ppd->link_width_enabled = IB_WIDTH_4X;
......
...@@ -4059,7 +4059,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd) ...@@ -4059,7 +4059,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
init_waitqueue_head(&cpspec->autoneg_wait); init_waitqueue_head(&cpspec->autoneg_wait);
INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work); INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
qib_init_pportdata(ppd, dd, 0, 1); ret = qib_init_pportdata(ppd, dd, 0, 1);
if (ret)
goto bail;
ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR; ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
......
...@@ -6544,7 +6544,11 @@ static int qib_init_7322_variables(struct qib_devdata *dd) ...@@ -6544,7 +6544,11 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
} }
dd->num_pports++; dd->num_pports++;
qib_init_pportdata(ppd, dd, pidx, dd->num_pports); ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
if (ret) {
dd->num_pports--;
goto bail;
}
ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
ppd->link_width_enabled = IB_WIDTH_4X; ppd->link_width_enabled = IB_WIDTH_4X;
......
...@@ -233,7 +233,7 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt, ...@@ -233,7 +233,7 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
/* /*
* Common code for initializing the physical port structure. * Common code for initializing the physical port structure.
*/ */
void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
u8 hw_pidx, u8 port) u8 hw_pidx, u8 port)
{ {
int size; int size;
...@@ -243,6 +243,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, ...@@ -243,6 +243,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
spin_lock_init(&ppd->sdma_lock); spin_lock_init(&ppd->sdma_lock);
spin_lock_init(&ppd->lflags_lock); spin_lock_init(&ppd->lflags_lock);
spin_lock_init(&ppd->cc_shadow_lock);
init_waitqueue_head(&ppd->state_wait); init_waitqueue_head(&ppd->state_wait);
init_timer(&ppd->symerr_clear_timer); init_timer(&ppd->symerr_clear_timer);
...@@ -250,8 +251,10 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, ...@@ -250,8 +251,10 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
ppd->symerr_clear_timer.data = (unsigned long)ppd; ppd->symerr_clear_timer.data = (unsigned long)ppd;
ppd->qib_wq = NULL; ppd->qib_wq = NULL;
ppd->ibport_data.pmastats =
spin_lock_init(&ppd->cc_shadow_lock); alloc_percpu(struct qib_pma_counters);
if (!ppd->ibport_data.pmastats)
return -ENOMEM;
if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
goto bail; goto bail;
...@@ -299,7 +302,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, ...@@ -299,7 +302,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
goto bail_3; goto bail_3;
} }
return; return 0;
bail_3: bail_3:
kfree(ppd->ccti_entries_shadow); kfree(ppd->ccti_entries_shadow);
...@@ -313,7 +316,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, ...@@ -313,7 +316,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
bail: bail:
/* User is intentionally disabling the congestion control agent */ /* User is intentionally disabling the congestion control agent */
if (!qib_cc_table_size) if (!qib_cc_table_size)
return; return 0;
if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) { if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
qib_cc_table_size = 0; qib_cc_table_size = 0;
...@@ -324,7 +327,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, ...@@ -324,7 +327,7 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n", qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
port); port);
return; return 0;
} }
static int init_pioavailregs(struct qib_devdata *dd) static int init_pioavailregs(struct qib_devdata *dd)
...@@ -635,6 +638,12 @@ static int qib_create_workqueues(struct qib_devdata *dd) ...@@ -635,6 +638,12 @@ static int qib_create_workqueues(struct qib_devdata *dd)
return -ENOMEM; return -ENOMEM;
} }
/*
 * Undo the percpu allocation done in qib_init_pportdata(): release the
 * per-CPU PMA counter storage for this port.  Clearing the pointer
 * afterwards makes a repeated call a harmless no-op (free_percpu(NULL)
 * does nothing) and turns any stale use into an obvious NULL access.
 */
static void qib_free_pportdata(struct qib_pportdata *ppd)
{
free_percpu(ppd->ibport_data.pmastats);
ppd->ibport_data.pmastats = NULL;
}
/** /**
* qib_init - do the actual initialization sequence on the chip * qib_init - do the actual initialization sequence on the chip
* @dd: the qlogic_ib device * @dd: the qlogic_ib device
...@@ -922,6 +931,7 @@ static void qib_shutdown_device(struct qib_devdata *dd) ...@@ -922,6 +931,7 @@ static void qib_shutdown_device(struct qib_devdata *dd)
destroy_workqueue(ppd->qib_wq); destroy_workqueue(ppd->qib_wq);
ppd->qib_wq = NULL; ppd->qib_wq = NULL;
} }
qib_free_pportdata(ppd);
} }
qib_update_eeprom_log(dd); qib_update_eeprom_log(dd);
......
...@@ -1634,6 +1634,23 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp, ...@@ -1634,6 +1634,23 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
return reply((struct ib_smp *)pmp); return reply((struct ib_smp *)pmp);
} }
static void qib_snapshot_pmacounters(
struct qib_ibport *ibp,
struct qib_pma_counters *pmacounters)
{
struct qib_pma_counters *p;
int cpu;
memset(pmacounters, 0, sizeof(*pmacounters));
for_each_possible_cpu(cpu) {
p = per_cpu_ptr(ibp->pmastats, cpu);
pmacounters->n_unicast_xmit += p->n_unicast_xmit;
pmacounters->n_unicast_rcv += p->n_unicast_rcv;
pmacounters->n_multicast_xmit += p->n_multicast_xmit;
pmacounters->n_multicast_rcv += p->n_multicast_rcv;
}
}
static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port) struct ib_device *ibdev, u8 port)
{ {
...@@ -1642,6 +1659,7 @@ static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, ...@@ -1642,6 +1659,7 @@ static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_pportdata *ppd = ppd_from_ibp(ibp);
u64 swords, rwords, spkts, rpkts, xwait; u64 swords, rwords, spkts, rpkts, xwait;
struct qib_pma_counters pma;
u8 port_select = p->port_select; u8 port_select = p->port_select;
memset(pmp->data, 0, sizeof(pmp->data)); memset(pmp->data, 0, sizeof(pmp->data));
...@@ -1664,10 +1682,17 @@ static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, ...@@ -1664,10 +1682,17 @@ static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
p->port_rcv_data = cpu_to_be64(rwords); p->port_rcv_data = cpu_to_be64(rwords);
p->port_xmit_packets = cpu_to_be64(spkts); p->port_xmit_packets = cpu_to_be64(spkts);
p->port_rcv_packets = cpu_to_be64(rpkts); p->port_rcv_packets = cpu_to_be64(rpkts);
p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv); qib_snapshot_pmacounters(ibp, &pma);
p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv); p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
- ibp->z_unicast_xmit);
p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
- ibp->z_unicast_rcv);
p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
- ibp->z_multicast_xmit);
p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
- ibp->z_multicast_rcv);
bail: bail:
return reply((struct ib_smp *) pmp); return reply((struct ib_smp *) pmp);
...@@ -1795,6 +1820,7 @@ static int pma_set_portcounters_ext(struct ib_pma_mad *pmp, ...@@ -1795,6 +1820,7 @@ static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_pportdata *ppd = ppd_from_ibp(ibp);
u64 swords, rwords, spkts, rpkts, xwait; u64 swords, rwords, spkts, rpkts, xwait;
struct qib_pma_counters pma;
qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
...@@ -1810,17 +1836,19 @@ static int pma_set_portcounters_ext(struct ib_pma_mad *pmp, ...@@ -1810,17 +1836,19 @@ static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
ibp->z_port_rcv_packets = rpkts; ibp->z_port_rcv_packets = rpkts;
qib_snapshot_pmacounters(ibp, &pma);
if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
ibp->n_unicast_xmit = 0; ibp->z_unicast_xmit = pma.n_unicast_xmit;
if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS) if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
ibp->n_unicast_rcv = 0; ibp->z_unicast_rcv = pma.n_unicast_rcv;
if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
ibp->n_multicast_xmit = 0; ibp->z_multicast_xmit = pma.n_multicast_xmit;
if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
ibp->n_multicast_rcv = 0; ibp->z_multicast_rcv = pma.n_multicast_rcv;
return pma_get_portcounters_ext(pmp, ibdev, port); return pma_get_portcounters_ext(pmp, ibdev, port);
} }
......
...@@ -752,7 +752,7 @@ void qib_send_rc_ack(struct qib_qp *qp) ...@@ -752,7 +752,7 @@ void qib_send_rc_ack(struct qib_qp *qp)
qib_flush_wc(); qib_flush_wc();
qib_sendbuf_done(dd, pbufn); qib_sendbuf_done(dd, pbufn);
ibp->n_unicast_xmit++; this_cpu_inc(ibp->pmastats->n_unicast_xmit);
goto done; goto done;
queue_ack: queue_ack:
......
...@@ -703,6 +703,7 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, ...@@ -703,6 +703,7 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
ohdr->bth[2] = cpu_to_be32(bth2); ohdr->bth[2] = cpu_to_be32(bth2);
this_cpu_inc(ibp->pmastats->n_unicast_xmit);
} }
/** /**
......
...@@ -280,11 +280,11 @@ int qib_make_ud_req(struct qib_qp *qp) ...@@ -280,11 +280,11 @@ int qib_make_ud_req(struct qib_qp *qp)
ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) { if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
if (ah_attr->dlid != QIB_PERMISSIVE_LID) if (ah_attr->dlid != QIB_PERMISSIVE_LID)
ibp->n_multicast_xmit++; this_cpu_inc(ibp->pmastats->n_multicast_xmit);
else else
ibp->n_unicast_xmit++; this_cpu_inc(ibp->pmastats->n_unicast_xmit);
} else { } else {
ibp->n_unicast_xmit++; this_cpu_inc(ibp->pmastats->n_unicast_xmit);
lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
if (unlikely(lid == ppd->lid)) { if (unlikely(lid == ppd->lid)) {
/* /*
......
...@@ -662,7 +662,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) ...@@ -662,7 +662,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid); mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
if (mcast == NULL) if (mcast == NULL)
goto drop; goto drop;
ibp->n_multicast_rcv++; this_cpu_inc(ibp->pmastats->n_multicast_rcv);
list_for_each_entry_rcu(p, &mcast->qp_list, list) list_for_each_entry_rcu(p, &mcast->qp_list, list)
qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
/* /*
...@@ -689,7 +689,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) ...@@ -689,7 +689,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
rcd->lookaside_qpn = qp_num; rcd->lookaside_qpn = qp_num;
} else } else
qp = rcd->lookaside_qp; qp = rcd->lookaside_qp;
ibp->n_unicast_rcv++; this_cpu_inc(ibp->pmastats->n_unicast_rcv);
qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
} }
return; return;
......
...@@ -664,6 +664,13 @@ struct qib_opcode_stats_perctx { ...@@ -664,6 +664,13 @@ struct qib_opcode_stats_perctx {
struct qib_opcode_stats stats[128]; struct qib_opcode_stats stats[128];
}; };
/*
 * Software PMA packet counters, kept per CPU (allocated with
 * alloc_percpu() in qib_init_pportdata() and summed over all possible
 * CPUs by qib_snapshot_pmacounters()).  Each CPU only increments its
 * own copy, so the hot path needs no locking.
 */
struct qib_pma_counters {
u64 n_unicast_xmit; /* total unicast packets sent */
u64 n_unicast_rcv; /* total unicast packets received */
u64 n_multicast_xmit; /* total multicast packets sent */
u64 n_multicast_rcv; /* total multicast packets received */
};
struct qib_ibport { struct qib_ibport {
struct qib_qp __rcu *qp0; struct qib_qp __rcu *qp0;
struct qib_qp __rcu *qp1; struct qib_qp __rcu *qp1;
...@@ -680,10 +687,11 @@ struct qib_ibport { ...@@ -680,10 +687,11 @@ struct qib_ibport {
__be64 mkey; __be64 mkey;
__be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */ __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
u64 tid; /* TID for traps */ u64 tid; /* TID for traps */
u64 n_unicast_xmit; /* total unicast packets sent */ struct qib_pma_counters __percpu *pmastats;
u64 n_unicast_rcv; /* total unicast packets received */ u64 z_unicast_xmit; /* starting count for PMA */
u64 n_multicast_xmit; /* total multicast packets sent */ u64 z_unicast_rcv; /* starting count for PMA */
u64 n_multicast_rcv; /* total multicast packets received */ u64 z_multicast_xmit; /* starting count for PMA */
u64 z_multicast_rcv; /* starting count for PMA */
u64 z_symbol_error_counter; /* starting count for PMA */ u64 z_symbol_error_counter; /* starting count for PMA */
u64 z_link_error_recovery_counter; /* starting count for PMA */ u64 z_link_error_recovery_counter; /* starting count for PMA */
u64 z_link_downed_counter; /* starting count for PMA */ u64 z_link_downed_counter; /* starting count for PMA */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment