Commit d42d118c authored by David S. Miller

Merge branch 'cxgb4-improve-and-tune-TC-MQPRIO-offload'

Rahul Lakkireddy says:

====================
cxgb4: improve and tune TC-MQPRIO offload

Patch 1 improves the Tx path's credit request and recovery mechanism
when running under heavy load.

Patch 2 adds the ability to tune the burst buffer sizes of all traffic
classes, to improve performance for MTUs <= 1500 under heavy load.

Patch 3 adds support for tracking EOTIDs and dumping the software queue
contexts used by TC-MQPRIO offload.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3430223d 5148e595
@@ -1125,19 +1125,20 @@ struct adapter {
  * programmed with various parameters.
  */
 struct ch_sched_params {
-	s8   type;                     /* packet or flow */
+	u8   type;                     /* packet or flow */
 	union {
 		struct {
-			s8   level;    /* scheduler hierarchy level */
-			s8   mode;     /* per-class or per-flow */
-			s8   rateunit; /* bit or packet rate */
-			s8   ratemode; /* %port relative or kbps absolute */
-			s8   channel;  /* scheduler channel [0..N] */
-			s8   class;    /* scheduler class [0..N] */
-			s32  minrate;  /* minimum rate */
-			s32  maxrate;  /* maximum rate */
-			s16  weight;   /* percent weight */
-			s16  pktsize;  /* average packet size */
+			u8   level;    /* scheduler hierarchy level */
+			u8   mode;     /* per-class or per-flow */
+			u8   rateunit; /* bit or packet rate */
+			u8   ratemode; /* %port relative or kbps absolute */
+			u8   channel;  /* scheduler channel [0..N] */
+			u8   class;    /* scheduler class [0..N] */
+			u32  minrate;  /* minimum rate */
+			u32  maxrate;  /* maximum rate */
+			u16  weight;   /* percent weight */
+			u16  pktsize;  /* average packet size */
+			u16  burstsize; /* burst buffer size */
 		} params;
 	} u;
 };
@@ -1952,9 +1953,10 @@ int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
 		   enum ctxt_type ctype, u32 *data);
 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
 		      enum ctxt_type ctype, u32 *data);
-int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
-		    int rateunit, int ratemode, int channel, int class,
-		    int minrate, int maxrate, int weight, int pktsize);
+int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
+		    u8 rateunit, u8 ratemode, u8 channel, u8 class,
+		    u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
+		    u16 burstsize);
 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
 void t4_idma_monitor_init(struct adapter *adapter,
 			  struct sge_idma_monitor_state *idma);
...
@@ -49,6 +49,7 @@
 #include "cudbg_lib_common.h"
 #include "cudbg_entity.h"
 #include "cudbg_lib.h"
+#include "cxgb4_tc_mqprio.h"
 
 /* generic seq_file support for showing a table of size rows x width. */
 static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -2657,32 +2658,19 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
 static int sge_qinfo_show(struct seq_file *seq, void *v)
 {
-	int eth_entries, ctrl_entries, eo_entries = 0;
+	int eth_entries, ctrl_entries, eohw_entries = 0, eosw_entries = 0;
 	int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
 	int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
 	int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
 	const struct sge_uld_txq_info *utxq_info;
 	const struct sge_uld_rxq_info *urxq_info;
+	struct cxgb4_tc_port_mqprio *port_mqprio;
 	struct adapter *adap = seq->private;
-	int i, n, r = (uintptr_t)v - 1;
+	int i, j, n, r = (uintptr_t)v - 1;
 	struct sge *s = &adap->sge;
 
 	eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
 	ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
-	if (adap->sge.eohw_txq)
-		eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
-
-	mutex_lock(&uld_mutex);
-	if (s->uld_txq_info)
-		for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
-			uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
-
-	if (s->uld_rxq_info) {
-		for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
-			uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
-			uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
-		}
-	}
 
 	if (r)
 		seq_putc(seq, '\n');
@@ -2759,11 +2747,21 @@ do { \
 		RL("FLLow:", fl.low);
 		RL("FLStarving:", fl.starving);
 
-		goto unlock;
+		goto out;
 	}
 
 	r -= eth_entries;
-	if (r < eo_entries) {
+	if (!adap->tc_mqprio)
+		goto skip_mqprio;
+
+	mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+	if (!refcount_read(&adap->tc_mqprio->refcnt)) {
+		mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+		goto skip_mqprio;
+	}
+
+	eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
+	if (r < eohw_entries) {
 		int base_qset = r * 4;
 		const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
 		const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
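The hunk above follows a common guarded-read idiom: take the mutex, then bail out to a skip label unless the refcount shows an active offload. Below is a minimal userspace sketch of that shape using C11 atomics and pthreads; the names (dump_state, state_refcnt, state_lock) are hypothetical stand-ins, not the driver's API.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_int state_refcnt;	/* > 0 while the offload is active */

	/* Dump state only if someone still holds a reference; otherwise skip. */
	static void dump_state(void)
	{
		pthread_mutex_lock(&state_lock);
		if (atomic_load(&state_refcnt) == 0) {
			/* Nothing active: drop the lock and skip this section. */
			pthread_mutex_unlock(&state_lock);
			goto skip;
		}
		printf("offload active, refcnt=%d\n", atomic_load(&state_refcnt));
		pthread_mutex_unlock(&state_lock);
		return;

	skip:
		printf("offload inactive, section skipped\n");
	}

	int main(void)
	{
		dump_state();			/* skipped: refcnt is 0 */
		atomic_fetch_add(&state_refcnt, 1);
		dump_state();			/* dumped: refcnt is 1 */
		return 0;
	}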
@@ -2808,10 +2806,71 @@ do { \
 		RL("FLLow:", fl.low);
 		RL("FLStarving:", fl.starving);
 
-		goto unlock;
+		mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+		goto out;
+	}
+
+	r -= eohw_entries;
+	for (j = 0; j < adap->params.nports; j++) {
+		int entries;
+		u8 tc;
+
+		port_mqprio = &adap->tc_mqprio->port_mqprio[j];
+		entries = 0;
+		for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
+			entries += port_mqprio->mqprio.qopt.count[tc];
+
+		if (!entries)
+			continue;
+
+		eosw_entries = DIV_ROUND_UP(entries, 4);
+		if (r < eosw_entries) {
+			const struct sge_eosw_txq *tx;
+
+			n = min(4, entries - 4 * r);
+			tx = &port_mqprio->eosw_txq[4 * r];
+
+			S("QType:", "EOSW-TXQ");
+			S("Interface:",
+			  adap->port[j] ? adap->port[j]->name : "N/A");
+			T("EOTID:", hwtid);
+			T("HWQID:", hwqid);
+			T("State:", state);
+			T("Size:", ndesc);
+			T("In-Use:", inuse);
+			T("Credits:", cred);
+			T("Compl:", ncompl);
+			T("Last-Compl:", last_compl);
+			T("PIDX:", pidx);
+			T("Last-PIDX:", last_pidx);
+			T("CIDX:", cidx);
+			T("Last-CIDX:", last_cidx);
+			T("FLOWC-IDX:", flowc_idx);
+
+			mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+			goto out;
+		}
+
+		r -= eosw_entries;
+	}
+	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+
+skip_mqprio:
+	if (!is_uld(adap))
+		goto skip_uld;
+
+	mutex_lock(&uld_mutex);
+	if (s->uld_txq_info)
+		for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
+			uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
+
+	if (s->uld_rxq_info) {
+		for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
+			uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
+			uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
+		}
 	}
 
-	r -= eo_entries;
 	if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
 		const struct sge_uld_txq *tx;
@@ -2994,6 +3053,9 @@ do { \
 	}
 
 	r -= uld_txq_entries[CXGB4_TX_CRYPTO];
+	mutex_unlock(&uld_mutex);
+
+skip_uld:
 	if (r < ctrl_entries) {
 		const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
@@ -3008,7 +3070,7 @@ do { \
 		TL("TxQFull:", q.stops);
 		TL("TxQRestarts:", q.restarts);
 
-		goto unlock;
+		goto out;
 	}
 
 	r -= ctrl_entries;
@@ -3026,11 +3088,9 @@ do { \
 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
 			   s->counter_val[evtq->pktcnt_idx]);
 
-		goto unlock;
+		goto out;
 	}
 
-unlock:
-	mutex_unlock(&uld_mutex);
-
 #undef R
 #undef RL
 #undef T
@@ -3039,13 +3099,38 @@ do { \
 #undef R3
 #undef T3
 #undef S3
 
+out:
+	return 0;
+
+unlock:
+	mutex_unlock(&uld_mutex);
 	return 0;
 }
 
 static int sge_queue_entries(const struct adapter *adap)
 {
-	int tot_uld_entries = 0;
-	int i;
+	int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0;
+
+	if (adap->tc_mqprio) {
+		struct cxgb4_tc_port_mqprio *port_mqprio;
+		u8 tc;
+
+		mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+		if (adap->sge.eohw_txq)
+			eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
+
+		for (i = 0; i < adap->params.nports; i++) {
+			u32 entries = 0;
+
+			port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+			for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
+				entries += port_mqprio->mqprio.qopt.count[tc];
+
+			if (entries)
+				eosw_entries += DIV_ROUND_UP(entries, 4);
+		}
+		mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+	}
 
 	if (!is_uld(adap))
 		goto lld_only;
@@ -3062,8 +3147,7 @@ static int sge_queue_entries(const struct adapter *adap)
 
 lld_only:
 	return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
-	       (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
-	       tot_uld_entries +
+	       eohw_entries + eosw_entries + tot_uld_entries +
 	       DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
 }
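sge_queue_entries() and sge_qinfo_show() must agree on the number of 4-queue display rows, so the DIV_ROUND_UP arithmetic above is the contract between them. A minimal userspace sketch of that row math, with hypothetical per-TC queue counts chosen purely for illustration:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Hypothetical MQPRIO config: 3 traffic classes on one port. */
		unsigned int count[] = { 8, 4, 2 };	/* queues per TC */
		unsigned int entries = 0, tc;

		for (tc = 0; tc < 3; tc++)
			entries += count[tc];

		/* 14 queues shown 4 per row -> 4 rows for this port. */
		printf("queues=%u rows=%u\n", entries, DIV_ROUND_UP(entries, 4));
		return 0;
	}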
@@ -3244,6 +3328,10 @@ static int tid_info_show(struct seq_file *seq, void *v)
 	if (t->nhpftids)
 		seq_printf(seq, "HPFTID range: %u..%u\n", t->hpftid_base,
 			   t->hpftid_base + t->nhpftids - 1);
+	if (t->neotids)
+		seq_printf(seq, "EOTID range: %u..%u, in use: %u\n",
+			   t->eotid_base, t->eotid_base + t->neotids - 1,
+			   atomic_read(&t->eotids_in_use));
 	if (t->ntids)
 		seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
 			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
...
@@ -1579,6 +1579,7 @@ static int tid_init(struct tid_info *t)
 	atomic_set(&t->tids_in_use, 0);
 	atomic_set(&t->conns_in_use, 0);
 	atomic_set(&t->hash_tids_in_use, 0);
+	atomic_set(&t->eotids_in_use, 0);
 
 	/* Setup the free list for atid_tab and clear the stid bitmap. */
 	if (natids) {
@@ -3021,7 +3022,7 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
 			      SCHED_CLASS_RATEUNIT_BITS,
 			      SCHED_CLASS_RATEMODE_ABS,
 			      pi->tx_chan, class_id, 0,
-			      max_tx_rate * 1000, 0, pktsize);
+			      max_tx_rate * 1000, 0, pktsize, 0);
 	if (ret) {
 		dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
 			ret);
...
@@ -342,6 +342,13 @@ static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
 		p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
 		p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);
 
+		/* Request larger burst buffer for smaller MTU, so
+		 * that hardware can work on more data per burst
+		 * cycle.
+		 */
+		if (dev->mtu <= ETH_DATA_LEN)
+			p.u.params.burstsize = 8 * dev->mtu;
+
 		e = cxgb4_sched_class_alloc(dev, &p);
 		if (!e) {
 			ret = -ENOMEM;
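The tuning rule above sizes the burst buffer to eight MTUs worth of data whenever the MTU is at or below the standard 1500-byte ETH_DATA_LEN, and otherwise leaves the field at zero (the firmware default). A standalone sketch of just that arithmetic, with ETH_DATA_LEN redefined locally so it compiles outside the kernel:

	#include <stdio.h>

	#define ETH_DATA_LEN 1500	/* same value as linux/if_ether.h */

	/* Burst buffer size requested for a given MTU: 8 MTUs of data for
	 * standard and smaller MTUs, 0 (firmware default) otherwise.
	 */
	static unsigned int tc_burstsize(unsigned int mtu)
	{
		return mtu <= ETH_DATA_LEN ? 8 * mtu : 0;
	}

	int main(void)
	{
		printf("mtu=1500 -> burstsize=%u\n", tc_burstsize(1500)); /* 12000 */
		printf("mtu=9000 -> burstsize=%u\n", tc_burstsize(9000)); /* 0 */
		return 0;
	}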
@@ -567,6 +574,7 @@ static void cxgb4_mqprio_disable_offload(struct net_device *dev)
 int cxgb4_setup_tc_mqprio(struct net_device *dev,
 			  struct tc_mqprio_qopt_offload *mqprio)
 {
+	struct adapter *adap = netdev2adap(dev);
 	bool needs_bring_up = false;
 	int ret;
@@ -574,6 +582,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
 	if (ret)
 		return ret;
 
+	mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+
 	/* To configure tc params, the current allocated EOTIDs must
 	 * be freed up. However, they can't be freed up if there's
 	 * traffic running on the interface. So, ensure interface is
@@ -609,6 +619,7 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
 	if (needs_bring_up)
 		cxgb_open(dev);
 
+	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
 	return ret;
 }
@@ -621,6 +632,7 @@ void cxgb4_mqprio_stop_offload(struct adapter *adap)
 	if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
 		return;
 
+	mutex_lock(&adap->tc_mqprio->mqprio_mutex);
 	for_each_port(adap, i) {
 		dev = adap->port[i];
 		if (!dev)
@@ -632,6 +644,7 @@ void cxgb4_mqprio_stop_offload(struct adapter *adap)
 		cxgb4_mqprio_disable_offload(dev);
 	}
 
+	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
 }
 
 int cxgb4_init_tc_mqprio(struct adapter *adap)
@@ -653,6 +666,8 @@ int cxgb4_init_tc_mqprio(struct adapter *adap)
 		goto out_free_mqprio;
 	}
 
+	mutex_init(&tc_mqprio->mqprio_mutex);
+
 	tc_mqprio->port_mqprio = tc_port_mqprio;
 	for (i = 0; i < adap->params.nports; i++) {
 		port_mqprio = &tc_mqprio->port_mqprio[i];
@@ -687,6 +702,7 @@ void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
 	u8 i;
 
 	if (adap->tc_mqprio) {
+		mutex_lock(&adap->tc_mqprio->mqprio_mutex);
 		if (adap->tc_mqprio->port_mqprio) {
 			for (i = 0; i < adap->params.nports; i++) {
 				struct net_device *dev = adap->port[i];
@@ -698,6 +714,7 @@ void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
 			}
 			kfree(adap->tc_mqprio->port_mqprio);
 		}
+		mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
 		kfree(adap->tc_mqprio);
 	}
 }
@@ -33,6 +33,7 @@ struct cxgb4_tc_port_mqprio {
 
 struct cxgb4_tc_mqprio {
 	refcount_t refcnt; /* Refcount for adapter-wide resources */
+	struct mutex mqprio_mutex; /* Lock for accessing MQPRIO info */
 	struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */
 };
...
@@ -147,6 +147,9 @@ struct tid_info {
 	/* TIDs in the HASH */
 	atomic_t hash_tids_in_use;
 	atomic_t conns_in_use;
+	/* ETHOFLD TIDs used for rate limiting */
+	atomic_t eotids_in_use;
+
 	/* lock for setting/clearing filter bitmap */
 	spinlock_t ftid_lock;
@@ -221,12 +224,14 @@ static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
 {
 	set_bit(eotid, t->eotid_bmap);
 	t->eotid_tab[eotid].data = data;
+	atomic_inc(&t->eotids_in_use);
 }
 
 static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
 {
 	clear_bit(eotid, t->eotid_bmap);
 	t->eotid_tab[eotid].data = NULL;
+	atomic_dec(&t->eotids_in_use);
 }
 
 int cxgb4_alloc_atid(struct tid_info *t, void *data);
...
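The EOTID tracking added above is just a bitmap plus an atomic counter bumped on every alloc and free, which lets the debugfs tid_info dump report usage with a plain atomic read. A self-contained userspace sketch of the same bookkeeping using C11 atomics; the names eotid_alloc/eotid_free are hypothetical stand-ins for the inline helpers above:

	#include <stdatomic.h>
	#include <stdio.h>

	#define NEOTIDS 64
	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	static unsigned long eotid_bmap[NEOTIDS / BITS_PER_LONG];
	static atomic_int eotids_in_use;

	static void eotid_alloc(unsigned int eotid)
	{
		eotid_bmap[eotid / BITS_PER_LONG] |= 1UL << (eotid % BITS_PER_LONG);
		atomic_fetch_add(&eotids_in_use, 1);
	}

	static void eotid_free(unsigned int eotid)
	{
		eotid_bmap[eotid / BITS_PER_LONG] &= ~(1UL << (eotid % BITS_PER_LONG));
		atomic_fetch_sub(&eotids_in_use, 1);
	}

	int main(void)
	{
		eotid_alloc(3);
		eotid_alloc(17);
		eotid_free(3);
		/* A debugfs-style reader only needs the counter: prints 1. */
		printf("EOTIDs in use: %d\n", atomic_load(&eotids_in_use));
		return 0;
	}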
@@ -57,7 +57,8 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
 				   p->u.params.ratemode,
 				   p->u.params.channel, e->idx,
 				   p->u.params.minrate, p->u.params.maxrate,
-				   p->u.params.weight, p->u.params.pktsize);
+				   p->u.params.weight, p->u.params.pktsize,
+				   p->u.params.burstsize);
 		break;
 	default:
 		err = -ENOTSUPP;
...
@@ -2091,10 +2091,9 @@ static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
 	return flits + nsgl;
 }
 
-static inline void *write_eo_wr(struct adapter *adap,
-				struct sge_eosw_txq *eosw_txq,
-				struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
-				u32 hdr_len, u32 wrlen)
+static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
+			 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+			 u32 hdr_len, u32 wrlen)
 {
 	const struct skb_shared_info *ssi = skb_shinfo(skb);
 	struct cpl_tx_pkt_core *cpl;
@@ -2113,7 +2112,8 @@ static inline void *write_eo_wr(struct adapter *adap,
 	immd_len += hdr_len;
 
 	if (!eosw_txq->ncompl ||
-	    eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
+	    (eosw_txq->last_compl + wrlen16) >=
+	    (adap->params.ofldq_wr_cred / 2)) {
 		compl = true;
 		eosw_txq->ncompl++;
 		eosw_txq->last_compl = 0;
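The corrected condition above asks for a completion before the accumulated credits, including the work request being built, cross half of the queue's total, instead of only after they already have. A minimal userspace model of that accounting with made-up credit numbers; request_completion mirrors the condition above, and the accumulation of last_compl is folded in here for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	#define OFLDQ_WR_CRED 512	/* hypothetical total WR credits */

	static unsigned int ncompl;	/* completions requested so far */
	static unsigned int last_compl;	/* credits used since last completion */

	/* Request a completion if this WR would cross the half-credit mark. */
	static bool request_completion(unsigned int wrlen16)
	{
		if (!ncompl || last_compl + wrlen16 >= OFLDQ_WR_CRED / 2) {
			ncompl++;
			last_compl = 0;
			return true;
		}
		last_compl += wrlen16;
		return false;
	}

	int main(void)
	{
		unsigned int i;

		/* 100-credit WRs: completion on WR 0, then every third WR. */
		for (i = 0; i < 8; i++)
			printf("wr=%u compl=%d\n", i, request_completion(100));
		return 0;
	}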
@@ -2153,8 +2153,8 @@ static inline void *write_eo_wr(struct adapter *adap,
 	return cpl;
 }
 
-static void ethofld_hard_xmit(struct net_device *dev,
-			      struct sge_eosw_txq *eosw_txq)
+static int ethofld_hard_xmit(struct net_device *dev,
+			     struct sge_eosw_txq *eosw_txq)
 {
 	struct port_info *pi = netdev2pinfo(dev);
 	struct adapter *adap = netdev2adap(dev);
@@ -2167,8 +2167,8 @@ static void ethofld_hard_xmit(struct net_device *dev,
 	bool skip_eotx_wr = false;
 	struct tx_sw_desc *d;
 	struct sk_buff *skb;
+	int left, ret = 0;
 	u8 flits, ndesc;
-	int left;
 
 	eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
 	spin_lock(&eohw_txq->lock);
@@ -2198,11 +2198,19 @@ static void ethofld_hard_xmit(struct net_device *dev,
 	wrlen = flits * 8;
 	wrlen16 = DIV_ROUND_UP(wrlen, 16);
 
-	/* If there are no CPL credits, then wait for credits
-	 * to come back and retry again
+	left = txq_avail(&eohw_txq->q) - ndesc;
+
+	/* If there are no descriptors left in hardware queues or no
+	 * CPL credits left in software queues, then wait for them
+	 * to come back and retry again. Note that we always request
+	 * for credits update via interrupt for every half credits
+	 * consumed. So, the interrupt will eventually restore the
+	 * credits and invoke the Tx path again.
 	 */
-	if (unlikely(wrlen16 > eosw_txq->cred))
+	if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) {
+		ret = -ENOMEM;
 		goto out_unlock;
+	}
 
 	if (unlikely(skip_eotx_wr)) {
 		start = (u64 *)wr;
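Two resources are now checked before anything is written: hardware descriptor space (left) and software CPL credits (cred). Failing either defers the skb until the completion interrupt replenishes credits and restarts the Tx path. A small standalone sketch of that gate, with hypothetical queue numbers:

	#include <errno.h>
	#include <stdio.h>

	/* Gate an EOSW transmit on both hardware descriptors and CPL credits.
	 * All values are illustrative; the real driver reads them per queue.
	 */
	static int try_reserve(int hw_avail, unsigned int ndesc,
			       unsigned int wrlen16, unsigned int cred)
	{
		int left = hw_avail - (int)ndesc;

		if (left < 0 || wrlen16 > cred)
			return -ENOMEM;	/* retry once the interrupt restores credits */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", try_reserve(8, 4, 10, 32));	/* 0: both fit */
		printf("%d\n", try_reserve(2, 4, 10, 32));	/* -ENOMEM: no descriptors */
		printf("%d\n", try_reserve(8, 4, 64, 32));	/* -ENOMEM: no credits */
		return 0;
	}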
@@ -2231,7 +2239,8 @@ static void ethofld_hard_xmit(struct net_device *dev,
 	sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
 					  hdr_len);
 	if (data_len) {
-		if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
+		ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
+		if (unlikely(ret)) {
 			memset(d->addr, 0, sizeof(d->addr));
 			eohw_txq->mapping_err++;
 			goto out_unlock;
@@ -2277,12 +2286,13 @@ static void ethofld_hard_xmit(struct net_device *dev,
 
 out_unlock:
 	spin_unlock(&eohw_txq->lock);
+	return ret;
 }
 
 static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
 {
 	struct sk_buff *skb;
-	int pktcount;
+	int pktcount, ret;
 
 	switch (eosw_txq->state) {
 	case CXGB4_EO_STATE_ACTIVE:
@@ -2307,7 +2317,9 @@ static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
 			continue;
 		}
 
-		ethofld_hard_xmit(dev, eosw_txq);
+		ret = ethofld_hard_xmit(dev, eosw_txq);
+		if (ret)
+			break;
 	}
 }
...
@@ -10361,9 +10361,10 @@ int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
 	return ret;
 }
 
-int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
-		    int rateunit, int ratemode, int channel, int class,
-		    int minrate, int maxrate, int weight, int pktsize)
+int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
+		    u8 rateunit, u8 ratemode, u8 channel, u8 class,
+		    u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
+		    u16 burstsize)
 {
 	struct fw_sched_cmd cmd;
@@ -10385,6 +10386,7 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
 	cmd.u.params.max = cpu_to_be32(maxrate);
 	cmd.u.params.weight = cpu_to_be16(weight);
 	cmd.u.params.pktsize = cpu_to_be16(pktsize);
+	cmd.u.params.burstsize = cpu_to_be16(burstsize);
 
 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
 			       NULL, 1);
...
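The firmware scheduler command is big-endian on the wire, so the new 16-bit burstsize field gets the same cpu_to_be16() treatment as its neighbors. A userspace approximation using htons(), assuming a little-endian host for the printed byte order:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t burstsize = 12000;	/* 8 * 1500, from the MQPRIO tuning */
		uint16_t wire = htons(burstsize); /* stand-in for cpu_to_be16() */
		uint8_t *b = (uint8_t *)&wire;

		/* Big-endian wire format: most significant byte first (2e e0). */
		printf("%02x %02x\n", b[0], b[1]);
		return 0;
	}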