Commit f612b815 authored by Hariprasad Shenai, committed by David S. Miller

RDMA/cxgb4/cxgb4vf/csiostor: Cleanup SGE register defines

This patch cleans up all SGE-related macros/register defines that are
defined in t4_regs.h and the affected files.
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5f07b3c5
...@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, ...@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else { } else {
PDBG("%s: DB wq->sq.pidx = %d\n", PDBG("%s: DB wq->sq.pidx = %d\n",
__func__, wq->sq.pidx); __func__, wq->sq.pidx);
writel(PIDX_T5(inc), wq->sq.udb); writel(PIDX_T5_V(inc), wq->sq.udb);
} }
/* Flush user doorbell area writes. */ /* Flush user doorbell area writes. */
wmb(); wmb();
return; return;
} }
writel(QID(wq->sq.qid) | PIDX(inc), wq->db); writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
} }
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
...@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, ...@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else { } else {
PDBG("%s: DB wq->rq.pidx = %d\n", PDBG("%s: DB wq->rq.pidx = %d\n",
__func__, wq->rq.pidx); __func__, wq->rq.pidx);
writel(PIDX_T5(inc), wq->rq.udb); writel(PIDX_T5_V(inc), wq->rq.udb);
} }
/* Flush user doorbell area writes. */ /* Flush user doorbell area writes. */
wmb(); wmb();
return; return;
} }
writel(QID(wq->rq.qid) | PIDX(inc), wq->db); writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
} }
static inline int t4_wq_in_error(struct t4_wq *wq) static inline int t4_wq_in_error(struct t4_wq *wq)
...@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se) ...@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
u32 val; u32 val;
set_bit(CQ_ARMED, &cq->flags); set_bit(CQ_ARMED, &cq->flags);
while (cq->cidx_inc > CIDXINC_MASK) { while (cq->cidx_inc > CIDXINC_M) {
val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) | val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
INGRESSQID(cq->cqid); INGRESSQID_V(cq->cqid);
writel(val, cq->gts); writel(val, cq->gts);
cq->cidx_inc -= CIDXINC_MASK; cq->cidx_inc -= CIDXINC_M;
} }
val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) | val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
INGRESSQID(cq->cqid); INGRESSQID_V(cq->cqid);
writel(val, cq->gts); writel(val, cq->gts);
cq->cidx_inc = 0; cq->cidx_inc = 0;
return 0; return 0;
...@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq) ...@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
static inline void t4_hwcq_consume(struct t4_cq *cq) static inline void t4_hwcq_consume(struct t4_cq *cq)
{ {
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts; cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) { if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
u32 val; u32 val;
val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) | val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
INGRESSQID(cq->cqid); INGRESSQID_V(cq->cqid);
writel(val, cq->gts); writel(val, cq->gts);
cq->cidx_inc = 0; cq->cidx_inc = 0;
} }
......
...@@ -66,6 +66,7 @@ ...@@ -66,6 +66,7 @@
#include "cxgb4.h" #include "cxgb4.h"
#include "t4_regs.h" #include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h" #include "t4_msg.h"
#include "t4fw_api.h" #include "t4fw_api.h"
#include "cxgb4_dcb.h" #include "cxgb4_dcb.h"
...@@ -1050,9 +1051,9 @@ static void enable_rx(struct adapter *adap) ...@@ -1050,9 +1051,9 @@ static void enable_rx(struct adapter *adap)
if (q->handler) if (q->handler)
napi_enable(&q->napi); napi_enable(&q->napi);
/* 0-increment GTS to start the timer and enable interrupts */ /* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
SEINTARM(q->intr_params) | SEINTARM_V(q->intr_params) |
INGRESSQID(q->cntxt_id)); INGRESSQID_V(q->cntxt_id));
} }
} }
...@@ -3702,14 +3703,20 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, ...@@ -3702,14 +3703,20 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
if (pidx != hw_pidx) { if (pidx != hw_pidx) {
u16 delta; u16 delta;
u32 val;
if (pidx >= hw_pidx) if (pidx >= hw_pidx)
delta = pidx - hw_pidx; delta = pidx - hw_pidx;
else else
delta = size - hw_pidx + pidx; delta = size - hw_pidx + pidx;
if (is_t4(adap->params.chip))
val = PIDX_V(delta);
else
val = PIDX_T5_V(delta);
wmb(); wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
QID(qid) | PIDX(delta)); QID_V(qid) | val);
} }
out: out:
return ret; return ret;
...@@ -3721,8 +3728,8 @@ void cxgb4_disable_db_coalescing(struct net_device *dev) ...@@ -3721,8 +3728,8 @@ void cxgb4_disable_db_coalescing(struct net_device *dev)
struct adapter *adap; struct adapter *adap;
adap = netdev2adap(dev); adap = netdev2adap(dev);
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, NOCOALESCE_F,
F_NOCOALESCE); NOCOALESCE_F);
} }
EXPORT_SYMBOL(cxgb4_disable_db_coalescing); EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
...@@ -3731,7 +3738,7 @@ void cxgb4_enable_db_coalescing(struct net_device *dev) ...@@ -3731,7 +3738,7 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
struct adapter *adap; struct adapter *adap;
adap = netdev2adap(dev); adap = netdev2adap(dev);
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0); t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, NOCOALESCE_F, 0);
} }
EXPORT_SYMBOL(cxgb4_enable_db_coalescing); EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
...@@ -3809,8 +3816,8 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev) ...@@ -3809,8 +3816,8 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev)
struct adapter *adap; struct adapter *adap;
adap = netdev2adap(dev); adap = netdev2adap(dev);
lo = t4_read_reg(adap, SGE_TIMESTAMP_LO); lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI)); hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
return ((u64)hi << 32) | (u64)lo; return ((u64)hi << 32) | (u64)lo;
} }
...@@ -3904,8 +3911,8 @@ static void enable_txq_db(struct adapter *adap, struct sge_txq *q) ...@@ -3904,8 +3911,8 @@ static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
* are committed before we tell HW about them. * are committed before we tell HW about them.
*/ */
wmb(); wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
QID(q->cntxt_id) | PIDX(q->db_pidx_inc)); QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
q->db_pidx_inc = 0; q->db_pidx_inc = 0;
} }
q->db_disabled = 0; q->db_disabled = 0;
...@@ -3952,9 +3959,9 @@ static void process_db_full(struct work_struct *work) ...@@ -3952,9 +3959,9 @@ static void process_db_full(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay); drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap); enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
t4_set_reg_field(adap, SGE_INT_ENABLE3, t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
DBFIFO_HP_INT | DBFIFO_LP_INT, DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
DBFIFO_HP_INT | DBFIFO_LP_INT); DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
} }
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
...@@ -3968,14 +3975,20 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) ...@@ -3968,14 +3975,20 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
goto out; goto out;
if (q->db_pidx != hw_pidx) { if (q->db_pidx != hw_pidx) {
u16 delta; u16 delta;
u32 val;
if (q->db_pidx >= hw_pidx) if (q->db_pidx >= hw_pidx)
delta = q->db_pidx - hw_pidx; delta = q->db_pidx - hw_pidx;
else else
delta = q->size - hw_pidx + q->db_pidx; delta = q->size - hw_pidx + q->db_pidx;
if (is_t4(adap->params.chip))
val = PIDX_V(delta);
else
val = PIDX_T5_V(delta);
wmb(); wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
QID(q->cntxt_id) | PIDX(delta)); QID_V(q->cntxt_id) | val);
} }
out: out:
q->db_disabled = 0; q->db_disabled = 0;
...@@ -4024,7 +4037,7 @@ static void process_db_drop(struct work_struct *work) ...@@ -4024,7 +4037,7 @@ static void process_db_drop(struct work_struct *work)
dev_err(adap->pdev_dev, "doorbell drop recovery: " dev_err(adap->pdev_dev, "doorbell drop recovery: "
"qid=%d, pidx_inc=%d\n", qid, pidx_inc); "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
else else
writel(PIDX_T5(pidx_inc) | QID(bar2_qid), writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
/* Re-enable BAR2 WC */ /* Re-enable BAR2 WC */
...@@ -4039,8 +4052,8 @@ void t4_db_full(struct adapter *adap) ...@@ -4039,8 +4052,8 @@ void t4_db_full(struct adapter *adap)
if (is_t4(adap->params.chip)) { if (is_t4(adap->params.chip)) {
disable_dbs(adap); disable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
t4_set_reg_field(adap, SGE_INT_ENABLE3, t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
DBFIFO_HP_INT | DBFIFO_LP_INT, 0); DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
queue_work(adap->workq, &adap->db_full_task); queue_work(adap->workq, &adap->db_full_task);
} }
} }
...@@ -4089,8 +4102,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld) ...@@ -4089,8 +4102,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++) for (i = 0; i < NCHAN; i++)
lli.tx_modq[i] = i; lli.tx_modq[i] = i;
lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
lli.fw_vers = adap->params.fw_vers; lli.fw_vers = adap->params.fw_vers;
lli.dbfifo_int_thresh = dbfifo_int_thresh; lli.dbfifo_int_thresh = dbfifo_int_thresh;
lli.sge_ingpadboundary = adap->sge.fl_align; lli.sge_ingpadboundary = adap->sge.fl_align;
...@@ -4783,7 +4796,7 @@ static const struct net_device_ops cxgb4_netdev_ops = { ...@@ -4783,7 +4796,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
void t4_fatal_err(struct adapter *adap) void t4_fatal_err(struct adapter *adap)
{ {
t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
t4_intr_disable(adap); t4_intr_disable(adap);
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
} }
...@@ -5013,9 +5026,9 @@ static int adap_init0_tweaks(struct adapter *adapter) ...@@ -5013,9 +5026,9 @@ static int adap_init0_tweaks(struct adapter *adapter)
rx_dma_offset); rx_dma_offset);
rx_dma_offset = 2; rx_dma_offset = 2;
} }
t4_set_reg_field(adapter, SGE_CONTROL, t4_set_reg_field(adapter, SGE_CONTROL_A,
PKTSHIFT_MASK, PKTSHIFT_V(PKTSHIFT_M),
PKTSHIFT(rx_dma_offset)); PKTSHIFT_V(rx_dma_offset));
/* /*
* Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
...@@ -5332,8 +5345,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) ...@@ -5332,8 +5345,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
s->counter_val[0] = 1; s->counter_val[0] = 1;
for (i = 1; i < SGE_NCOUNTERS; i++) for (i = 1; i < SGE_NCOUNTERS; i++)
s->counter_val[i] = min(intr_cnt[i - 1], s->counter_val[i] = min(intr_cnt[i - 1], THRESHOLD_0_M);
THRESHOLD_0_GET(THRESHOLD_0_MASK));
t4_sge_init(adapter); t4_sge_init(adapter);
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
...@@ -6467,9 +6479,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -6467,9 +6479,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!is_t4(adapter->params.chip)) { if (!is_t4(adapter->params.chip)) {
s_qpp = QUEUESPERPAGEPF1 * adapter->fn; s_qpp = (QUEUESPERPAGEPF0_S +
qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); adapter->fn);
qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
num_seg = PAGE_SIZE / SEGMENT_SIZE; num_seg = PAGE_SIZE / SEGMENT_SIZE;
/* Each segment size is 128B. Write coalescing is enabled only /* Each segment size is 128B. Write coalescing is enabled only
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include <net/tcp.h> #include <net/tcp.h>
#include "cxgb4.h" #include "cxgb4.h"
#include "t4_regs.h" #include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h" #include "t4_msg.h"
#include "t4fw_api.h" #include "t4fw_api.h"
...@@ -521,10 +522,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) ...@@ -521,10 +522,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{ {
u32 val; u32 val;
if (q->pend_cred >= 8) { if (q->pend_cred >= 8) {
val = PIDX(q->pend_cred / 8); if (is_t4(adap->params.chip))
if (!is_t4(adap->params.chip)) val = PIDX_V(q->pend_cred / 8);
val |= DBTYPE(1); else
val |= DBPRIO(1); val = PIDX_T5_V(q->pend_cred / 8) |
DBTYPE_F;
val |= DBPRIO_F;
wmb(); wmb();
/* If we don't have access to the new User Doorbell (T5+), use /* If we don't have access to the new User Doorbell (T5+), use
...@@ -532,10 +535,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) ...@@ -532,10 +535,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
* mechanism. * mechanism.
*/ */
if (unlikely(q->bar2_addr == NULL)) { if (unlikely(q->bar2_addr == NULL)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
val | QID(q->cntxt_id)); val | QID_V(q->cntxt_id));
} else { } else {
writel(val | QID(q->bar2_qid), writel(val | QID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_KDOORBELL); q->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write memory Barrier will force the write to /* This Write memory Barrier will force the write to
...@@ -884,7 +887,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) ...@@ -884,7 +887,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* doorbell mechanism; otherwise use the new BAR2 mechanism. * doorbell mechanism; otherwise use the new BAR2 mechanism.
*/ */
if (unlikely(q->bar2_addr == NULL)) { if (unlikely(q->bar2_addr == NULL)) {
u32 val = PIDX(n); u32 val = PIDX_V(n);
unsigned long flags; unsigned long flags;
/* For T4 we need to participate in the Doorbell Recovery /* For T4 we need to participate in the Doorbell Recovery
...@@ -892,14 +895,14 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) ...@@ -892,14 +895,14 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
*/ */
spin_lock_irqsave(&q->db_lock, flags); spin_lock_irqsave(&q->db_lock, flags);
if (!q->db_disabled) if (!q->db_disabled)
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
QID(q->cntxt_id) | val); QID_V(q->cntxt_id) | val);
else else
q->db_pidx_inc += n; q->db_pidx_inc += n;
q->db_pidx = q->pidx; q->db_pidx = q->pidx;
spin_unlock_irqrestore(&q->db_lock, flags); spin_unlock_irqrestore(&q->db_lock, flags);
} else { } else {
u32 val = PIDX_T5(n); u32 val = PIDX_T5_V(n);
/* T4 and later chips share the same PIDX field offset within /* T4 and later chips share the same PIDX field offset within
* the doorbell, but T5 and later shrank the field in order to * the doorbell, but T5 and later shrank the field in order to
...@@ -907,7 +910,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) ...@@ -907,7 +910,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* large in the first place (14 bits) so we just use the T5 * large in the first place (14 bits) so we just use the T5
* and later limits and warn if a Queue ID is too large. * and later limits and warn if a Queue ID is too large.
*/ */
WARN_ON(val & DBPRIO(1)); WARN_ON(val & DBPRIO_F);
/* If we're only writing a single TX Descriptor and we can use /* If we're only writing a single TX Descriptor and we can use
* Inferred QID registers, we can use the Write Combining * Inferred QID registers, we can use the Write Combining
...@@ -923,7 +926,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) ...@@ -923,7 +926,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
(q->bar2_addr + SGE_UDB_WCDOORBELL), (q->bar2_addr + SGE_UDB_WCDOORBELL),
wr); wr);
} else { } else {
writel(val | QID(q->bar2_qid), writel(val | QID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_KDOORBELL); q->bar2_addr + SGE_UDB_KDOORBELL);
} }
...@@ -2001,16 +2004,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) ...@@ -2001,16 +2004,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
} else } else
params = QINTR_TIMER_IDX(7); params = QINTR_TIMER_IDX(7);
val = CIDXINC(work_done) | SEINTARM(params); val = CIDXINC_V(work_done) | SEINTARM_V(params);
/* If we don't have access to the new User GTS (T5+), use the old /* If we don't have access to the new User GTS (T5+), use the old
* doorbell mechanism; otherwise use the new BAR2 mechanism. * doorbell mechanism; otherwise use the new BAR2 mechanism.
*/ */
if (unlikely(q->bar2_addr == NULL)) { if (unlikely(q->bar2_addr == NULL)) {
t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
val | INGRESSQID((u32)q->cntxt_id)); val | INGRESSQID_V((u32)q->cntxt_id));
} else { } else {
writel(val | INGRESSQID(q->bar2_qid), writel(val | INGRESSQID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_GTS); q->bar2_addr + SGE_UDB_GTS);
wmb(); wmb();
} }
...@@ -2056,16 +2059,16 @@ static unsigned int process_intrq(struct adapter *adap) ...@@ -2056,16 +2059,16 @@ static unsigned int process_intrq(struct adapter *adap)
rspq_next(q); rspq_next(q);
} }
val = CIDXINC(credits) | SEINTARM(q->intr_params); val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
/* If we don't have access to the new User GTS (T5+), use the old /* If we don't have access to the new User GTS (T5+), use the old
* doorbell mechanism; otherwise use the new BAR2 mechanism. * doorbell mechanism; otherwise use the new BAR2 mechanism.
*/ */
if (unlikely(q->bar2_addr == NULL)) { if (unlikely(q->bar2_addr == NULL)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
val | INGRESSQID(q->cntxt_id)); val | INGRESSQID_V(q->cntxt_id));
} else { } else {
writel(val | INGRESSQID(q->bar2_qid), writel(val | INGRESSQID_V(q->bar2_qid),
q->bar2_addr + SGE_UDB_GTS); q->bar2_addr + SGE_UDB_GTS);
wmb(); wmb();
} }
...@@ -2770,8 +2773,8 @@ static int t4_sge_init_soft(struct adapter *adap) ...@@ -2770,8 +2773,8 @@ static int t4_sge_init_soft(struct adapter *adap)
* process_responses() and that only packet data is going to the * process_responses() and that only packet data is going to the
* Free Lists. * Free Lists.
*/ */
if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) != if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL; return -EINVAL;
} }
...@@ -2785,7 +2788,7 @@ static int t4_sge_init_soft(struct adapter *adap) ...@@ -2785,7 +2788,7 @@ static int t4_sge_init_soft(struct adapter *adap)
* XXX meet our needs! * XXX meet our needs!
*/ */
#define READ_FL_BUF(x) \ #define READ_FL_BUF(x) \
t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32)) t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
...@@ -2839,11 +2842,11 @@ static int t4_sge_init_soft(struct adapter *adap) ...@@ -2839,11 +2842,11 @@ static int t4_sge_init_soft(struct adapter *adap)
s->timer_val[5] = core_ticks_to_us(adap, s->timer_val[5] = core_ticks_to_us(adap,
TIMERVALUE5_GET(timer_value_4_and_5)); TIMERVALUE5_GET(timer_value_4_and_5));
ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD); ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold); s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold); s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold); s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold); s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
return 0; return 0;
} }
...@@ -2856,8 +2859,7 @@ static int t4_sge_init_hard(struct adapter *adap) ...@@ -2856,8 +2859,7 @@ static int t4_sge_init_hard(struct adapter *adap)
* Set up our basic SGE mode to deliver CPL messages to our Ingress * Set up our basic SGE mode to deliver CPL messages to our Ingress
* Queue and Packet Date to the Free List. * Queue and Packet Date to the Free List.
*/ */
t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK, t4_set_reg_field(adap, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);
RXPKTCPLMODE_MASK);
/* /*
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
...@@ -2887,22 +2889,22 @@ static int t4_sge_init_hard(struct adapter *adap) ...@@ -2887,22 +2889,22 @@ static int t4_sge_init_hard(struct adapter *adap)
s->fl_pg_order = FL_PG_ORDER; s->fl_pg_order = FL_PG_ORDER;
if (s->fl_pg_order) if (s->fl_pg_order)
t4_write_reg(adap, t4_write_reg(adap,
SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32), SGE_FL_BUFFER_SIZE0_A+RX_LARGE_PG_BUF*sizeof(u32),
PAGE_SIZE << FL_PG_ORDER); PAGE_SIZE << FL_PG_ORDER);
t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32), t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A+RX_SMALL_MTU_BUF*sizeof(u32),
FL_MTU_SMALL_BUFSIZE(adap)); FL_MTU_SMALL_BUFSIZE(adap));
t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32), t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A+RX_LARGE_MTU_BUF*sizeof(u32),
FL_MTU_LARGE_BUFSIZE(adap)); FL_MTU_LARGE_BUFSIZE(adap));
/* /*
* Note that the SGE Ingress Packet Count Interrupt Threshold and * Note that the SGE Ingress Packet Count Interrupt Threshold and
* Timer Holdoff values must be supplied by our caller. * Timer Holdoff values must be supplied by our caller.
*/ */
t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD_A,
THRESHOLD_0(s->counter_val[0]) | THRESHOLD_0_V(s->counter_val[0]) |
THRESHOLD_1(s->counter_val[1]) | THRESHOLD_1_V(s->counter_val[1]) |
THRESHOLD_2(s->counter_val[2]) | THRESHOLD_2_V(s->counter_val[2]) |
THRESHOLD_3(s->counter_val[3])); THRESHOLD_3_V(s->counter_val[3]));
t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1, t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
...@@ -2927,9 +2929,9 @@ int t4_sge_init(struct adapter *adap) ...@@ -2927,9 +2929,9 @@ int t4_sge_init(struct adapter *adap)
* Ingress Padding Boundary and Egress Status Page Size are set up by * Ingress Padding Boundary and Egress Status Page Size are set up by
* t4_fixup_host_params(). * t4_fixup_host_params().
*/ */
sge_control = t4_read_reg(adap, SGE_CONTROL); sge_control = t4_read_reg(adap, SGE_CONTROL_A);
s->pktshift = PKTSHIFT_GET(sge_control); s->pktshift = PKTSHIFT_G(sge_control);
s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64; s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
/* T4 uses a single control field to specify both the PCIe Padding and /* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these * Packing Boundary. T5 introduced the ability to specify these
...@@ -2937,8 +2939,8 @@ int t4_sge_init(struct adapter *adap) ...@@ -2937,8 +2939,8 @@ int t4_sge_init(struct adapter *adap)
* within Packed Buffer Mode is the maximum of these two * within Packed Buffer Mode is the maximum of these two
* specifications. * specifications.
*/ */
ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
X_INGPADBOUNDARY_SHIFT); INGPADBOUNDARY_SHIFT_X);
if (is_t4(adap->params.chip)) { if (is_t4(adap->params.chip)) {
s->fl_align = ingpadboundary; s->fl_align = ingpadboundary;
} else { } else {
...@@ -2975,11 +2977,11 @@ int t4_sge_init(struct adapter *adap) ...@@ -2975,11 +2977,11 @@ int t4_sge_init(struct adapter *adap)
* buffers and a new field which only applies to Packed Mode Free List * buffers and a new field which only applies to Packed Mode Free List
* buffers. * buffers.
*/ */
sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL); sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
if (is_t4(adap->params.chip)) if (is_t4(adap->params.chip))
egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl); egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
else else
egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl); egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
s->fl_starve_thres = 2*egress_threshold + 1; s->fl_starve_thres = 2*egress_threshold + 1;
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include "cxgb4.h" #include "cxgb4.h"
#include "t4_regs.h" #include "t4_regs.h"
#include "t4_values.h"
#include "t4fw_api.h" #include "t4fw_api.h"
/** /**
...@@ -1499,43 +1500,43 @@ static void sge_intr_handler(struct adapter *adapter) ...@@ -1499,43 +1500,43 @@ static void sge_intr_handler(struct adapter *adapter)
u64 v; u64 v;
static const struct intr_info sge_intr_info[] = { static const struct intr_info sge_intr_info[] = {
{ ERR_CPL_EXCEED_IQE_SIZE, { ERR_CPL_EXCEED_IQE_SIZE_F,
"SGE received CPL exceeding IQE size", -1, 1 }, "SGE received CPL exceeding IQE size", -1, 1 },
{ ERR_INVALID_CIDX_INC, { ERR_INVALID_CIDX_INC_F,
"SGE GTS CIDX increment too large", -1, 0 }, "SGE GTS CIDX increment too large", -1, 0 },
{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 }, "SGE IQID > 1023 received CPL for FL", -1, 0 },
{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
0 }, 0 },
{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
0 }, 0 },
{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
0 }, 0 },
{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
0 }, 0 },
{ ERR_ING_CTXT_PRIO, { ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 }, "SGE too many priority ingress contexts", -1, 0 },
{ ERR_EGR_CTXT_PRIO, { ERR_EGR_CTXT_PRIO_F,
"SGE too many priority egress contexts", -1, 0 }, "SGE too many priority egress contexts", -1, 0 },
{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0 } { 0 }
}; };
v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
if (v) { if (v) {
dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
(unsigned long long)v); (unsigned long long)v);
t4_write_reg(adapter, SGE_INT_CAUSE1, v); t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
} }
if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
v != 0) v != 0)
t4_fatal_err(adapter); t4_fatal_err(adapter);
} }
...@@ -2025,15 +2026,15 @@ void t4_intr_enable(struct adapter *adapter) ...@@ -2025,15 +2026,15 @@ void t4_intr_enable(struct adapter *adapter)
{ {
u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
DBFIFO_HP_INT | DBFIFO_LP_INT | DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
EGRESS_SIZE_ERR); EGRESS_SIZE_ERR_F);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
} }
...@@ -3148,22 +3149,23 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, ...@@ -3148,22 +3149,23 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1; unsigned int fl_align_log = fls(fl_align) - 1;
t4_write_reg(adap, SGE_HOST_PAGE_SIZE, t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
HOSTPAGESIZEPF0(sge_hps) | HOSTPAGESIZEPF0_V(sge_hps) |
HOSTPAGESIZEPF1(sge_hps) | HOSTPAGESIZEPF1_V(sge_hps) |
HOSTPAGESIZEPF2(sge_hps) | HOSTPAGESIZEPF2_V(sge_hps) |
HOSTPAGESIZEPF3(sge_hps) | HOSTPAGESIZEPF3_V(sge_hps) |
HOSTPAGESIZEPF4(sge_hps) | HOSTPAGESIZEPF4_V(sge_hps) |
HOSTPAGESIZEPF5(sge_hps) | HOSTPAGESIZEPF5_V(sge_hps) |
HOSTPAGESIZEPF6(sge_hps) | HOSTPAGESIZEPF6_V(sge_hps) |
HOSTPAGESIZEPF7(sge_hps)); HOSTPAGESIZEPF7_V(sge_hps));
if (is_t4(adap->params.chip)) { if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, SGE_CONTROL, t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_MASK | INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
EGRSTATUSPAGESIZE_MASK, EGRSTATUSPAGESIZE_F,
INGPADBOUNDARY(fl_align_log - 5) | INGPADBOUNDARY_V(fl_align_log -
EGRSTATUSPAGESIZE(stat_len != 64)); INGPADBOUNDARY_SHIFT_X) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
} else { } else {
/* T5 introduced the separation of the Free List Padding and /* T5 introduced the separation of the Free List Padding and
* Packing Boundaries. Thus, we can select a smaller Padding * Packing Boundaries. Thus, we can select a smaller Padding
...@@ -3193,15 +3195,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, ...@@ -3193,15 +3195,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
fl_align = 64; fl_align = 64;
fl_align_log = 6; fl_align_log = 6;
} }
t4_set_reg_field(adap, SGE_CONTROL, t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_MASK | INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
EGRSTATUSPAGESIZE_MASK, EGRSTATUSPAGESIZE_F,
INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) | INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
EGRSTATUSPAGESIZE(stat_len != 64)); EGRSTATUSPAGESIZE_V(stat_len != 64));
t4_set_reg_field(adap, SGE_CONTROL2_A, t4_set_reg_field(adap, SGE_CONTROL2_A,
INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
INGPACKBOUNDARY_V(fl_align_log - INGPACKBOUNDARY_V(fl_align_log -
INGPACKBOUNDARY_SHIFT_X)); INGPACKBOUNDARY_SHIFT_X));
} }
/* /*
* Adjust various SGE Free List Host Buffer Sizes. * Adjust various SGE Free List Host Buffer Sizes.
...@@ -3224,12 +3226,12 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, ...@@ -3224,12 +3226,12 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
* Default Firmware Configuration File but we need to adjust it for * Default Firmware Configuration File but we need to adjust it for
* this host's cache line size. * this host's cache line size.
*/ */
t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size); t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
t4_write_reg(adap, SGE_FL_BUFFER_SIZE2, t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
(t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1) (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
& ~(fl_align-1)); & ~(fl_align-1));
t4_write_reg(adap, SGE_FL_BUFFER_SIZE3, t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
(t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1) (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
& ~(fl_align-1)); & ~(fl_align-1));
t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12)); t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
...@@ -4133,7 +4135,7 @@ int t4_init_sge_params(struct adapter *adapter) ...@@ -4133,7 +4135,7 @@ int t4_init_sge_params(struct adapter *adapter)
/* Extract the SGE Page Size for our PF. /* Extract the SGE Page Size for our PF.
*/ */
hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE); hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
s_hps = (HOSTPAGESIZEPF0_S + s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn); (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M); sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
...@@ -4142,10 +4144,10 @@ int t4_init_sge_params(struct adapter *adapter) ...@@ -4142,10 +4144,10 @@ int t4_init_sge_params(struct adapter *adapter)
*/ */
s_qpp = (QUEUESPERPAGEPF0_S + s_qpp = (QUEUESPERPAGEPF0_S +
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn); (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF); qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF); qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF);
sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
return 0; return 0;
} }
......
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4_VALUES_H__
#define __T4_VALUES_H__
/* This file contains definitions for various T4 register value hardware
* constants. The types of values encoded here are predominantly those for
* register fields which control "modal" behavior. For the most part, we do
* not include definitions for register fields which are simple numeric
* metrics, etc.
*/
/* SGE register field values.
*/
/* CONTROL1 register */
#define RXPKTCPLMODE_SPLIT_X 1
#define INGPCIEBOUNDARY_SHIFT_X 5
#define INGPCIEBOUNDARY_32B_X 0
#define INGPADBOUNDARY_SHIFT_X 5
/* CONTROL2 register */
#define INGPACKBOUNDARY_SHIFT_X 5
#define INGPACKBOUNDARY_16B_X 0
/* GTS register */
#define SGE_TIMERREGS 6
/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
* The User Doorbells are each 128 bytes in length with a Simple Doorbell at
* offsets 8x and a Write Combining single 64-byte Egress Queue Unit
* (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64. For Ingress Queues,
* we have a Going To Sleep register at offsets 8x+4.
*
* As noted above, we have many instances of the Simple Doorbell and Going To
* Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
* non-64-byte aligned offset for the Simple Doorbell in order to attempt to
* avoid buffering of the writes to the Simple Doorbell and we want to use a
* non-contiguous offset for the Going To Sleep writes in order to avoid
* possible combining between them.
*/
#define SGE_UDB_SIZE 128
#define SGE_UDB_KDOORBELL 8
#define SGE_UDB_GTS 20
#define SGE_UDB_WCDOORBELL 64
#endif /* __T4_VALUES_H__ */
...@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq) ...@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq)
* enable interrupts. * enable interrupts.
*/ */
t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
CIDXINC(0) | CIDXINC_V(0) |
SEINTARM(rspq->intr_params) | SEINTARM_V(rspq->intr_params) |
INGRESSQID(rspq->cntxt_id)); INGRESSQID_V(rspq->cntxt_id));
} }
/* /*
...@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter) ...@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter)
*/ */
if (adapter->flags & USING_MSI) if (adapter->flags & USING_MSI)
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
CIDXINC(0) | CIDXINC_V(0) |
SEINTARM(s->intrq.intr_params) | SEINTARM_V(s->intrq.intr_params) |
INGRESSQID(s->intrq.cntxt_id)); INGRESSQID_V(s->intrq.cntxt_id));
} }
...@@ -2306,14 +2306,10 @@ static int adap_init0(struct adapter *adapter) ...@@ -2306,14 +2306,10 @@ static int adap_init0(struct adapter *adapter)
s->timer_val[5] = core_ticks_to_us(adapter, s->timer_val[5] = core_ticks_to_us(adapter,
TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5)); TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
s->counter_val[0] = s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold); s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
s->counter_val[1] = s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold); s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
s->counter_val[2] =
THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
s->counter_val[3] =
THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
/* /*
* Grab our Virtual Interface resource allocation, extract the * Grab our Virtual Interface resource allocation, extract the
......
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
#include "t4vf_defs.h" #include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h" #include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h" #include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h" #include "../cxgb4/t4_msg.h"
...@@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) ...@@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/ */
if (fl->pend_cred >= FL_PER_EQ_UNIT) { if (fl->pend_cred >= FL_PER_EQ_UNIT) {
if (is_t4(adapter->params.chip)) if (is_t4(adapter->params.chip))
val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
else else
val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) | val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
DBTYPE(1); DBTYPE_F;
val |= DBPRIO(1); val |= DBPRIO_F;
/* Make sure all memory writes to the Free List queue are /* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them. * committed before we tell the hardware about them.
...@@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) ...@@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
if (unlikely(fl->bar2_addr == NULL)) { if (unlikely(fl->bar2_addr == NULL)) {
t4_write_reg(adapter, t4_write_reg(adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
QID(fl->cntxt_id) | val); QID_V(fl->cntxt_id) | val);
} else { } else {
writel(val | QID(fl->bar2_qid), writel(val | QID_V(fl->bar2_qid),
fl->bar2_addr + SGE_UDB_KDOORBELL); fl->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write memory Barrier will force the write to /* This Write memory Barrier will force the write to
...@@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, ...@@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* doorbell mechanism; otherwise use the new BAR2 mechanism. * doorbell mechanism; otherwise use the new BAR2 mechanism.
*/ */
if (unlikely(tq->bar2_addr == NULL)) { if (unlikely(tq->bar2_addr == NULL)) {
u32 val = PIDX(n); u32 val = PIDX_V(n);
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
QID(tq->cntxt_id) | val); QID_V(tq->cntxt_id) | val);
} else { } else {
u32 val = PIDX_T5(n); u32 val = PIDX_T5_V(n);
/* T4 and later chips share the same PIDX field offset within /* T4 and later chips share the same PIDX field offset within
* the doorbell, but T5 and later shrank the field in order to * the doorbell, but T5 and later shrank the field in order to
...@@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, ...@@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* large in the first place (14 bits) so we just use the T5 * large in the first place (14 bits) so we just use the T5
* and later limits and warn if a Queue ID is too large. * and later limits and warn if a Queue ID is too large.
*/ */
WARN_ON(val & DBPRIO(1)); WARN_ON(val & DBPRIO_F);
/* If we're only writing a single Egress Unit and the BAR2 /* If we're only writing a single Egress Unit and the BAR2
* Queue ID is 0, we can use the Write Combining Doorbell * Queue ID is 0, we can use the Write Combining Doorbell
...@@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, ...@@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
count--; count--;
} }
} else } else
writel(val | QID(tq->bar2_qid), writel(val | QID_V(tq->bar2_qid),
tq->bar2_addr + SGE_UDB_KDOORBELL); tq->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write Memory Barrier will force the write to the User /* This Write Memory Barrier will force the write to the User
...@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) ...@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
if (unlikely(work_done == 0)) if (unlikely(work_done == 0))
rspq->unhandled_irqs++; rspq->unhandled_irqs++;
val = CIDXINC(work_done) | SEINTARM(intr_params); val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
if (is_t4(rspq->adapter->params.chip)) { if (is_t4(rspq->adapter->params.chip)) {
t4_write_reg(rspq->adapter, t4_write_reg(rspq->adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_GTS, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID((u32)rspq->cntxt_id)); val | INGRESSQID_V((u32)rspq->cntxt_id));
} else { } else {
writel(val | INGRESSQID(rspq->bar2_qid), writel(val | INGRESSQID_V(rspq->bar2_qid),
rspq->bar2_addr + SGE_UDB_GTS); rspq->bar2_addr + SGE_UDB_GTS);
wmb(); wmb();
} }
...@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter) ...@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter)
rspq_next(intrq); rspq_next(intrq);
} }
val = CIDXINC(work_done) | SEINTARM(intrq->intr_params); val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
if (is_t4(adapter->params.chip)) if (is_t4(adapter->params.chip))
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID(intrq->cntxt_id)); val | INGRESSQID_V(intrq->cntxt_id));
else { else {
writel(val | INGRESSQID(intrq->bar2_qid), writel(val | INGRESSQID_V(intrq->bar2_qid),
intrq->bar2_addr + SGE_UDB_GTS); intrq->bar2_addr + SGE_UDB_GTS);
wmb(); wmb();
} }
...@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter) ...@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter)
fl0, fl1); fl0, fl1);
return -EINVAL; return -EINVAL;
} }
if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) { if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL; return -EINVAL;
} }
...@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter) ...@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter)
*/ */
if (fl1) if (fl1)
s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64); ? 128 : 64);
s->pktshift = PKTSHIFT_GET(sge_params->sge_control); s->pktshift = PKTSHIFT_G(sge_params->sge_control);
/* T4 uses a single control field to specify both the PCIe Padding and /* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these * Packing Boundary. T5 introduced the ability to specify these
...@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter) ...@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter)
* end doing this because it would initialize the Padding Boundary and * end doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).) * leave the Packing Boundary initialized to 0 (16 bytes).)
*/ */
ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
X_INGPADBOUNDARY_SHIFT); INGPADBOUNDARY_SHIFT_X);
if (is_t4(adapter->params.chip)) { if (is_t4(adapter->params.chip)) {
s->fl_align = ingpadboundary; s->fl_align = ingpadboundary;
} else { } else {
...@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter) ...@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter)
* Congestion Threshold is in units of 2 Free List pointers.) * Congestion Threshold is in units of 2 Free List pointers.)
*/ */
s->fl_starve_thres s->fl_starve_thres
= EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1; = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
/* /*
* Set up tasklet timers. * Set up tasklet timers.
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include "t4vf_defs.h" #include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h" #include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h" #include "../cxgb4/t4fw_api.h"
/* /*
...@@ -528,13 +529,13 @@ int t4vf_get_sge_params(struct adapter *adapter) ...@@ -528,13 +529,13 @@ int t4vf_get_sge_params(struct adapter *adapter)
int v; int v;
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL)); FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE)); FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0)); FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1)); FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1)); FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1));
params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
...@@ -576,9 +577,9 @@ int t4vf_get_sge_params(struct adapter *adapter) ...@@ -576,9 +577,9 @@ int t4vf_get_sge_params(struct adapter *adapter)
} }
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD)); FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL)); FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
v = t4vf_query_params(adapter, 2, params, vals); v = t4vf_query_params(adapter, 2, params, vals);
if (v) if (v)
return v; return v;
...@@ -628,10 +629,10 @@ int t4vf_get_sge_params(struct adapter *adapter) ...@@ -628,10 +629,10 @@ int t4vf_get_sge_params(struct adapter *adapter)
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf); (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
sge_params->sge_vf_eq_qpp = sge_params->sge_vf_eq_qpp =
((sge_params->sge_egress_queues_per_page >> s_qpp) ((sge_params->sge_egress_queues_per_page >> s_qpp)
& QUEUESPERPAGEPF0_MASK); & QUEUESPERPAGEPF0_M);
sge_params->sge_vf_iq_qpp = sge_params->sge_vf_iq_qpp =
((sge_params->sge_ingress_queues_per_page >> s_qpp) ((sge_params->sge_ingress_queues_per_page >> s_qpp)
& QUEUESPERPAGEPF0_MASK); & QUEUESPERPAGEPF0_M);
} }
return 0; return 0;
......
...@@ -2256,15 +2256,15 @@ csio_hw_intr_enable(struct csio_hw *hw) ...@@ -2256,15 +2256,15 @@ csio_hw_intr_enable(struct csio_hw *hw)
pl &= (~SF); pl &= (~SF);
csio_wr_reg32(hw, pl, PL_INT_ENABLE); csio_wr_reg32(hw, pl, PL_INT_ENABLE);
csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE | csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC | EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
ERR_CPL_OPCODE_0 | ERR_DROPPED_DB | ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID1_F |
ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR, ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
SGE_INT_ENABLE3); SGE_INT_ENABLE3_A);
csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf); csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
} }
...@@ -2300,7 +2300,7 @@ csio_hw_intr_disable(struct csio_hw *hw) ...@@ -2300,7 +2300,7 @@ csio_hw_intr_disable(struct csio_hw *hw)
void void
csio_hw_fatal_err(struct csio_hw *hw) csio_hw_fatal_err(struct csio_hw *hw)
{ {
csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
csio_hw_intr_disable(hw); csio_hw_intr_disable(hw);
/* Do not reset HW, we may need FW state for debugging */ /* Do not reset HW, we may need FW state for debugging */
...@@ -2698,44 +2698,44 @@ static void csio_sge_intr_handler(struct csio_hw *hw) ...@@ -2698,44 +2698,44 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
uint64_t v; uint64_t v;
static struct intr_info sge_intr_info[] = { static struct intr_info sge_intr_info[] = {
{ ERR_CPL_EXCEED_IQE_SIZE, { ERR_CPL_EXCEED_IQE_SIZE_F,
"SGE received CPL exceeding IQE size", -1, 1 }, "SGE received CPL exceeding IQE size", -1, 1 },
{ ERR_INVALID_CIDX_INC, { ERR_INVALID_CIDX_INC_F,
"SGE GTS CIDX increment too large", -1, 0 }, "SGE GTS CIDX increment too large", -1, 0 },
{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 }, "SGE IQID > 1023 received CPL for FL", -1, 0 },
{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
0 }, 0 },
{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
0 }, 0 },
{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
0 }, 0 },
{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
0 }, 0 },
{ ERR_ING_CTXT_PRIO, { ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 }, "SGE too many priority ingress contexts", -1, 0 },
{ ERR_EGR_CTXT_PRIO, { ERR_EGR_CTXT_PRIO_F,
"SGE too many priority egress contexts", -1, 0 }, "SGE too many priority egress contexts", -1, 0 },
{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0, NULL, 0, 0 } { 0, NULL, 0, 0 }
}; };
v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) | v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32); ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
if (v) { if (v) {
csio_fatal(hw, "SGE parity error (%#llx)\n", csio_fatal(hw, "SGE parity error (%#llx)\n",
(unsigned long long)v); (unsigned long long)v);
csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
SGE_INT_CAUSE1); SGE_INT_CAUSE1_A);
csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2); csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
} }
v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info); v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) || if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
v != 0) v != 0)
csio_hw_fatal_err(hw); csio_hw_fatal_err(hw);
} }
......
...@@ -66,15 +66,15 @@ static inline int csio_is_t5(uint16_t chip) ...@@ -66,15 +66,15 @@ static inline int csio_is_t5(uint16_t chip)
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
#define CSIO_HW_PIDX(hw, index) \ #define CSIO_HW_PIDX(hw, index) \
(csio_is_t4(hw->chip_id) ? (PIDX(index)) : \ (csio_is_t4(hw->chip_id) ? (PIDX_V(index)) : \
(PIDX_T5(index) | DBTYPE(1U))) (PIDX_T5_G(index) | DBTYPE_F))
#define CSIO_HW_LP_INT_THRESH(hw, val) \ #define CSIO_HW_LP_INT_THRESH(hw, val) \
(csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) : \ (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_V(val)) : \
(V_LP_INT_THRESH_T5(val))) (V_LP_INT_THRESH_T5(val)))
#define CSIO_HW_M_LP_INT_THRESH(hw) \ #define CSIO_HW_M_LP_INT_THRESH(hw) \
(csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5)) (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_M) : (M_LP_INT_THRESH_T5))
#define CSIO_MAC_INT_CAUSE_REG(hw, port) \ #define CSIO_MAC_INT_CAUSE_REG(hw, port) \
(csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \ (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \
......
...@@ -51,12 +51,12 @@ int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */ ...@@ -51,12 +51,12 @@ int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */
static int csio_sge_timer_reg = 1; static int csio_sge_timer_reg = 1;
#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \ #define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg) csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)
static void static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg) csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{ {
sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 + sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
reg * sizeof(uint32_t)); reg * sizeof(uint32_t));
} }
...@@ -71,7 +71,7 @@ csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf) ...@@ -71,7 +71,7 @@ csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
static inline uint32_t static inline uint32_t
csio_wr_qstat_pgsz(struct csio_hw *hw) csio_wr_qstat_pgsz(struct csio_hw *hw)
{ {
return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64; return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
} }
/* Ring freelist doorbell */ /* Ring freelist doorbell */
...@@ -84,9 +84,9 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq) ...@@ -84,9 +84,9 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
* 8 freelist buffer pointers (since each pointer is 8 bytes). * 8 freelist buffer pointers (since each pointer is 8 bytes).
*/ */
if (flq->inc_idx >= 8) { if (flq->inc_idx >= 8) {
csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) | csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
CSIO_HW_PIDX(hw, flq->inc_idx / 8), CSIO_HW_PIDX(hw, flq->inc_idx / 8),
MYPF_REG(SGE_PF_KDOORBELL)); MYPF_REG(SGE_PF_KDOORBELL_A));
flq->inc_idx &= 7; flq->inc_idx &= 7;
} }
} }
...@@ -95,10 +95,10 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq) ...@@ -95,10 +95,10 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
static void static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid) csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{ {
csio_wr_reg32(hw, CIDXINC(0) | csio_wr_reg32(hw, CIDXINC_V(0) |
INGRESSQID(iqid) | INGRESSQID_V(iqid) |
TIMERREG(X_TIMERREG_RESTART_COUNTER), TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
MYPF_REG(SGE_PF_GTS)); MYPF_REG(SGE_PF_GTS_A));
} }
/* /*
...@@ -982,9 +982,9 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio) ...@@ -982,9 +982,9 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
wmb(); wmb();
/* Ring SGE Doorbell writing q->pidx into it */ /* Ring SGE Doorbell writing q->pidx into it */
csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) | csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
CSIO_HW_PIDX(hw, q->inc_idx), CSIO_HW_PIDX(hw, q->inc_idx),
MYPF_REG(SGE_PF_KDOORBELL)); MYPF_REG(SGE_PF_KDOORBELL_A));
q->inc_idx = 0; q->inc_idx = 0;
return 0; return 0;
...@@ -1242,10 +1242,10 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q, ...@@ -1242,10 +1242,10 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
restart: restart:
/* Now inform SGE about our incremental index value */ /* Now inform SGE about our incremental index value */
csio_wr_reg32(hw, CIDXINC(q->inc_idx) | csio_wr_reg32(hw, CIDXINC_V(q->inc_idx) |
INGRESSQID(q->un.iq.physiqid) | INGRESSQID_V(q->un.iq.physiqid) |
TIMERREG(csio_sge_timer_reg), TIMERREG_V(csio_sge_timer_reg),
MYPF_REG(SGE_PF_GTS)); MYPF_REG(SGE_PF_GTS_A));
q->stats.n_tot_rsps += q->inc_idx; q->stats.n_tot_rsps += q->inc_idx;
q->inc_idx = 0; q->inc_idx = 0;
...@@ -1310,22 +1310,23 @@ csio_wr_fixup_host_params(struct csio_hw *hw) ...@@ -1310,22 +1310,23 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
uint32_t ingpad = 0; uint32_t ingpad = 0;
uint32_t stat_len = clsz > 64 ? 128 : 64; uint32_t stat_len = clsz > 64 ? 128 : 64;
csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) | csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) | HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) | HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps), HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
SGE_HOST_PAGE_SIZE); SGE_HOST_PAGE_SIZE_A);
sge->csio_fl_align = clsz < 32 ? 32 : clsz; sge->csio_fl_align = clsz < 32 ? 32 : clsz;
ingpad = ilog2(sge->csio_fl_align) - 5; ingpad = ilog2(sge->csio_fl_align) - 5;
csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK | csio_set_reg_field(hw, SGE_CONTROL_A,
EGRSTATUSPAGESIZE(1), INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
INGPADBOUNDARY(ingpad) | EGRSTATUSPAGESIZE_F,
EGRSTATUSPAGESIZE(stat_len != 64)); INGPADBOUNDARY_V(ingpad) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
/* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */ /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0); csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
/* /*
* If using hard params, the following will get set correctly * If using hard params, the following will get set correctly
...@@ -1333,20 +1334,21 @@ csio_wr_fixup_host_params(struct csio_hw *hw) ...@@ -1333,20 +1334,21 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
*/ */
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) { if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
csio_wr_reg32(hw, csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) + (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
SGE_FL_BUFFER_SIZE2); SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw, csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) + (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
SGE_FL_BUFFER_SIZE3); SGE_FL_BUFFER_SIZE3_A);
} }
csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ); csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
/* default value of rx_dma_offset of the NIC driver */ /* default value of rx_dma_offset of the NIC driver */
csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK, csio_set_reg_field(hw, SGE_CONTROL_A,
PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET)); PKTSHIFT_V(PKTSHIFT_M),
PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));
csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG, csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
CSUM_HAS_PSEUDO_HDR, 0); CSUM_HAS_PSEUDO_HDR, 0);
...@@ -1384,9 +1386,9 @@ csio_wr_get_sge(struct csio_hw *hw) ...@@ -1384,9 +1386,9 @@ csio_wr_get_sge(struct csio_hw *hw)
u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
u32 ingress_rx_threshold; u32 ingress_rx_threshold;
sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL); sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
ingpad = INGPADBOUNDARY_GET(sge->sge_control); ingpad = INGPADBOUNDARY_G(sge->sge_control);
switch (ingpad) { switch (ingpad) {
case X_INGPCIEBOUNDARY_32B: case X_INGPCIEBOUNDARY_32B:
...@@ -1427,11 +1429,11 @@ csio_wr_get_sge(struct csio_hw *hw) ...@@ -1427,11 +1429,11 @@ csio_wr_get_sge(struct csio_hw *hw)
sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw, sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE5_GET(timer_value_4_and_5)); TIMERVALUE5_GET(timer_value_4_and_5));
ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD); ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold); sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold); sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold); sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold); sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
csio_init_intr_coalesce_parms(hw); csio_init_intr_coalesce_parms(hw);
} }
...@@ -1454,9 +1456,9 @@ csio_wr_set_sge(struct csio_hw *hw) ...@@ -1454,9 +1456,9 @@ csio_wr_set_sge(struct csio_hw *hw)
* Set up our basic SGE mode to deliver CPL messages to our Ingress * Set up our basic SGE mode to deliver CPL messages to our Ingress
* Queue and Packet Date to the Free List. * Queue and Packet Date to the Free List.
*/ */
csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1)); csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);
sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL); sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */ /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
...@@ -1464,22 +1466,24 @@ csio_wr_set_sge(struct csio_hw *hw) ...@@ -1464,22 +1466,24 @@ csio_wr_set_sge(struct csio_hw *hw)
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover. * and generate an interrupt when this occurs so we can recover.
*/ */
csio_set_reg_field(hw, SGE_DBFIFO_STATUS, csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
HP_INT_THRESH(HP_INT_THRESH_MASK) | HP_INT_THRESH_V(HP_INT_THRESH_M) |
CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)), CSIO_HW_LP_INT_THRESH(hw,
HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) | CSIO_HW_M_LP_INT_THRESH(hw)),
CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH)); HP_INT_THRESH_V(CSIO_SGE_DBFIFO_INT_THRESH) |
CSIO_HW_LP_INT_THRESH(hw,
CSIO_SGE_DBFIFO_INT_THRESH));
csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP, csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
ENABLE_DROP); ENABLE_DROP_F);
/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */ /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1); CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1) csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
& ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2); & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1) csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
& ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3); & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4); CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5); CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6); CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
...@@ -1502,11 +1506,11 @@ csio_wr_set_sge(struct csio_hw *hw) ...@@ -1502,11 +1506,11 @@ csio_wr_set_sge(struct csio_hw *hw)
sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2; sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3; sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) | csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
THRESHOLD_1(sge->counter_val[1]) | THRESHOLD_1_V(sge->counter_val[1]) |
THRESHOLD_2(sge->counter_val[2]) | THRESHOLD_2_V(sge->counter_val[2]) |
THRESHOLD_3(sge->counter_val[3]), THRESHOLD_3_V(sge->counter_val[3]),
SGE_INGRESS_RX_THRESHOLD); SGE_INGRESS_RX_THRESHOLD_A);
csio_wr_reg32(hw, csio_wr_reg32(hw,
TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) | TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment