Commit a515abd7 authored by David S. Miller's avatar David S. Miller

Merge branch 'cxgb4-next'

Hariprasad Shenai says:

====================
RDMA/cxgb4/cxgb4vf/csiostor: Cleanup register defines

This series continues to cleanup all the macros/register defines related to
SGE, PCIE, MC, MA, TCAM, MAC, etc that are defined in t4_regs.h and the
affected files.

We will post another 1 or 2 series to cover the remaining macros, so that
they all follow a single, consistent style.

The patch series is created against the 'net-next' tree.
And includes patches on cxgb4, cxgb4vf, iw_cxgb4 and csiostor driver.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know if you have any review comments.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 5f07b3c5 0d804338
...@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, ...@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else { } else {
PDBG("%s: DB wq->sq.pidx = %d\n", PDBG("%s: DB wq->sq.pidx = %d\n",
__func__, wq->sq.pidx); __func__, wq->sq.pidx);
writel(PIDX_T5(inc), wq->sq.udb); writel(PIDX_T5_V(inc), wq->sq.udb);
} }
/* Flush user doorbell area writes. */ /* Flush user doorbell area writes. */
wmb(); wmb();
return; return;
} }
writel(QID(wq->sq.qid) | PIDX(inc), wq->db); writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
} }
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
...@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, ...@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else { } else {
PDBG("%s: DB wq->rq.pidx = %d\n", PDBG("%s: DB wq->rq.pidx = %d\n",
__func__, wq->rq.pidx); __func__, wq->rq.pidx);
writel(PIDX_T5(inc), wq->rq.udb); writel(PIDX_T5_V(inc), wq->rq.udb);
} }
/* Flush user doorbell area writes. */ /* Flush user doorbell area writes. */
wmb(); wmb();
return; return;
} }
writel(QID(wq->rq.qid) | PIDX(inc), wq->db); writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
} }
static inline int t4_wq_in_error(struct t4_wq *wq) static inline int t4_wq_in_error(struct t4_wq *wq)
...@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se) ...@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
u32 val; u32 val;
set_bit(CQ_ARMED, &cq->flags); set_bit(CQ_ARMED, &cq->flags);
while (cq->cidx_inc > CIDXINC_MASK) { while (cq->cidx_inc > CIDXINC_M) {
val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) | val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
INGRESSQID(cq->cqid); INGRESSQID_V(cq->cqid);
writel(val, cq->gts); writel(val, cq->gts);
cq->cidx_inc -= CIDXINC_MASK; cq->cidx_inc -= CIDXINC_M;
} }
val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) | val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
INGRESSQID(cq->cqid); INGRESSQID_V(cq->cqid);
writel(val, cq->gts); writel(val, cq->gts);
cq->cidx_inc = 0; cq->cidx_inc = 0;
return 0; return 0;
...@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq) ...@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
static inline void t4_hwcq_consume(struct t4_cq *cq) static inline void t4_hwcq_consume(struct t4_cq *cq)
{ {
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts; cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) { if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
u32 val; u32 val;
val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) | val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
INGRESSQID(cq->cqid); INGRESSQID_V(cq->cqid);
writel(val, cq->gts); writel(val, cq->gts);
cq->cidx_inc = 0; cq->cidx_inc = 0;
} }
......
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
#include "t4_msg.h" #include "t4_msg.h"
#include "t4fw_api.h" #include "t4fw_api.h"
#include "t4_regs.h" #include "t4_regs.h"
#include "t4_values.h"
#define VLAN_NONE 0xfff #define VLAN_NONE 0xfff
...@@ -425,7 +426,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev, ...@@ -425,7 +426,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
* in the Compressed Filter Tuple. * in the Compressed Filter Tuple.
*/ */
if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE) if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift; ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;
if (tp->port_shift >= 0) if (tp->port_shift >= 0)
ntuple |= (u64)l2t->lport << tp->port_shift; ntuple |= (u64)l2t->lport << tp->port_shift;
...@@ -439,9 +440,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev, ...@@ -439,9 +440,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
u32 pf = FW_VIID_PFN_G(viid); u32 pf = FW_VIID_PFN_G(viid);
u32 vld = FW_VIID_VIVLD_G(viid); u32 vld = FW_VIID_VIVLD_G(viid);
ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
V_FT_VNID_ID_PF(pf) | FT_VNID_ID_PF_V(pf) |
V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift; FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
} }
return ntuple; return ntuple;
......
This diff is collapsed.
This diff is collapsed.
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_VALUES_H__
#define __T4_VALUES_H__
/* This file contains definitions for various T4 register value hardware
 * constants. The types of values encoded here are predominantly those for
 * register fields which control "modal" behavior. For the most part, we do
 * not include definitions for register fields which are simple numeric
 * metrics, etc.
 *
 * Naming convention used throughout: *_X for raw hardware constant values,
 * *_S for a field's bit shift, *_V(x) to place a value into a field, and
 * *_F for a single-bit flag (see the FT_* macros below).
 */
/* SGE register field values.
 */
/* CONTROL1 register */
#define RXPKTCPLMODE_SPLIT_X 1
#define INGPCIEBOUNDARY_SHIFT_X 5
#define INGPCIEBOUNDARY_32B_X 0
#define INGPADBOUNDARY_SHIFT_X 5
/* CONTROL2 register */
#define INGPACKBOUNDARY_SHIFT_X 5
#define INGPACKBOUNDARY_16B_X 0
/* GTS register */
#define SGE_TIMERREGS 6 /* presumably the count of usable TimerReg encodings — confirm against t4_regs.h */
/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
 * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
 * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
 * (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64. For Ingress Queues,
 * we have a Going To Sleep register at offsets 8x+4.
 *
 * As noted above, we have many instances of the Simple Doorbell and Going To
 * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
 * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
 * avoid buffering of the writes to the Simple Doorbell and we want to use a
 * non-contiguous offset for the Going To Sleep writes in order to avoid
 * possible combining between them.
 */
#define SGE_UDB_SIZE 128 /* bytes per queue's User Doorbell region */
#define SGE_UDB_KDOORBELL 8 /* Simple Doorbell offset (deliberately not 64B-aligned; see above) */
#define SGE_UDB_GTS 20 /* Going To Sleep offset (non-contiguous with the doorbell; see above) */
#define SGE_UDB_WCDOORBELL 64 /* Write Combining Gather Buffer offset */
/* PCI-E definitions */
#define WINDOW_SHIFT_X 10
#define PCIEOFST_SHIFT_X 10
/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
 * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
 * selects for a particular field being present. These fields, when present
 * in the Compressed Filter Tuple, have the following widths in bits.
 */
#define FT_FCOE_W 1
#define FT_PORT_W 3
#define FT_VNIC_ID_W 17
#define FT_VLAN_W 17
#define FT_TOS_W 8
#define FT_PROTOCOL_W 8
#define FT_ETHERTYPE_W 16
#define FT_MACMATCH_W 9
#define FT_MPSHITTYPE_W 3
#define FT_FRAGMENTATION_W 1
/* Some of the Compressed Filter Tuple fields have internal structure. These
 * bit shifts/masks describe those structures. All shifts are relative to the
 * base position of the fields within the Compressed Filter Tuple
 */
/* VLAN Valid bit within the FT_VLAN field */
#define FT_VLAN_VLD_S 16
#define FT_VLAN_VLD_V(x) ((x) << FT_VLAN_VLD_S)
#define FT_VLAN_VLD_F FT_VLAN_VLD_V(1U)
/* VF number, PF number and Valid bit within the FT_VNIC_ID field */
#define FT_VNID_ID_VF_S 0
#define FT_VNID_ID_VF_V(x) ((x) << FT_VNID_ID_VF_S)
#define FT_VNID_ID_PF_S 7
#define FT_VNID_ID_PF_V(x) ((x) << FT_VNID_ID_PF_S)
#define FT_VNID_ID_VLD_S 16
#define FT_VNID_ID_VLD_V(x) ((x) << FT_VNID_ID_VLD_S)
#endif /* __T4_VALUES_H__ */
...@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq) ...@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq)
* enable interrupts. * enable interrupts.
*/ */
t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
CIDXINC(0) | CIDXINC_V(0) |
SEINTARM(rspq->intr_params) | SEINTARM_V(rspq->intr_params) |
INGRESSQID(rspq->cntxt_id)); INGRESSQID_V(rspq->cntxt_id));
} }
/* /*
...@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter) ...@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter)
*/ */
if (adapter->flags & USING_MSI) if (adapter->flags & USING_MSI)
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
CIDXINC(0) | CIDXINC_V(0) |
SEINTARM(s->intrq.intr_params) | SEINTARM_V(s->intrq.intr_params) |
INGRESSQID(s->intrq.cntxt_id)); INGRESSQID_V(s->intrq.cntxt_id));
} }
...@@ -1673,7 +1673,7 @@ static void cxgb4vf_get_regs(struct net_device *dev, ...@@ -1673,7 +1673,7 @@ static void cxgb4vf_get_regs(struct net_device *dev,
reg_block_dump(adapter, regbuf, reg_block_dump(adapter, regbuf,
T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
? A_PL_VF_WHOAMI : A_PL_VF_REVISION)); ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
reg_block_dump(adapter, regbuf, reg_block_dump(adapter, regbuf,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
...@@ -2294,26 +2294,22 @@ static int adap_init0(struct adapter *adapter) ...@@ -2294,26 +2294,22 @@ static int adap_init0(struct adapter *adapter)
* threshold values from the SGE parameters. * threshold values from the SGE parameters.
*/ */
s->timer_val[0] = core_ticks_to_us(adapter, s->timer_val[0] = core_ticks_to_us(adapter,
TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1)); TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
s->timer_val[1] = core_ticks_to_us(adapter, s->timer_val[1] = core_ticks_to_us(adapter,
TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1)); TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
s->timer_val[2] = core_ticks_to_us(adapter, s->timer_val[2] = core_ticks_to_us(adapter,
TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3)); TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
s->timer_val[3] = core_ticks_to_us(adapter, s->timer_val[3] = core_ticks_to_us(adapter,
TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3)); TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
s->timer_val[4] = core_ticks_to_us(adapter, s->timer_val[4] = core_ticks_to_us(adapter,
TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5)); TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
s->timer_val[5] = core_ticks_to_us(adapter, s->timer_val[5] = core_ticks_to_us(adapter,
TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5)); TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
s->counter_val[0] = s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold); s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
s->counter_val[1] = s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold); s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
s->counter_val[2] =
THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
s->counter_val[3] =
THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
/* /*
* Grab our Virtual Interface resource allocation, extract the * Grab our Virtual Interface resource allocation, extract the
......
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
#include "t4vf_defs.h" #include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h" #include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h" #include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h" #include "../cxgb4/t4_msg.h"
...@@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) ...@@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/ */
if (fl->pend_cred >= FL_PER_EQ_UNIT) { if (fl->pend_cred >= FL_PER_EQ_UNIT) {
if (is_t4(adapter->params.chip)) if (is_t4(adapter->params.chip))
val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
else else
val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) | val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
DBTYPE(1); DBTYPE_F;
val |= DBPRIO(1); val |= DBPRIO_F;
/* Make sure all memory writes to the Free List queue are /* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them. * committed before we tell the hardware about them.
...@@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) ...@@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
if (unlikely(fl->bar2_addr == NULL)) { if (unlikely(fl->bar2_addr == NULL)) {
t4_write_reg(adapter, t4_write_reg(adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
QID(fl->cntxt_id) | val); QID_V(fl->cntxt_id) | val);
} else { } else {
writel(val | QID(fl->bar2_qid), writel(val | QID_V(fl->bar2_qid),
fl->bar2_addr + SGE_UDB_KDOORBELL); fl->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write memory Barrier will force the write to /* This Write memory Barrier will force the write to
...@@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, ...@@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* doorbell mechanism; otherwise use the new BAR2 mechanism. * doorbell mechanism; otherwise use the new BAR2 mechanism.
*/ */
if (unlikely(tq->bar2_addr == NULL)) { if (unlikely(tq->bar2_addr == NULL)) {
u32 val = PIDX(n); u32 val = PIDX_V(n);
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
QID(tq->cntxt_id) | val); QID_V(tq->cntxt_id) | val);
} else { } else {
u32 val = PIDX_T5(n); u32 val = PIDX_T5_V(n);
/* T4 and later chips share the same PIDX field offset within /* T4 and later chips share the same PIDX field offset within
* the doorbell, but T5 and later shrank the field in order to * the doorbell, but T5 and later shrank the field in order to
...@@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, ...@@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* large in the first place (14 bits) so we just use the T5 * large in the first place (14 bits) so we just use the T5
* and later limits and warn if a Queue ID is too large. * and later limits and warn if a Queue ID is too large.
*/ */
WARN_ON(val & DBPRIO(1)); WARN_ON(val & DBPRIO_F);
/* If we're only writing a single Egress Unit and the BAR2 /* If we're only writing a single Egress Unit and the BAR2
* Queue ID is 0, we can use the Write Combining Doorbell * Queue ID is 0, we can use the Write Combining Doorbell
...@@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, ...@@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
count--; count--;
} }
} else } else
writel(val | QID(tq->bar2_qid), writel(val | QID_V(tq->bar2_qid),
tq->bar2_addr + SGE_UDB_KDOORBELL); tq->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write Memory Barrier will force the write to the User /* This Write Memory Barrier will force the write to the User
...@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) ...@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
if (unlikely(work_done == 0)) if (unlikely(work_done == 0))
rspq->unhandled_irqs++; rspq->unhandled_irqs++;
val = CIDXINC(work_done) | SEINTARM(intr_params); val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
if (is_t4(rspq->adapter->params.chip)) { if (is_t4(rspq->adapter->params.chip)) {
t4_write_reg(rspq->adapter, t4_write_reg(rspq->adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_GTS, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID((u32)rspq->cntxt_id)); val | INGRESSQID_V((u32)rspq->cntxt_id));
} else { } else {
writel(val | INGRESSQID(rspq->bar2_qid), writel(val | INGRESSQID_V(rspq->bar2_qid),
rspq->bar2_addr + SGE_UDB_GTS); rspq->bar2_addr + SGE_UDB_GTS);
wmb(); wmb();
} }
...@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter) ...@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter)
rspq_next(intrq); rspq_next(intrq);
} }
val = CIDXINC(work_done) | SEINTARM(intrq->intr_params); val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
if (is_t4(adapter->params.chip)) if (is_t4(adapter->params.chip))
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID(intrq->cntxt_id)); val | INGRESSQID_V(intrq->cntxt_id));
else { else {
writel(val | INGRESSQID(intrq->bar2_qid), writel(val | INGRESSQID_V(intrq->bar2_qid),
intrq->bar2_addr + SGE_UDB_GTS); intrq->bar2_addr + SGE_UDB_GTS);
wmb(); wmb();
} }
...@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter) ...@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter)
fl0, fl1); fl0, fl1);
return -EINVAL; return -EINVAL;
} }
if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) { if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL; return -EINVAL;
} }
...@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter) ...@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter)
*/ */
if (fl1) if (fl1)
s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64); ? 128 : 64);
s->pktshift = PKTSHIFT_GET(sge_params->sge_control); s->pktshift = PKTSHIFT_G(sge_params->sge_control);
/* T4 uses a single control field to specify both the PCIe Padding and /* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these * Packing Boundary. T5 introduced the ability to specify these
...@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter) ...@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter)
* end doing this because it would initialize the Padding Boundary and * end doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).) * leave the Packing Boundary initialized to 0 (16 bytes).)
*/ */
ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
X_INGPADBOUNDARY_SHIFT); INGPADBOUNDARY_SHIFT_X);
if (is_t4(adapter->params.chip)) { if (is_t4(adapter->params.chip)) {
s->fl_align = ingpadboundary; s->fl_align = ingpadboundary;
} else { } else {
...@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter) ...@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter)
* Congestion Threshold is in units of 2 Free List pointers.) * Congestion Threshold is in units of 2 Free List pointers.)
*/ */
s->fl_starve_thres s->fl_starve_thres
= EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1; = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
/* /*
* Set up tasklet timers. * Set up tasklet timers.
......
...@@ -64,8 +64,8 @@ ...@@ -64,8 +64,8 @@
* Mailbox Data in the fixed CIM PF map and the programmable VF map must * Mailbox Data in the fixed CIM PF map and the programmable VF map must
* match. However, it's a useful convention ... * match. However, it's a useful convention ...
*/ */
#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA #if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA_A
#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA! #error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA_A!
#endif #endif
/* /*
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include "t4vf_defs.h" #include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h" #include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h" #include "../cxgb4/t4fw_api.h"
/* /*
...@@ -137,9 +138,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, ...@@ -137,9 +138,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* Loop trying to get ownership of the mailbox. Return an error * Loop trying to get ownership of the mailbox. Return an error
* if we can't gain ownership. * if we can't gain ownership.
*/ */
v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl)); v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl)); v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
if (v != MBOX_OWNER_DRV) if (v != MBOX_OWNER_DRV)
return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT; return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
...@@ -161,7 +162,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, ...@@ -161,7 +162,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
t4_read_reg(adapter, mbox_data); /* flush write */ t4_read_reg(adapter, mbox_data); /* flush write */
t4_write_reg(adapter, mbox_ctl, t4_write_reg(adapter, mbox_ctl,
MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
t4_read_reg(adapter, mbox_ctl); /* flush write */ t4_read_reg(adapter, mbox_ctl); /* flush write */
/* /*
...@@ -183,14 +184,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, ...@@ -183,14 +184,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* If we're the owner, see if this is the reply we wanted. * If we're the owner, see if this is the reply we wanted.
*/ */
v = t4_read_reg(adapter, mbox_ctl); v = t4_read_reg(adapter, mbox_ctl);
if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
/* /*
* If the Message Valid bit isn't on, revoke ownership * If the Message Valid bit isn't on, revoke ownership
* of the mailbox and continue waiting for our reply. * of the mailbox and continue waiting for our reply.
*/ */
if ((v & MBMSGVALID) == 0) { if ((v & MBMSGVALID_F) == 0) {
t4_write_reg(adapter, mbox_ctl, t4_write_reg(adapter, mbox_ctl,
MBOWNER(MBOX_OWNER_NONE)); MBOWNER_V(MBOX_OWNER_NONE));
continue; continue;
} }
...@@ -216,7 +217,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, ...@@ -216,7 +217,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
& FW_CMD_REQUEST_F) != 0); & FW_CMD_REQUEST_F) != 0);
} }
t4_write_reg(adapter, mbox_ctl, t4_write_reg(adapter, mbox_ctl,
MBOWNER(MBOX_OWNER_NONE)); MBOWNER_V(MBOX_OWNER_NONE));
return -FW_CMD_RETVAL_G(v); return -FW_CMD_RETVAL_G(v);
} }
} }
...@@ -528,19 +529,19 @@ int t4vf_get_sge_params(struct adapter *adapter) ...@@ -528,19 +529,19 @@ int t4vf_get_sge_params(struct adapter *adapter)
int v; int v;
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL)); FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE)); FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0)); FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1)); FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1)); FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3)); FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5)); FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
v = t4vf_query_params(adapter, 7, params, vals); v = t4vf_query_params(adapter, 7, params, vals);
if (v) if (v)
return v; return v;
...@@ -576,9 +577,9 @@ int t4vf_get_sge_params(struct adapter *adapter) ...@@ -576,9 +577,9 @@ int t4vf_get_sge_params(struct adapter *adapter)
} }
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD)); FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL)); FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
v = t4vf_query_params(adapter, 2, params, vals); v = t4vf_query_params(adapter, 2, params, vals);
if (v) if (v)
return v; return v;
...@@ -615,8 +616,8 @@ int t4vf_get_sge_params(struct adapter *adapter) ...@@ -615,8 +616,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
* the driver can just use it. * the driver can just use it.
*/ */
whoami = t4_read_reg(adapter, whoami = t4_read_reg(adapter,
T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI); T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
pf = SOURCEPF_GET(whoami); pf = SOURCEPF_G(whoami);
s_hps = (HOSTPAGESIZEPF0_S + s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf); (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
...@@ -628,10 +629,10 @@ int t4vf_get_sge_params(struct adapter *adapter) ...@@ -628,10 +629,10 @@ int t4vf_get_sge_params(struct adapter *adapter)
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf); (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
sge_params->sge_vf_eq_qpp = sge_params->sge_vf_eq_qpp =
((sge_params->sge_egress_queues_per_page >> s_qpp) ((sge_params->sge_egress_queues_per_page >> s_qpp)
& QUEUESPERPAGEPF0_MASK); & QUEUESPERPAGEPF0_M);
sge_params->sge_vf_iq_qpp = sge_params->sge_vf_iq_qpp =
((sge_params->sge_ingress_queues_per_page >> s_qpp) ((sge_params->sge_ingress_queues_per_page >> s_qpp)
& QUEUESPERPAGEPF0_MASK); & QUEUESPERPAGEPF0_M);
} }
return 0; return 0;
...@@ -1590,7 +1591,7 @@ int t4vf_prep_adapter(struct adapter *adapter) ...@@ -1590,7 +1591,7 @@ int t4vf_prep_adapter(struct adapter *adapter)
break; break;
case CHELSIO_T5: case CHELSIO_T5:
chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
break; break;
} }
......
This diff is collapsed.
...@@ -117,10 +117,10 @@ extern int csio_msi; ...@@ -117,10 +117,10 @@ extern int csio_msi;
#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00 #define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF #define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ #define CSIO_GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | \
EDC1 | LE | TP | MA | PM_TX | PM_RX | \ EDC0_F | EDC1_F | LE_F | TP_F | MA_F | \
ULP_RX | CPL_SWITCH | SGE | \ PM_TX_F | PM_RX_F | ULP_RX_F | \
ULP_TX | SF) CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
/* /*
* Hard parameters used to initialize the card in the absence of a * Hard parameters used to initialize the card in the absence of a
......
...@@ -66,19 +66,19 @@ static inline int csio_is_t5(uint16_t chip) ...@@ -66,19 +66,19 @@ static inline int csio_is_t5(uint16_t chip)
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
#define CSIO_HW_PIDX(hw, index) \ #define CSIO_HW_PIDX(hw, index) \
(csio_is_t4(hw->chip_id) ? (PIDX(index)) : \ (csio_is_t4(hw->chip_id) ? (PIDX_V(index)) : \
(PIDX_T5(index) | DBTYPE(1U))) (PIDX_T5_G(index) | DBTYPE_F))
#define CSIO_HW_LP_INT_THRESH(hw, val) \ #define CSIO_HW_LP_INT_THRESH(hw, val) \
(csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) : \ (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_V(val)) : \
(V_LP_INT_THRESH_T5(val))) (LP_INT_THRESH_T5_V(val)))
#define CSIO_HW_M_LP_INT_THRESH(hw) \ #define CSIO_HW_M_LP_INT_THRESH(hw) \
(csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5)) (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_M) : (LP_INT_THRESH_T5_M))
#define CSIO_MAC_INT_CAUSE_REG(hw, port) \ #define CSIO_MAC_INT_CAUSE_REG(hw, port) \
(csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \ (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE_A)) : \
(T5_PORT_REG(port, MAC_PORT_INT_CAUSE))) (T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)))
#define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0) #define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0)
#define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0) #define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0)
......
...@@ -96,11 +96,11 @@ csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win) ...@@ -96,11 +96,11 @@ csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
* back MA register to ensure that changes propagate before we attempt * back MA register to ensure that changes propagate before we attempt
* to use the new values.) * to use the new values.)
*/ */
csio_wr_reg32(hw, mem_win_base | BIR(0) | csio_wr_reg32(hw, mem_win_base | BIR_V(0) |
WINDOW(ilog2(MEMWIN_APERTURE) - 10), WINDOW_V(ilog2(MEMWIN_APERTURE) - 10),
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
csio_rd_reg32(hw, csio_rd_reg32(hw,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
return 0; return 0;
} }
...@@ -111,69 +111,69 @@ static void ...@@ -111,69 +111,69 @@ static void
csio_t4_pcie_intr_handler(struct csio_hw *hw) csio_t4_pcie_intr_handler(struct csio_hw *hw)
{ {
static struct intr_info sysbus_intr_info[] = { static struct intr_info sysbus_intr_info[] = {
{ RNPP, "RXNP array parity error", -1, 1 }, { RNPP_F, "RXNP array parity error", -1, 1 },
{ RPCP, "RXPC array parity error", -1, 1 }, { RPCP_F, "RXPC array parity error", -1, 1 },
{ RCIP, "RXCIF array parity error", -1, 1 }, { RCIP_F, "RXCIF array parity error", -1, 1 },
{ RCCP, "Rx completions control array parity error", -1, 1 }, { RCCP_F, "Rx completions control array parity error", -1, 1 },
{ RFTP, "RXFT array parity error", -1, 1 }, { RFTP_F, "RXFT array parity error", -1, 1 },
{ 0, NULL, 0, 0 } { 0, NULL, 0, 0 }
}; };
static struct intr_info pcie_port_intr_info[] = { static struct intr_info pcie_port_intr_info[] = {
{ TPCP, "TXPC array parity error", -1, 1 }, { TPCP_F, "TXPC array parity error", -1, 1 },
{ TNPP, "TXNP array parity error", -1, 1 }, { TNPP_F, "TXNP array parity error", -1, 1 },
{ TFTP, "TXFT array parity error", -1, 1 }, { TFTP_F, "TXFT array parity error", -1, 1 },
{ TCAP, "TXCA array parity error", -1, 1 }, { TCAP_F, "TXCA array parity error", -1, 1 },
{ TCIP, "TXCIF array parity error", -1, 1 }, { TCIP_F, "TXCIF array parity error", -1, 1 },
{ RCAP, "RXCA array parity error", -1, 1 }, { RCAP_F, "RXCA array parity error", -1, 1 },
{ OTDD, "outbound request TLP discarded", -1, 1 }, { OTDD_F, "outbound request TLP discarded", -1, 1 },
{ RDPE, "Rx data parity error", -1, 1 }, { RDPE_F, "Rx data parity error", -1, 1 },
{ TDUE, "Tx uncorrectable data error", -1, 1 }, { TDUE_F, "Tx uncorrectable data error", -1, 1 },
{ 0, NULL, 0, 0 } { 0, NULL, 0, 0 }
}; };
static struct intr_info pcie_intr_info[] = { static struct intr_info pcie_intr_info[] = {
{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
{ MSIDATAPERR, "MSI data parity error", -1, 1 }, { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
{ CREQPERR, "PCI CMD channel request parity error", -1, 1 }, { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
{ DREQPERR, "PCI DMA channel request parity error", -1, 1 }, { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
{ HREQPERR, "PCI HMA channel request parity error", -1, 1 }, { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
{ FIDPERR, "PCI FID parity error", -1, 1 }, { FIDPERR_F, "PCI FID parity error", -1, 1 },
{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
{ MATAGPERR, "PCI MA tag parity error", -1, 1 }, { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
{ RXWRPERR, "PCI Rx write parity error", -1, 1 }, { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
{ RPLPERR, "PCI replay buffer parity error", -1, 1 }, { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
{ PCIESINT, "PCI core secondary fault", -1, 1 }, { PCIESINT_F, "PCI core secondary fault", -1, 1 },
{ PCIEPINT, "PCI core primary fault", -1, 1 }, { PCIEPINT_F, "PCI core primary fault", -1, 1 },
{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, { UNXSPLCPLERR_F, "PCI unexpected split completion error", -1,
0 }, 0 },
{ 0, NULL, 0, 0 } { 0, NULL, 0, 0 }
}; };
int fat; int fat;
fat = csio_handle_intr_status(hw, fat = csio_handle_intr_status(hw,
PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
sysbus_intr_info) + sysbus_intr_info) +
csio_handle_intr_status(hw, csio_handle_intr_status(hw,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
pcie_port_intr_info) + pcie_port_intr_info) +
csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info); csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
if (fat) if (fat)
csio_hw_fatal_err(hw); csio_hw_fatal_err(hw);
} }
...@@ -209,19 +209,19 @@ csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, ...@@ -209,19 +209,19 @@ csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
{ {
int i; int i;
if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST) if (csio_rd_reg32(hw, MC_BIST_CMD_A) & START_BIST_F)
return -EBUSY; return -EBUSY;
csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR); csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR_A);
csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN); csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN_A);
csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN); csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN_A);
csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1), csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
MC_BIST_CMD); MC_BIST_CMD_A);
i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST, i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD_A, START_BIST_F,
0, 10, 1, NULL); 0, 10, 1, NULL);
if (i) if (i)
return i; return i;
#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i)
for (i = 15; i >= 0; i--) for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, MC_DATA(i))); *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
...@@ -250,19 +250,19 @@ csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, ...@@ -250,19 +250,19 @@ csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
int i; int i;
idx *= EDC_STRIDE; idx *= EDC_STRIDE;
if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST) if (csio_rd_reg32(hw, EDC_BIST_CMD_A + idx) & START_BIST_F)
return -EBUSY; return -EBUSY;
csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx); csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR_A + idx);
csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx); csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN_A + idx);
csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx); csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN_A + idx);
csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST, csio_wr_reg32(hw, BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F,
EDC_BIST_CMD + idx); EDC_BIST_CMD_A + idx);
i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST, i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD_A + idx, START_BIST_F,
0, 10, 1, NULL); 0, 10, 1, NULL);
if (i) if (i)
return i; return i;
#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)
for (i = 15; i >= 0; i--) for (i = 15; i >= 0; i--)
*data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i))); *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
...@@ -329,9 +329,9 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr, ...@@ -329,9 +329,9 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* the address is relative to BAR0. * the address is relative to BAR0.
*/ */
mem_reg = csio_rd_reg32(hw, mem_reg = csio_rd_reg32(hw,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
mem_aperture = 1 << (WINDOW(mem_reg) + 10); mem_aperture = 1 << (WINDOW_V(mem_reg) + 10);
mem_base = GET_PCIEOFST(mem_reg) << 10; mem_base = PCIEOFST_G(mem_reg) << 10;
bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0); bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
bar0 &= PCI_BASE_ADDRESS_MEM_MASK; bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
...@@ -356,9 +356,9 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr, ...@@ -356,9 +356,9 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* before we attempt to use the new value. * before we attempt to use the new value.
*/ */
csio_wr_reg32(hw, pos, csio_wr_reg32(hw, pos,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
csio_rd_reg32(hw, csio_rd_reg32(hw,
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
while (offset < mem_aperture && len > 0) { while (offset < mem_aperture && len > 0) {
if (dir) if (dir)
......
This diff is collapsed.
...@@ -317,7 +317,7 @@ csio_fcoe_isr(int irq, void *dev_id) ...@@ -317,7 +317,7 @@ csio_fcoe_isr(int irq, void *dev_id)
/* Disable the interrupt for this PCI function. */ /* Disable the interrupt for this PCI function. */
if (hw->intr_mode == CSIO_IM_INTX) if (hw->intr_mode == CSIO_IM_INTX)
csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI)); csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));
/* /*
* The read in the following function will flush the * The read in the following function will flush the
......
...@@ -1104,8 +1104,8 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw, ...@@ -1104,8 +1104,8 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw,
void void
csio_mb_intr_enable(struct csio_hw *hw) csio_mb_intr_enable(struct csio_hw *hw)
{ {
csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE)); csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE)); csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
} }
/* /*
...@@ -1117,8 +1117,9 @@ csio_mb_intr_enable(struct csio_hw *hw) ...@@ -1117,8 +1117,9 @@ csio_mb_intr_enable(struct csio_hw *hw)
void void
csio_mb_intr_disable(struct csio_hw *hw) csio_mb_intr_disable(struct csio_hw *hw)
{ {
csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE)); csio_wr_reg32(hw, MBMSGRDYINTEN_V(0),
csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE)); MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
} }
static void static void
...@@ -1153,8 +1154,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw) ...@@ -1153,8 +1154,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
{ {
int i; int i;
__be64 cmd[CSIO_MB_MAX_REGS]; __be64 cmd[CSIO_MB_MAX_REGS];
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL); uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA); uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size = sizeof(struct fw_debug_cmd); int size = sizeof(struct fw_debug_cmd);
/* Copy mailbox data */ /* Copy mailbox data */
...@@ -1164,8 +1165,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw) ...@@ -1164,8 +1165,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
csio_mb_dump_fw_dbg(hw, cmd); csio_mb_dump_fw_dbg(hw, cmd);
/* Notify FW of mailbox by setting owner as UP */ /* Notify FW of mailbox by setting owner as UP */
csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW), csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
ctl_reg); MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
csio_rd_reg32(hw, ctl_reg); csio_rd_reg32(hw, ctl_reg);
wmb(); wmb();
...@@ -1187,8 +1188,8 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) ...@@ -1187,8 +1188,8 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
__be64 *cmd = mbp->mb; __be64 *cmd = mbp->mb;
__be64 hdr; __be64 hdr;
struct csio_mbm *mbm = &hw->mbm; struct csio_mbm *mbm = &hw->mbm;
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL); uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA); uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size = mbp->mb_size; int size = mbp->mb_size;
int rv = -EINVAL; int rv = -EINVAL;
struct fw_cmd_hdr *fw_hdr; struct fw_cmd_hdr *fw_hdr;
...@@ -1224,12 +1225,12 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) ...@@ -1224,12 +1225,12 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
} }
/* Now get ownership of mailbox */ /* Now get ownership of mailbox */
owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg)); owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
if (!csio_mb_is_host_owner(owner)) { if (!csio_mb_is_host_owner(owner)) {
for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++) for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg)); owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
/* /*
* Mailbox unavailable. In immediate mode, fail the command. * Mailbox unavailable. In immediate mode, fail the command.
* In other modes, enqueue the request. * In other modes, enqueue the request.
...@@ -1271,10 +1272,10 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) ...@@ -1271,10 +1272,10 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
if (mbp->mb_cbfn != NULL) { if (mbp->mb_cbfn != NULL) {
mbm->mcurrent = mbp; mbm->mcurrent = mbp;
mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo)); mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
MBOWNER(CSIO_MBOWNER_FW), ctl_reg); MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
} else } else
csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW), csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW),
ctl_reg); ctl_reg);
/* Flush posted writes */ /* Flush posted writes */
...@@ -1294,9 +1295,9 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) ...@@ -1294,9 +1295,9 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
/* Check for response */ /* Check for response */
ctl = csio_rd_reg32(hw, ctl_reg); ctl = csio_rd_reg32(hw, ctl_reg);
if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) { if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
if (!(ctl & MBMSGVALID)) { if (!(ctl & MBMSGVALID_F)) {
csio_wr_reg32(hw, 0, ctl_reg); csio_wr_reg32(hw, 0, ctl_reg);
continue; continue;
} }
...@@ -1457,16 +1458,16 @@ csio_mb_isr_handler(struct csio_hw *hw) ...@@ -1457,16 +1458,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
__be64 *cmd; __be64 *cmd;
uint32_t ctl, cim_cause, pl_cause; uint32_t ctl, cim_cause, pl_cause;
int i; int i;
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL); uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA); uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
int size; int size;
__be64 hdr; __be64 hdr;
struct fw_cmd_hdr *fw_hdr; struct fw_cmd_hdr *fw_hdr;
pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE)); pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A));
cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE)); cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) { if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) {
CSIO_INC_STATS(hw, n_mbint_unexp); CSIO_INC_STATS(hw, n_mbint_unexp);
return -EINVAL; return -EINVAL;
} }
...@@ -1477,16 +1478,16 @@ csio_mb_isr_handler(struct csio_hw *hw) ...@@ -1477,16 +1478,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
* the upper level cause register. In other words, CIM-cause * the upper level cause register. In other words, CIM-cause
* first followed by PL-Cause next. * first followed by PL-Cause next.
*/ */
csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE)); csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE)); csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A));
ctl = csio_rd_reg32(hw, ctl_reg); ctl = csio_rd_reg32(hw, ctl_reg);
if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) { if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
CSIO_DUMP_MB(hw, hw->pfn, data_reg); CSIO_DUMP_MB(hw, hw->pfn, data_reg);
if (!(ctl & MBMSGVALID)) { if (!(ctl & MBMSGVALID_F)) {
csio_warn(hw, csio_warn(hw,
"Stray mailbox interrupt recvd," "Stray mailbox interrupt recvd,"
" mailbox data not valid\n"); " mailbox data not valid\n");
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment