Commit 48899291 authored by David S. Miller

Merge branch 'qed-sriov'

Yuval Mintz says:

====================
qed*: Add SR-IOV support

This series adds SR-IOV support to the qed/qede drivers, adding a new PCI
device ID for a VF that is shared between all the various PFs that
support IOV.

This is quite a massive series - the first 7 patches add the
infrastructure for supporting VFs in qed - mainly support for a
HW-based VF<->PF channel, as well as splitting all existing configuration
flows based on the PF/VF decision. I.e., while PF-originated requests
go directly to HW/FW, VF requests first have to traverse to the PF,
which performs the configuration on their behalf.

The 8th patch adds support for the VF device in qede.

The remaining 6 patches each add user-facing API support related to
VFs that can be exercised via the PF - forcing MAC/VLAN, changing speed, etc.

Dave,

Sorry in advance for the length of the series. Most of the bulk here is in
the infrastructure patches that have to go together [or at least, it makes
little sense to try splitting them up].

Please consider applying this to `net-next'.

Thanks,
Yuval

Changes from previous revision:
------------------------------
 - V2 - Replace aligned_u64 with regular u64; this was possible as the
        shared structures [between PF and VF] were already sufficiently
        padded as-is in the API, making the explicit alignment redundant
        (see the sketch below).
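
        For illustration, a minimal standalone sketch of why the explicit
        alignment became redundant. The structure and field names below are
        hypothetical (not the actual qed PF<->VF API); the point is only that
        once the members preceding a 64-bit field already pad it to an 8-byte
        offset, a plain u64 yields the same layout that aligned_u64 would force:

        #include <stdint.h>
        #include <stddef.h>

        /* Hypothetical PF<->VF shared structure: the two 32-bit fields
         * already place the 64-bit member at offset 8, so its placement is
         * identical whether it is declared as a plain u64 or as an
         * explicitly aligned one.
         */
        struct vfpf_example_msg {
                uint32_t opcode;        /* offset 0 */
                uint32_t length;        /* offset 4 */
                uint64_t dma_addr;      /* offset 8 - already 8-byte aligned */
        };

        /* Guards the layout assumption at compile time for both PF and VF builds */
        _Static_assert(offsetof(struct vfpf_example_msg, dma_addr) % 8 == 0,
                       "64-bit member must remain 8-byte aligned");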
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 631ad4a3 831bfb0e
...@@ -98,6 +98,16 @@ config QED ...@@ -98,6 +98,16 @@ config QED
---help--- ---help---
This enables the support for ... This enables the support for ...
config QED_SRIOV
bool "QLogic QED 25/40/100Gb SR-IOV support"
depends on QED && PCI_IOV
default y
---help---
This configuration parameter enables Single Root Input Output
Virtualization support for QED devices.
This allows for virtual function acceleration in virtualized
environments.
config QEDE config QEDE
tristate "QLogic QED 25/40/100Gb Ethernet NIC" tristate "QLogic QED 25/40/100Gb Ethernet NIC"
depends on QED depends on QED
......
...@@ -3,3 +3,4 @@ obj-$(CONFIG_QED) := qed.o ...@@ -3,3 +3,4 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
qed_selftest.o qed_selftest.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
...@@ -152,6 +152,7 @@ enum QED_RESOURCES { ...@@ -152,6 +152,7 @@ enum QED_RESOURCES {
enum QED_FEATURE { enum QED_FEATURE {
QED_PF_L2_QUE, QED_PF_L2_QUE,
QED_VF,
QED_MAX_FEATURES, QED_MAX_FEATURES,
}; };
...@@ -310,6 +311,8 @@ struct qed_hwfn { ...@@ -310,6 +311,8 @@ struct qed_hwfn {
bool first_on_engine; bool first_on_engine;
bool hw_init_done; bool hw_init_done;
u8 num_funcs_on_engine;
/* BAR access */ /* BAR access */
void __iomem *regview; void __iomem *regview;
void __iomem *doorbells; void __iomem *doorbells;
...@@ -360,6 +363,8 @@ struct qed_hwfn { ...@@ -360,6 +363,8 @@ struct qed_hwfn {
/* True if the driver requests for the link */ /* True if the driver requests for the link */
bool b_drv_link_init; bool b_drv_link_init;
struct qed_vf_iov *vf_iov_info;
struct qed_pf_iov *pf_iov_info;
struct qed_mcp_info *mcp_info; struct qed_mcp_info *mcp_info;
struct qed_hw_cid_data *p_tx_cids; struct qed_hw_cid_data *p_tx_cids;
...@@ -376,6 +381,12 @@ struct qed_hwfn { ...@@ -376,6 +381,12 @@ struct qed_hwfn {
struct qed_simd_fp_handler simd_proto_handler[64]; struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
struct workqueue_struct *iov_wq;
struct delayed_work iov_task;
unsigned long iov_task_flags;
#endif
struct z_stream_s *stream; struct z_stream_s *stream;
}; };
...@@ -484,7 +495,13 @@ struct qed_dev { ...@@ -484,7 +495,13 @@ struct qed_dev {
u8 num_hwfns; u8 num_hwfns;
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE]; struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
/* SRIOV */
struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
unsigned long tunn_mode; unsigned long tunn_mode;
bool b_is_vf;
u32 drv_type; u32 drv_type;
struct qed_eth_stats *reset_stats; struct qed_eth_stats *reset_stats;
...@@ -514,6 +531,8 @@ struct qed_dev { ...@@ -514,6 +531,8 @@ struct qed_dev {
const struct firmware *firmware; const struct firmware *firmware;
}; };
#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB #define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB #define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
...@@ -535,8 +554,10 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, ...@@ -535,8 +554,10 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
#define PURE_LB_TC 8 #define PURE_LB_TC 8
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
/* Other Linux specific common definitions */ /* Other Linux specific common definitions */
......
...@@ -24,11 +24,13 @@ ...@@ -24,11 +24,13 @@
#include "qed_hw.h" #include "qed_hw.h"
#include "qed_init_ops.h" #include "qed_init_ops.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sriov.h"
/* Max number of connection types in HW (DQ/CDU etc.) */ /* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON #define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2 #define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4 #define NUM_TASK_PF_SEGMENTS 4
#define NUM_TASK_VF_SEGMENTS 1
/* QM constants */ /* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */ #define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
...@@ -63,10 +65,12 @@ union conn_context { ...@@ -63,10 +65,12 @@ union conn_context {
struct qed_conn_type_cfg { struct qed_conn_type_cfg {
u32 cid_count; u32 cid_count;
u32 cid_start; u32 cid_start;
u32 cids_per_vf;
}; };
/* ILT Client configuration, Per connection type (protocol) resources. */ /* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) #define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0) #define CDUC_BLK (0)
enum ilt_clients { enum ilt_clients {
...@@ -97,6 +101,10 @@ struct qed_ilt_client_cfg { ...@@ -97,6 +101,10 @@ struct qed_ilt_client_cfg {
/* ILT client blocks for PF */ /* ILT client blocks for PF */
struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS]; struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
u32 pf_total_lines; u32 pf_total_lines;
/* ILT client blocks for VFs */
struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
u32 vf_total_lines;
}; };
/* Per Path - /* Per Path -
...@@ -123,6 +131,11 @@ struct qed_cxt_mngr { ...@@ -123,6 +131,11 @@ struct qed_cxt_mngr {
/* computed ILT structure */ /* computed ILT structure */
struct qed_ilt_client_cfg clients[ILT_CLI_MAX]; struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
/* total number of VFs for this hwfn -
* ALL VFs are symmetric in terms of HW resources
*/
u32 vf_count;
/* Acquired CIDs */ /* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES]; struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
...@@ -131,37 +144,60 @@ struct qed_cxt_mngr { ...@@ -131,37 +144,60 @@ struct qed_cxt_mngr {
u32 pf_start_line; u32 pf_start_line;
}; };
static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr) /* counts the iids for the CDU/CDUC ILT client configuration */
{ struct qed_cdu_iids {
u32 type, pf_cids = 0; u32 pf_cids;
u32 per_vf_cids;
};
for (type = 0; type < MAX_CONN_TYPES; type++) static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
pf_cids += p_mngr->conn_cfg[type].cid_count; struct qed_cdu_iids *iids)
{
u32 type;
return pf_cids; for (type = 0; type < MAX_CONN_TYPES; type++) {
iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
}
} }
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
struct qed_qm_iids *iids) struct qed_qm_iids *iids)
{ {
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
int type; u32 vf_cids = 0, type;
for (type = 0; type < MAX_CONN_TYPES; type++) for (type = 0; type < MAX_CONN_TYPES; type++) {
iids->cids += p_mngr->conn_cfg[type].cid_count; iids->cids += p_mngr->conn_cfg[type].cid_count;
vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
}
DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids); iids->vf_cids += vf_cids * p_mngr->vf_count;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"iids: CIDS %08x vf_cids %08x\n",
iids->cids, iids->vf_cids);
} }
/* set the iids count per protocol */ /* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type, enum protocol_type type,
u32 cid_count) u32 cid_count, u32 vf_cid_cnt)
{ {
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type]; struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN); p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
}
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type,
u32 *vf_cid)
{
if (vf_cid)
*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
} }
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli, static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
...@@ -210,10 +246,12 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) ...@@ -210,10 +246,12 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *p_cli; struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk; struct qed_ilt_cli_blk *p_blk;
u32 curr_line, total, pf_cids; struct qed_cdu_iids cdu_iids;
struct qed_qm_iids qm_iids; struct qed_qm_iids qm_iids;
u32 curr_line, total, i;
memset(&qm_iids, 0, sizeof(qm_iids)); memset(&qm_iids, 0, sizeof(qm_iids));
memset(&cdu_iids, 0, sizeof(cdu_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
...@@ -224,14 +262,16 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) ...@@ -224,14 +262,16 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* CDUC */ /* CDUC */
p_cli = &p_mngr->clients[ILT_CLI_CDUC]; p_cli = &p_mngr->clients[ILT_CLI_CDUC];
curr_line = p_mngr->pf_start_line; curr_line = p_mngr->pf_start_line;
/* CDUC PF */
p_cli->pf_total_lines = 0; p_cli->pf_total_lines = 0;
/* get the counters for the CDUC and QM clients */ /* get the counters for the CDUC and QM clients */
pf_cids = qed_cxt_cdu_iids(p_mngr); qed_cxt_cdu_iids(p_mngr, &cdu_iids);
p_blk = &p_cli->pf_blks[CDUC_BLK]; p_blk = &p_cli->pf_blks[CDUC_BLK];
total = pf_cids * CONN_CXT_SIZE(p_hwfn); total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total, CONN_CXT_SIZE(p_hwfn)); total, CONN_CXT_SIZE(p_hwfn));
...@@ -239,17 +279,36 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) ...@@ -239,17 +279,36 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line; p_cli->pf_total_lines = curr_line - p_blk->start_line;
/* CDUC VF */
p_blk = &p_cli->vf_blks[CDUC_BLK];
total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total, CONN_CXT_SIZE(p_hwfn));
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->vf_total_lines = curr_line - p_blk->start_line;
for (i = 1; i < p_mngr->vf_count; i++)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUC);
/* QM */ /* QM */
p_cli = &p_mngr->clients[ILT_CLI_QM]; p_cli = &p_mngr->clients[ILT_CLI_QM];
p_blk = &p_cli->pf_blks[0]; p_blk = &p_cli->pf_blks[0];
qed_cxt_qm_iids(p_hwfn, &qm_iids); qed_cxt_qm_iids(p_hwfn, &qm_iids);
total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0, total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
p_hwfn->qm_info.num_pqs, 0); qm_iids.vf_cids, 0,
p_hwfn->qm_info.num_pqs,
DP_VERBOSE(p_hwfn, QED_MSG_ILT, p_hwfn->qm_info.num_vf_pqs);
"QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
qm_iids.cids, p_hwfn->qm_info.num_pqs, total); DP_VERBOSE(p_hwfn,
QED_MSG_ILT,
"QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
qm_iids.cids,
qm_iids.vf_cids,
p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
qed_ilt_cli_blk_fill(p_cli, p_blk, qed_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total * 0x1000, curr_line, total * 0x1000,
...@@ -358,7 +417,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) ...@@ -358,7 +417,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *clients = p_mngr->clients; struct qed_ilt_client_cfg *clients = p_mngr->clients;
struct qed_ilt_cli_blk *p_blk; struct qed_ilt_cli_blk *p_blk;
u32 size, i, j; u32 size, i, j, k;
int rc; int rc;
size = qed_cxt_ilt_shadow_size(clients); size = qed_cxt_ilt_shadow_size(clients);
...@@ -383,6 +442,16 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) ...@@ -383,6 +442,16 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
if (rc != 0) if (rc != 0)
goto ilt_shadow_fail; goto ilt_shadow_fail;
} }
for (k = 0; k < p_mngr->vf_count; k++) {
for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
u32 lines = clients[i].vf_total_lines * k;
p_blk = &clients[i].vf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
if (rc != 0)
goto ilt_shadow_fail;
}
}
} }
return 0; return 0;
...@@ -467,6 +536,9 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) ...@@ -467,6 +536,9 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
for (i = 0; i < ILT_CLI_MAX; i++) for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
if (p_hwfn->cdev->p_iov_info)
p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
/* Set the cxt mangr pointer priori to further allocations */ /* Set the cxt mangr pointer priori to further allocations */
p_hwfn->p_cxt_mngr = p_mngr; p_hwfn->p_cxt_mngr = p_mngr;
...@@ -579,8 +651,10 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn) ...@@ -579,8 +651,10 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.is_first_pf = p_hwfn->first_on_engine; params.is_first_pf = p_hwfn->first_on_engine;
params.num_pf_cids = iids.cids; params.num_pf_cids = iids.cids;
params.num_vf_cids = iids.vf_cids;
params.start_pq = qm_info->start_pq; params.start_pq = qm_info->start_pq;
params.num_pf_pqs = qm_info->num_pqs; params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
params.num_vf_pqs = qm_info->num_vf_pqs;
params.start_vport = qm_info->start_vport; params.start_vport = qm_info->start_vport;
params.num_vports = qm_info->num_vports; params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq; params.pf_wfq = qm_info->pf_wfq;
...@@ -610,26 +684,55 @@ static int qed_cm_init_pf(struct qed_hwfn *p_hwfn) ...@@ -610,26 +684,55 @@ static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn) static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{ {
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 dq_pf_max_cid = 0; u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT); dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT); dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT); dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT); dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT); dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
/* 5 - PF */ dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT); dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
/* Connection types 6 & 7 are not in use, yet they must be configured
* as the highest possible connection. Not configuring them means the
* defaults will be used, and with a large number of cids a bug may
* occur, if the defaults will be smaller than dq_pf_max_cid /
* dq_vf_max_cid.
*/
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
} }
static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
...@@ -653,6 +756,38 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) ...@@ -653,6 +756,38 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
} }
} }
static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *p_cli;
u32 blk_factor;
/* For simplicty we set the 'block' to be an ILT page */
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_VF_BASE_RT_OFFSET,
p_iov->first_vf_in_pf);
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
p_iov->first_vf_in_pf + p_iov->total_vfs);
}
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
if (p_cli->active) {
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
blk_factor);
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
p_cli->pf_total_lines);
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
p_cli->vf_total_lines);
}
}
/* ILT (PSWRQ2) PF */ /* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{ {
...@@ -662,6 +797,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) ...@@ -662,6 +797,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
u32 line, rt_offst, i; u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn); qed_ilt_bounds_init(p_hwfn);
qed_ilt_vf_bounds_init(p_hwfn);
p_mngr = p_hwfn->p_cxt_mngr; p_mngr = p_hwfn->p_cxt_mngr;
p_shdw = p_mngr->ilt_shadow; p_shdw = p_mngr->ilt_shadow;
...@@ -839,10 +975,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) ...@@ -839,10 +975,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
/* Set the number of required CORE connections */ /* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */ u32 core_cids = 1; /* SPQ */
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids); qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
p_params->num_cons); p_params->num_cons, 1);
return 0; return 0;
} }
...@@ -51,6 +51,9 @@ enum qed_cxt_elem_type { ...@@ -51,6 +51,9 @@ enum qed_cxt_elem_type {
QED_ELEM_TASK QED_ELEM_TASK
}; };
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *vf_cid);
/** /**
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
* *
......
...@@ -30,6 +30,8 @@ ...@@ -30,6 +30,8 @@
#include "qed_mcp.h" #include "qed_mcp.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
/* API common to all protocols */ /* API common to all protocols */
enum BAR_ID { enum BAR_ID {
...@@ -42,8 +44,12 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, ...@@ -42,8 +44,12 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
{ {
u32 bar_reg = (bar_id == BAR_ID_0 ? u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); u32 val;
if (IS_VF(p_hwfn->cdev))
return 1 << 17;
val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
if (val) if (val)
return 1 << (val + 15); return 1 << (val + 15);
...@@ -113,6 +119,9 @@ void qed_resc_free(struct qed_dev *cdev) ...@@ -113,6 +119,9 @@ void qed_resc_free(struct qed_dev *cdev)
{ {
int i; int i;
if (IS_VF(cdev))
return;
kfree(cdev->fw_data); kfree(cdev->fw_data);
cdev->fw_data = NULL; cdev->fw_data = NULL;
...@@ -136,20 +145,26 @@ void qed_resc_free(struct qed_dev *cdev) ...@@ -136,20 +145,26 @@ void qed_resc_free(struct qed_dev *cdev)
qed_eq_free(p_hwfn, p_hwfn->p_eq); qed_eq_free(p_hwfn, p_hwfn->p_eq);
qed_consq_free(p_hwfn, p_hwfn->p_consq); qed_consq_free(p_hwfn, p_hwfn->p_consq);
qed_int_free(p_hwfn); qed_int_free(p_hwfn);
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn); qed_dmae_info_free(p_hwfn);
} }
} }
static int qed_init_qm_info(struct qed_hwfn *p_hwfn) static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{ {
u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port; struct init_qm_port_params *p_qm_port;
u8 num_vports, i, vport_id, num_ports;
u16 num_pqs, multi_cos_tcs = 1; u16 num_pqs, multi_cos_tcs = 1;
u16 num_vfs = 0;
#ifdef CONFIG_QED_SRIOV
if (p_hwfn->cdev->p_iov_info)
num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
#endif
memset(qm_info, 0, sizeof(*qm_info)); memset(qm_info, 0, sizeof(*qm_info));
num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */ num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT); num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
/* Sanity checking that setup requires legal number of resources */ /* Sanity checking that setup requires legal number of resources */
...@@ -185,8 +200,9 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) ...@@ -185,8 +200,9 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
/* First init per-TC PQs */ /* First init per-TC PQs */
for (i = 0; i < multi_cos_tcs; i++) { for (i = 0; i < multi_cos_tcs; i++, curr_queue++) {
struct init_qm_pq_params *params = &qm_info->qm_pq_params[i]; struct init_qm_pq_params *params =
&qm_info->qm_pq_params[curr_queue];
params->vport_id = vport_id; params->vport_id = vport_id;
params->tc_id = p_hwfn->hw_info.non_offload_tc; params->tc_id = p_hwfn->hw_info.non_offload_tc;
...@@ -194,13 +210,26 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) ...@@ -194,13 +210,26 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
} }
/* Then init pure-LB PQ */ /* Then init pure-LB PQ */
qm_info->pure_lb_pq = i; qm_info->pure_lb_pq = curr_queue;
qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); qm_info->qm_pq_params[curr_queue].vport_id =
qm_info->qm_pq_params[i].tc_id = PURE_LB_TC; (u8) RESC_START(p_hwfn, QED_VPORT);
qm_info->qm_pq_params[i].wrr_group = 1; qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
i++; qm_info->qm_pq_params[curr_queue].wrr_group = 1;
curr_queue++;
qm_info->offload_pq = 0; qm_info->offload_pq = 0;
/* Then init per-VF PQs */
vf_offset = curr_queue;
for (i = 0; i < num_vfs; i++) {
/* First vport is used by the PF */
qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
qm_info->qm_pq_params[curr_queue].tc_id =
p_hwfn->hw_info.non_offload_tc;
qm_info->qm_pq_params[curr_queue].wrr_group = 1;
curr_queue++;
}
qm_info->vf_queues_offset = vf_offset;
qm_info->num_pqs = num_pqs; qm_info->num_pqs = num_pqs;
qm_info->num_vports = num_vports; qm_info->num_vports = num_vports;
...@@ -218,7 +247,8 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) ...@@ -218,7 +247,8 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); qm_info->num_vf_pqs = num_vfs;
qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
for (i = 0; i < qm_info->num_vports; i++) for (i = 0; i < qm_info->num_vports; i++)
qm_info->qm_vport_params[i].vport_wfq = 1; qm_info->qm_vport_params[i].vport_wfq = 1;
...@@ -242,6 +272,9 @@ int qed_resc_alloc(struct qed_dev *cdev) ...@@ -242,6 +272,9 @@ int qed_resc_alloc(struct qed_dev *cdev)
struct qed_eq *p_eq; struct qed_eq *p_eq;
int i, rc = 0; int i, rc = 0;
if (IS_VF(cdev))
return rc;
cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
if (!cdev->fw_data) if (!cdev->fw_data)
return -ENOMEM; return -ENOMEM;
...@@ -316,6 +349,10 @@ int qed_resc_alloc(struct qed_dev *cdev) ...@@ -316,6 +349,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc) if (rc)
goto alloc_err; goto alloc_err;
rc = qed_iov_alloc(p_hwfn);
if (rc)
goto alloc_err;
/* EQ */ /* EQ */
p_eq = qed_eq_alloc(p_hwfn, 256); p_eq = qed_eq_alloc(p_hwfn, 256);
if (!p_eq) { if (!p_eq) {
...@@ -358,6 +395,9 @@ void qed_resc_setup(struct qed_dev *cdev) ...@@ -358,6 +395,9 @@ void qed_resc_setup(struct qed_dev *cdev)
{ {
int i; int i;
if (IS_VF(cdev))
return;
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
...@@ -373,14 +413,15 @@ void qed_resc_setup(struct qed_dev *cdev) ...@@ -373,14 +413,15 @@ void qed_resc_setup(struct qed_dev *cdev)
p_hwfn->mcp_info->mfw_mb_length); p_hwfn->mcp_info->mfw_mb_length);
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
} }
} }
#define FINAL_CLEANUP_POLL_CNT (100) #define FINAL_CLEANUP_POLL_CNT (100)
#define FINAL_CLEANUP_POLL_TIME (10) #define FINAL_CLEANUP_POLL_TIME (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn, int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt, u16 id, bool is_vf)
u16 id)
{ {
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY; int rc = -EBUSY;
...@@ -388,6 +429,9 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, ...@@ -388,6 +429,9 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
addr = GTT_BAR0_MAP_REG_USDM_RAM + addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
if (is_vf)
id += 0x10;
command |= X_FINAL_CLEANUP_AGG_INT << command |= X_FINAL_CLEANUP_AGG_INT <<
SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
...@@ -500,7 +544,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, ...@@ -500,7 +544,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params; struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev; struct qed_dev *cdev = p_hwfn->cdev;
u32 concrete_fid;
int rc = 0; int rc = 0;
u8 vf_id;
qed_init_cau_rt_data(cdev); qed_init_cau_rt_data(cdev);
...@@ -550,6 +596,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, ...@@ -550,6 +596,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, 0x20b4, qed_wr(p_hwfn, p_ptt, 0x20b4,
qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10); qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
}
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
return rc; return rc;
} }
...@@ -612,7 +666,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, ...@@ -612,7 +666,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
/* Cleanup chip from previous driver if such remains exist */ /* Cleanup chip from previous driver if such remains exist */
rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id); rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
if (rc != 0) if (rc != 0)
return rc; return rc;
...@@ -634,7 +688,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, ...@@ -634,7 +688,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_enable(p_hwfn, p_ptt, int_mode); qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
/* send function start command */ /* send function start command */
rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode); rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
allow_npar_tx_switch);
if (rc) if (rc)
DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
} }
...@@ -690,13 +745,20 @@ int qed_hw_init(struct qed_dev *cdev, ...@@ -690,13 +745,20 @@ int qed_hw_init(struct qed_dev *cdev,
u32 load_code, param; u32 load_code, param;
int rc, mfw_rc, i; int rc, mfw_rc, i;
if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data); rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0) if (rc != 0)
return rc; return rc;
}
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (IS_VF(cdev)) {
p_hwfn->b_int_enabled = 1;
continue;
}
/* Enable DMAE in PXP */ /* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
...@@ -821,6 +883,11 @@ int qed_hw_stop(struct qed_dev *cdev) ...@@ -821,6 +883,11 @@ int qed_hw_stop(struct qed_dev *cdev)
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
continue;
}
/* mark the hw as uninitialized... */ /* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false; p_hwfn->hw_init_done = false;
...@@ -852,15 +919,16 @@ int qed_hw_stop(struct qed_dev *cdev) ...@@ -852,15 +919,16 @@ int qed_hw_stop(struct qed_dev *cdev)
usleep_range(1000, 2000); usleep_range(1000, 2000);
} }
if (IS_PF(cdev)) {
/* Disable DMAE in PXP - in CMT, this should only be done for /* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have * first hw-function, and only after all transactions have
* stopped for all active hw-functions. * stopped for all active hw-functions.
*/ */
t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
cdev->hwfns[0].p_main_ptt, cdev->hwfns[0].p_main_ptt, false);
false);
if (t_rc != 0) if (t_rc != 0)
rc = t_rc; rc = t_rc;
}
return rc; return rc;
} }
...@@ -873,6 +941,11 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) ...@@ -873,6 +941,11 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
continue;
}
DP_VERBOSE(p_hwfn, DP_VERBOSE(p_hwfn,
NETIF_MSG_IFDOWN, NETIF_MSG_IFDOWN,
"Shutting down the fastpath\n"); "Shutting down the fastpath\n");
...@@ -895,6 +968,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) ...@@ -895,6 +968,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{ {
if (IS_VF(p_hwfn->cdev))
return;
/* Re-open incoming traffic */ /* Re-open incoming traffic */
qed_wr(p_hwfn, p_hwfn->p_main_ptt, qed_wr(p_hwfn, p_hwfn->p_main_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
...@@ -924,6 +1000,13 @@ int qed_hw_reset(struct qed_dev *cdev) ...@@ -924,6 +1000,13 @@ int qed_hw_reset(struct qed_dev *cdev)
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (IS_VF(cdev)) {
rc = qed_vf_pf_reset(p_hwfn);
if (rc)
return rc;
continue;
}
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n"); DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
/* Check for incorrect states */ /* Check for incorrect states */
...@@ -1019,13 +1102,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -1019,13 +1102,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{ {
u32 *resc_start = p_hwfn->hw_info.resc_start; u32 *resc_start = p_hwfn->hw_info.resc_start;
u8 num_funcs = p_hwfn->num_funcs_on_engine;
u32 *resc_num = p_hwfn->hw_info.resc_num; u32 *resc_num = p_hwfn->hw_info.resc_num;
struct qed_sb_cnt_info sb_cnt_info; struct qed_sb_cnt_info sb_cnt_info;
int num_funcs, i; int i, max_vf_vlan_filters;
num_funcs = MAX_NUM_PFS_BB;
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
#ifdef CONFIG_QED_SRIOV
max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
#else
max_vf_vlan_filters = 0;
#endif
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
resc_num[QED_SB] = min_t(u32, resc_num[QED_SB] = min_t(u32,
...@@ -1230,6 +1319,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, ...@@ -1230,6 +1319,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
} }
static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 reg_function_hide, tmp, eng_mask;
u8 num_funcs;
num_funcs = MAX_NUM_PFS_BB;
/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
* in the other bits are selected.
* Bits 1-15 are for functions 1-15, respectively, and their value is
* '0' only for enabled functions (function 0 always exists and
* enabled).
* In case of CMT, only the "even" functions are enabled, and thus the
* number of functions for both hwfns is learnt from the same bits.
*/
reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
if (reg_function_hide & 0x1) {
if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
num_funcs = 0;
eng_mask = 0xaaaa;
} else {
num_funcs = 1;
eng_mask = 0x5554;
}
/* Get the number of the enabled functions on the engine */
tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
while (tmp) {
if (tmp & 0x1)
num_funcs++;
tmp >>= 0x1;
}
}
p_hwfn->num_funcs_on_engine = num_funcs;
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
"PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
p_hwfn->rel_pf_id,
p_hwfn->abs_pf_id,
p_hwfn->num_funcs_on_engine);
}
static int static int
qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_get_hw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
...@@ -1238,6 +1372,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, ...@@ -1238,6 +1372,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
u32 port_mode; u32 port_mode;
int rc; int rc;
/* Since all information is common, only first hwfns should do this */
if (IS_LEAD_HWFN(p_hwfn)) {
rc = qed_iov_hw_info(p_hwfn);
if (rc)
return rc;
}
/* Read the port mode */ /* Read the port mode */
port_mode = qed_rd(p_hwfn, p_ptt, port_mode = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NW_PORT_MODE_BB_B0); CNIG_REG_NW_PORT_MODE_BB_B0);
...@@ -1281,6 +1422,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, ...@@ -1281,6 +1422,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol; p_hwfn->hw_info.personality = protocol;
} }
qed_get_num_funcs(p_hwfn, p_ptt);
qed_hw_get_resc(p_hwfn); qed_hw_get_resc(p_hwfn);
return rc; return rc;
...@@ -1346,6 +1489,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, ...@@ -1346,6 +1489,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
p_hwfn->regview = p_regview; p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells; p_hwfn->doorbells = p_doorbells;
if (IS_VF(p_hwfn->cdev))
return qed_vf_hw_prepare(p_hwfn);
/* Validate that chip access is feasible */ /* Validate that chip access is feasible */
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn, DP_ERR(p_hwfn,
...@@ -1397,6 +1543,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, ...@@ -1397,6 +1543,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
return rc; return rc;
err2: err2:
if (IS_LEAD_HWFN(p_hwfn))
qed_iov_free_hw_info(p_hwfn->cdev);
qed_mcp_free(p_hwfn); qed_mcp_free(p_hwfn);
err1: err1:
qed_hw_hwfn_free(p_hwfn); qed_hw_hwfn_free(p_hwfn);
...@@ -1411,6 +1559,7 @@ int qed_hw_prepare(struct qed_dev *cdev, ...@@ -1411,6 +1559,7 @@ int qed_hw_prepare(struct qed_dev *cdev,
int rc; int rc;
/* Store the precompiled init data ptrs */ /* Store the precompiled init data ptrs */
if (IS_PF(cdev))
qed_init_iro_array(cdev); qed_init_iro_array(cdev);
/* Initialize the first hwfn - will learn number of hwfns */ /* Initialize the first hwfn - will learn number of hwfns */
...@@ -1443,11 +1592,13 @@ int qed_hw_prepare(struct qed_dev *cdev, ...@@ -1443,11 +1592,13 @@ int qed_hw_prepare(struct qed_dev *cdev,
* initiliazed hwfn 0. * initiliazed hwfn 0.
*/ */
if (rc) { if (rc) {
if (IS_PF(cdev)) {
qed_init_free(p_hwfn); qed_init_free(p_hwfn);
qed_mcp_free(p_hwfn); qed_mcp_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn); qed_hw_hwfn_free(p_hwfn);
} }
} }
}
return rc; return rc;
} }
...@@ -1459,10 +1610,17 @@ void qed_hw_remove(struct qed_dev *cdev) ...@@ -1459,10 +1610,17 @@ void qed_hw_remove(struct qed_dev *cdev)
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (IS_VF(cdev)) {
qed_vf_pf_release(p_hwfn);
continue;
}
qed_init_free(p_hwfn); qed_init_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn); qed_hw_hwfn_free(p_hwfn);
qed_mcp_free(p_hwfn); qed_mcp_free(p_hwfn);
} }
qed_iov_free_hw_info(cdev);
} }
int qed_chain_alloc(struct qed_dev *cdev, int qed_chain_alloc(struct qed_dev *cdev,
...@@ -1732,6 +1890,32 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, ...@@ -1732,6 +1890,32 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
return 0; return 0;
} }
static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
struct qed_mcp_link_state *p_link;
int rc = 0;
p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
if (!p_link->min_pf_rate) {
p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
p_hwfn->qm_info.wfq_data[vp_id].configured = true;
return rc;
}
rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
if (rc == 0)
qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
p_link->min_pf_rate);
else
DP_NOTICE(p_hwfn,
"Validation failed while configuring min rate\n");
return rc;
}
static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
u32 min_pf_rate) u32 min_pf_rate)
...@@ -1766,6 +1950,42 @@ static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, ...@@ -1766,6 +1950,42 @@ static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
return rc; return rc;
} }
/* Main API for qed clients to configure vport min rate.
* vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
* rate - Speed in Mbps needs to be assigned to a given vport.
*/
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
int i, rc = -EINVAL;
/* Currently not supported; Might change in future */
if (cdev->num_hwfns > 1) {
DP_NOTICE(cdev,
"WFQ configuration is not supported for this device\n");
return rc;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct qed_ptt *p_ptt;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EBUSY;
rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
if (!rc) {
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
qed_ptt_release(p_hwfn, p_ptt);
}
return rc;
}
/* API to configure WFQ from mcp link change */ /* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{ {
...@@ -1912,3 +2132,17 @@ int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw) ...@@ -1912,3 +2132,17 @@ int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
return rc; return rc;
} }
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_link_state *p_link;
p_link = &p_hwfn->mcp_info->link_output;
if (p_link->min_pf_rate)
qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
p_link->min_pf_rate);
memset(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}
...@@ -183,10 +183,14 @@ enum qed_dmae_address_type_t { ...@@ -183,10 +183,14 @@ enum qed_dmae_address_type_t {
* using DMA * using DMA
*/ */
#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001 #define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
#define QED_DMAE_FLAG_VF_SRC 0x00000002
#define QED_DMAE_FLAG_VF_DST 0x00000004
#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008 #define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
struct qed_dmae_params { struct qed_dmae_params {
u32 flags; /* consists of QED_DMAE_FLAG_* values */ u32 flags; /* consists of QED_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
}; };
/** /**
...@@ -208,6 +212,23 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn, ...@@ -208,6 +212,23 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u32 size_in_dwords, u32 size_in_dwords,
u32 flags); u32 flags);
/**
* @brief qed_dmae_host2host - copy data from to source address
* to a destination adress (for SRIOV) using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param dest_addr
* @param size_in_dwords
* @param params
*/
int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t source_addr,
dma_addr_t dest_addr,
u32 size_in_dwords, struct qed_dmae_params *p_params);
/** /**
* @brief qed_chain_alloc - Allocate and initialize a chain * @brief qed_chain_alloc - Allocate and initialize a chain
* *
...@@ -282,11 +303,11 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, ...@@ -282,11 +303,11 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
* @param p_hwfn * @param p_hwfn
* @param p_ptt * @param p_ptt
* @param id - For PF, engine-relative. For VF, PF-relative. * @param id - For PF, engine-relative. For VF, PF-relative.
* @param is_vf - true iff cleanup is made for a VF.
* *
* @return int * @return int
*/ */
int qed_final_cleanup(struct qed_hwfn *p_hwfn, int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt, u16 id, bool is_vf);
u16 id);
#endif #endif
...@@ -29,9 +29,9 @@ struct qed_ptt; ...@@ -29,9 +29,9 @@ struct qed_ptt;
enum common_event_opcode { enum common_event_opcode {
COMMON_EVENT_PF_START, COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP, COMMON_EVENT_PF_STOP,
COMMON_EVENT_RESERVED, COMMON_EVENT_VF_START,
COMMON_EVENT_RESERVED2, COMMON_EVENT_VF_STOP,
COMMON_EVENT_RESERVED3, COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_RESERVED4, COMMON_EVENT_RESERVED4,
COMMON_EVENT_RESERVED5, COMMON_EVENT_RESERVED5,
COMMON_EVENT_RESERVED6, COMMON_EVENT_RESERVED6,
...@@ -44,8 +44,8 @@ enum common_ramrod_cmd_id { ...@@ -44,8 +44,8 @@ enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED, COMMON_RAMROD_UNUSED,
COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
COMMON_RAMROD_RESERVED, COMMON_RAMROD_VF_START,
COMMON_RAMROD_RESERVED2, COMMON_RAMROD_VF_STOP,
COMMON_RAMROD_PF_UPDATE, COMMON_RAMROD_PF_UPDATE,
COMMON_RAMROD_EMPTY, COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID MAX_COMMON_RAMROD_CMD_ID
...@@ -573,6 +573,14 @@ union event_ring_element { ...@@ -573,6 +573,14 @@ union event_ring_element {
struct event_ring_next_addr next_addr; struct event_ring_next_addr next_addr;
}; };
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
};
struct mstorm_vf_zone {
struct mstorm_non_trigger_vf_zone non_trigger;
};
enum personality_type { enum personality_type {
BAD_PERSONALITY_TYP, BAD_PERSONALITY_TYP,
PERSONALITY_RESERVED, PERSONALITY_RESERVED,
...@@ -671,6 +679,16 @@ enum ports_mode { ...@@ -671,6 +679,16 @@ enum ports_mode {
MAX_PORTS_MODE MAX_PORTS_MODE
}; };
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
struct regpair reserved[2];
};
struct pstorm_vf_zone {
struct pstorm_non_trigger_vf_zone non_trigger;
struct regpair reserved[7];
};
/* Ramrod Header of SPQE */ /* Ramrod Header of SPQE */
struct ramrod_header { struct ramrod_header {
__le32 cid /* Slowpath Connection CID */; __le32 cid /* Slowpath Connection CID */;
...@@ -700,6 +718,36 @@ struct tstorm_per_port_stat { ...@@ -700,6 +718,36 @@ struct tstorm_per_port_stat {
struct regpair preroce_irregular_pkt; struct regpair preroce_irregular_pkt;
}; };
struct ustorm_non_trigger_vf_zone {
struct eth_ustorm_per_queue_stat eth_queue_stat;
struct regpair vf_pf_msg_addr;
};
struct ustorm_trigger_vf_zone {
u8 vf_pf_msg_valid;
u8 reserved[7];
};
struct ustorm_vf_zone {
struct ustorm_non_trigger_vf_zone non_trigger;
struct ustorm_trigger_vf_zone trigger;
};
struct vf_start_ramrod_data {
u8 vf_id;
u8 enable_flr_ack;
__le16 opaque_fid;
u8 personality;
u8 reserved[3];
};
struct vf_stop_ramrod_data {
u8 vf_id;
u8 reserved0;
__le16 reserved1;
__le32 reserved2;
};
struct atten_status_block { struct atten_status_block {
__le32 atten_bits; __le32 atten_bits;
__le32 atten_ack; __le32 atten_ack;
...@@ -1026,7 +1074,7 @@ enum init_phases { ...@@ -1026,7 +1074,7 @@ enum init_phases {
PHASE_ENGINE, PHASE_ENGINE,
PHASE_PORT, PHASE_PORT,
PHASE_PF, PHASE_PF,
PHASE_RESERVED, PHASE_VF,
PHASE_QM_PF, PHASE_QM_PF,
MAX_INIT_PHASES MAX_INIT_PHASES
}; };
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include "qed_hsi.h" #include "qed_hsi.h"
#include "qed_hw.h" #include "qed_hw.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_BAR_ACQUIRE_TIMEOUT 1000 #define QED_BAR_ACQUIRE_TIMEOUT 1000
...@@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, ...@@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
quota = min_t(size_t, n - done, quota = min_t(size_t, n - done,
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE); PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
if (IS_PF(p_hwfn->cdev)) {
qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
hw_offset = qed_ptt_get_bar_addr(p_ptt); hw_offset = qed_ptt_get_bar_addr(p_ptt);
} else {
hw_offset = hw_addr + done;
}
dw_count = quota / 4; dw_count = quota / 4;
host_addr = (u32 *)((u8 *)addr + done); host_addr = (u32 *)((u8 *)addr + done);
...@@ -338,14 +343,25 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, ...@@ -338,14 +343,25 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
*(u32 *)&p_ptt->pxp.pretend); *(u32 *)&p_ptt->pxp.pretend);
} }
u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
u32 concrete_fid = 0;
SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
return concrete_fid;
}
/* DMAE */ /* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc, const u8 is_src_type_grc,
const u8 is_dst_type_grc, const u8 is_dst_type_grc,
struct qed_dmae_params *p_params) struct qed_dmae_params *p_params)
{ {
u16 opcode_b = 0;
u32 opcode = 0; u32 opcode = 0;
u16 opcodeB = 0;
/* Whether the source is the PCIe or the GRC. /* Whether the source is the PCIe or the GRC.
* 0- The source is the PCIe * 0- The source is the PCIe
...@@ -387,14 +403,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, ...@@ -387,14 +403,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK << opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
DMAE_CMD_DST_ADDR_RESET_SHIFT); DMAE_CMD_DST_ADDR_RESET_SHIFT);
opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK << /* SRC/DST VFID: all 1's - pf, otherwise VF id */
DMAE_CMD_SRC_VF_ID_SHIFT); if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
} else {
opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
DMAE_CMD_SRC_VF_ID_SHIFT;
}
opcodeB |= (DMAE_CMD_DST_VF_ID_MASK << if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
DMAE_CMD_DST_VF_ID_SHIFT); opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
} else {
opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
}
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode); p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB); p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
} }
u32 qed_dmae_idx_to_go_cmd(u8 idx) u32 qed_dmae_idx_to_go_cmd(u8 idx)
...@@ -742,6 +768,28 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, ...@@ -742,6 +768,28 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc; return rc;
} }
int
qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t source_addr,
dma_addr_t dest_addr,
u32 size_in_dwords, struct qed_dmae_params *p_params)
{
int rc;
mutex_lock(&(p_hwfn->dmae_info.mutex));
rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
dest_addr,
QED_DMAE_ADDRESS_HOST_PHYS,
QED_DMAE_ADDRESS_HOST_PHYS,
size_in_dwords, p_params);
mutex_unlock(&(p_hwfn->dmae_info.mutex));
return rc;
}
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
enum protocol_type proto, enum protocol_type proto,
union qed_qm_pq_params *p_params) union qed_qm_pq_params *p_params)
...@@ -765,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, ...@@ -765,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
break; break;
case PROTOCOLID_ETH: case PROTOCOLID_ETH:
pq_id = p_params->eth.tc; pq_id = p_params->eth.tc;
if (p_params->eth.is_vf)
pq_id += p_hwfn->qm_info.vf_queues_offset +
p_params->eth.vf_id;
break; break;
default: default:
pq_id = 0; pq_id = 0;
......
...@@ -220,6 +220,16 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn, ...@@ -220,6 +220,16 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
void qed_port_unpretend(struct qed_hwfn *p_hwfn, void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt); struct qed_ptt *p_ptt);
/**
* @brief qed_vfid_to_concrete - build a concrete FID for a
* given VF ID
*
* @param p_hwfn
* @param p_ptt
* @param vfid
*/
u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
/** /**
* @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
* this is declared here since other files will require it. * this is declared here since other files will require it.
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include "qed_hw.h" #include "qed_hw.h"
#include "qed_init_ops.h" #include "qed_init_ops.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_INIT_MAX_POLL_COUNT 100 #define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500 #define QED_INIT_POLL_PERIOD_US 500
...@@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn) ...@@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
{ {
struct qed_rt_data *rt_data = &p_hwfn->rt_data; struct qed_rt_data *rt_data = &p_hwfn->rt_data;
if (IS_VF(p_hwfn->cdev))
return 0;
rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE, rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
GFP_KERNEL); GFP_KERNEL);
if (!rt_data->b_valid) if (!rt_data->b_valid)
......
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
#include "qed_mcp.h" #include "qed_mcp.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
struct qed_pi_info { struct qed_pi_info {
qed_int_comp_cb_t comp_cb; qed_int_comp_cb_t comp_cb;
...@@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, ...@@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u32 sb_offset; u32 sb_offset;
u32 pi_offset; u32 pi_offset;
if (IS_VF(p_hwfn->cdev))
return;
sb_offset = igu_sb_id * PIS_PER_SB; sb_offset = igu_sb_id * PIS_PER_SB;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
...@@ -2542,6 +2547,7 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, ...@@ -2542,6 +2547,7 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
sb_info->sb_ack = 0; sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
if (IS_PF(p_hwfn->cdev))
qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
sb_info->igu_sb_id, 0, 0); sb_info->igu_sb_id, 0, 0);
} }
...@@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, ...@@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
/* Assuming continuous set of IGU SBs dedicated for given PF */ /* Assuming continuous set of IGU SBs dedicated for given PF */
if (sb_id == QED_SP_SB_ID) if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
else else if (IS_PF(p_hwfn->cdev))
igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
else
igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n", DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
(sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id); (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
...@@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, ...@@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
/* The igu address will hold the absolute address that needs to be /* The igu address will hold the absolute address that needs to be
* written to for a specific status block * written to for a specific status block
*/ */
if (IS_PF(p_hwfn->cdev)) {
sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD + GTT_BAR0_MAP_REG_IGU_CMD +
(sb_info->igu_sb_id << 3); (sb_info->igu_sb_id << 3);
} else {
sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
PXP_VF_BAR0_START_IGU +
((IGU_CMD_INT_ACK_BASE +
sb_info->igu_sb_id) << 3);
}
sb_info->flags |= QED_SB_INFO_INIT; sb_info->flags |= QED_SB_INFO_INIT;
...@@ -2783,6 +2798,9 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, ...@@ -2783,6 +2798,9 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
{ {
p_hwfn->b_int_enabled = 0; p_hwfn->b_int_enabled = 0;
if (IS_VF(p_hwfn->cdev))
return;
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
} }
...@@ -2935,9 +2953,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, ...@@ -2935,9 +2953,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt) struct qed_ptt *p_ptt)
{ {
struct qed_igu_info *p_igu_info; struct qed_igu_info *p_igu_info;
u32 val, min_vf = 0, max_vf = 0;
u16 sb_id, last_iov_sb_id = 0;
struct qed_igu_block *blk; struct qed_igu_block *blk;
u32 val;
u16 sb_id;
u16 prev_sb_id = 0xFF; u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
...@@ -2947,12 +2965,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, ...@@ -2947,12 +2965,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
p_igu_info = p_hwfn->hw_info.p_igu_info; p_igu_info = p_hwfn->hw_info.p_igu_info;
/* Initialize base sb / sb cnt for PFs */ /* Initialize base sb / sb cnt for PFs and VFs */
p_igu_info->igu_base_sb = 0xffff; p_igu_info->igu_base_sb = 0xffff;
p_igu_info->igu_sb_cnt = 0; p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_dsb_id = 0xffff; p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff; p_igu_info->igu_base_sb_iov = 0xffff;
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
min_vf = p_iov->first_vf_in_pf;
max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
}
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
sb_id++) { sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id]; blk = &p_igu_info->igu_map.igu_blocks[sb_id];
...@@ -2986,13 +3011,42 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, ...@@ -2986,13 +3011,42 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
(p_igu_info->igu_sb_cnt)++; (p_igu_info->igu_sb_cnt)++;
} }
} }
} else {
if ((blk->function_id >= min_vf) &&
(blk->function_id < max_vf)) {
/* Available for VFs of this PF */
if (p_igu_info->igu_base_sb_iov == 0xffff) {
p_igu_info->igu_base_sb_iov = sb_id;
} else if (last_iov_sb_id != sb_id - 1) {
if (!val) {
DP_VERBOSE(p_hwfn->cdev,
NETIF_MSG_INTR,
"First uninitialized IGU CAM entry at index 0x%04x\n",
sb_id);
} else {
DP_NOTICE(p_hwfn->cdev,
"Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
p_hwfn->rel_pf_id,
last_iov_sb_id,
sb_id);
}
break;
} }
blk->status |= QED_IGU_STATUS_FREE;
p_hwfn->hw_info.p_igu_info->free_blks++;
last_iov_sb_id = sb_id;
} }
}
}
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, DP_VERBOSE(
"IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", p_hwfn,
NETIF_MSG_INTR,
"IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
p_igu_info->igu_base_sb, p_igu_info->igu_base_sb,
p_igu_info->igu_base_sb_iov,
p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt,
p_igu_info->igu_sb_cnt_iov,
p_igu_info->igu_dsb_id); p_igu_info->igu_dsb_id);
if (p_igu_info->igu_base_sb == 0xffff || if (p_igu_info->igu_base_sb == 0xffff ||
...@@ -3116,6 +3170,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, ...@@ -3116,6 +3170,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
p_sb_cnt_info->sb_free_blk = info->free_blks; p_sb_cnt_info->sb_free_blk = info->free_blks;
} }
u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
/* Determine origin of SB id */
if ((sb_id >= p_info->igu_base_sb) &&
(sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
return sb_id - p_info->igu_base_sb;
} else if ((sb_id >= p_info->igu_base_sb_iov) &&
(sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
} else {
DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
return 0;
}
}
void qed_int_disable_post_isr_release(struct qed_dev *cdev) void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{ {
int i; int i;
......
...@@ -20,6 +20,12 @@ ...@@ -20,6 +20,12 @@
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */ #define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ #define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */ #define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
/* Fields of IGU VF CONFIGURATION REGISTER */ /* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
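Together these fields describe the layout of a VF's IGU configuration word: the enable bits sit in the low-order bits and the parent PF is encoded at IGU_VF_CONF_PARENT_SHIFT. An illustrative (hypothetical) helper composing such a value from the defines above:

/* Illustrative only: build an IGU VF configuration value for a VF whose
 * parent PF is 'pf_id', with the function and MSI/MSI-X enabled.
 */
static u32 igu_vf_conf_example(u8 pf_id)
{
	u32 val = IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN;

	val |= (pf_id & IGU_VF_CONF_PARENT_MASK) << IGU_VF_CONF_PARENT_SHIFT;

	return val;
}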
/* Igu control commands /* Igu control commands
*/ */
...@@ -364,6 +370,16 @@ void qed_int_free(struct qed_hwfn *p_hwfn); ...@@ -364,6 +370,16 @@ void qed_int_free(struct qed_hwfn *p_hwfn);
void qed_int_setup(struct qed_hwfn *p_hwfn, void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt); struct qed_ptt *p_ptt);
/**
* @brief - Returns the Rx queue index appropriate for use with the given SB.
*
* @param p_hwfn
* @param sb_id - absolute index of SB
*
* @return index of Rx queue
*/
u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
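For example, a caller that obtained an absolute IGU SB index (e.g. while walking the IGU CAM) could translate it back to a function-relative queue index; a hypothetical usage fragment:

/* Hypothetical caller: sb_id is an absolute IGU SB index; the helper
 * returns the relative L2 queue index (PF range first, then VF range),
 * or 0 with a DP_NOTICE if the SB does not belong to this function.
 */
u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn, sb_id);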
/** /**
* @brief - Enable Interrupt & Attention for hw function * @brief - Enable Interrupt & Attention for hw function
* *
......
...@@ -31,124 +31,25 @@ ...@@ -31,124 +31,25 @@
#include "qed_hsi.h" #include "qed_hsi.h"
#include "qed_hw.h" #include "qed_hw.h"
#include "qed_int.h" #include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h" #include "qed_mcp.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h"
struct qed_rss_params {
u8 update_rss_config;
u8 rss_enable;
u8 rss_eng_id;
u8 update_rss_capabilities;
u8 update_rss_ind_table;
u8 update_rss_key;
u8 rss_caps;
u8 rss_table_size_log;
u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
u32 rss_key[QED_RSS_KEY_SIZE];
};
enum qed_filter_opcode {
QED_FILTER_ADD,
QED_FILTER_REMOVE,
QED_FILTER_MOVE,
QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
QED_FILTER_FLUSH, /* Removes all filters */
};
enum qed_filter_ucast_type {
QED_FILTER_MAC,
QED_FILTER_VLAN,
QED_FILTER_MAC_VLAN,
QED_FILTER_INNER_MAC,
QED_FILTER_INNER_VLAN,
QED_FILTER_INNER_PAIR,
QED_FILTER_INNER_MAC_VNI_PAIR,
QED_FILTER_MAC_VNI_PAIR,
QED_FILTER_VNI,
};
struct qed_filter_ucast {
enum qed_filter_opcode opcode;
enum qed_filter_ucast_type type;
u8 is_rx_filter;
u8 is_tx_filter;
u8 vport_to_add_to;
u8 vport_to_remove_from;
unsigned char mac[ETH_ALEN];
u8 assert_on_error;
u16 vlan;
u32 vni;
};
struct qed_filter_mcast {
/* MOVE is not supported for multicast */
enum qed_filter_opcode opcode;
u8 vport_to_add_to;
u8 vport_to_remove_from;
u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS 64
unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
u8 update_tx_mode_config;
u8 rx_accept_filter;
u8 tx_accept_filter;
#define QED_ACCEPT_NONE 0x01
#define QED_ACCEPT_UCAST_MATCHED 0x02
#define QED_ACCEPT_UCAST_UNMATCHED 0x04
#define QED_ACCEPT_MCAST_MATCHED 0x08
#define QED_ACCEPT_MCAST_UNMATCHED 0x10
#define QED_ACCEPT_BCAST 0x20
};
struct qed_sp_vport_update_params {
u16 opaque_fid;
u8 vport_id;
u8 update_vport_active_rx_flg;
u8 vport_active_rx_flg;
u8 update_vport_active_tx_flg;
u8 vport_active_tx_flg;
u8 update_approx_mcast_flg;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
unsigned long bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
};
enum qed_tpa_mode {
QED_TPA_MODE_NONE,
QED_TPA_MODE_UNUSED,
QED_TPA_MODE_GRO,
QED_TPA_MODE_MAX
};
struct qed_sp_vport_start_params {
enum qed_tpa_mode tpa_mode;
bool remove_inner_vlan;
bool drop_ttl0;
u8 max_buffers_per_cqe;
u32 concrete_fid;
u16 opaque_fid;
u8 vport_id;
u16 mtu;
};
#define QED_MAX_SGES_NUM 16 #define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41 #define CRC32_POLY 0x1edc6f41
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params) struct qed_sp_vport_start_params *p_params)
{ {
struct vport_start_ramrod_data *p_ramrod = NULL; struct vport_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data; struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
int rc = -EINVAL; int rc = -EINVAL;
u16 rx_mode = 0; u16 rx_mode = 0;
u8 abs_vport_id = 0;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0) if (rc != 0)
...@@ -198,6 +99,8 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, ...@@ -198,6 +99,8 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
break; break;
} }
p_ramrod->tx_switching_en = p_params->tx_switching;
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
p_params->concrete_fid); p_params->concrete_fid);
...@@ -205,6 +108,21 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, ...@@ -205,6 +108,21 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL); return qed_spq_post(p_hwfn, p_ent, NULL);
} }
int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params)
{
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
p_params->mtu,
p_params->remove_inner_vlan,
p_params->tpa_mode,
p_params->max_buffers_per_cqe,
p_params->only_untagged);
}
return qed_sp_eth_vport_start(p_hwfn, p_params);
}
static int static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod, struct vport_update_ramrod_data *p_ramrod,
...@@ -349,6 +267,38 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, ...@@ -349,6 +267,38 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
} }
} }
static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
struct qed_sge_tpa_params *p_params)
{
struct eth_vport_tpa_param *p_tpa;
if (!p_params) {
p_ramrod->common.update_tpa_param_flg = 0;
p_ramrod->common.update_tpa_en_flg = 0;
return;
}
p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
p_tpa = &p_ramrod->tpa_param;
p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
p_tpa->tpa_max_size = p_params->tpa_max_size;
p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}
static void static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod, struct vport_update_ramrod_data *p_ramrod,
...@@ -370,8 +320,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, ...@@ -370,8 +320,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
} }
} }
static int int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params, struct qed_sp_vport_update_params *p_params,
enum spq_mode comp_mode, enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data) struct qed_spq_comp_cb *p_comp_data)
...@@ -381,9 +330,14 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, ...@@ -381,9 +330,14 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_init_data init_data; struct qed_sp_init_data init_data;
struct vport_update_ramrod_data *p_ramrod = NULL; struct vport_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
u8 abs_vport_id = 0; u8 abs_vport_id = 0, val;
int rc = -EINVAL; int rc = -EINVAL;
if (IS_VF(p_hwfn->cdev)) {
rc = qed_vf_pf_vport_update(p_hwfn, p_params);
return rc;
}
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0) if (rc != 0)
return rc; return rc;
...@@ -412,6 +366,27 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, ...@@ -412,6 +366,27 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->accept_any_vlan = p_params->accept_any_vlan; p_cmn->accept_any_vlan = p_params->accept_any_vlan;
p_cmn->update_accept_any_vlan_flg = p_cmn->update_accept_any_vlan_flg =
p_params->update_accept_any_vlan_flg; p_params->update_accept_any_vlan_flg;
p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
val = p_params->update_inner_vlan_removal_flg;
p_cmn->update_inner_vlan_removal_en_flg = val;
p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
val = p_params->update_default_vlan_enable_flg;
p_cmn->update_default_vlan_en_flg = val;
p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
val = p_params->update_anti_spoofing_en_flg;
p_ramrod->common.update_anti_spoofing_en_flg = val;
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) { if (rc) {
/* Return spq entry which is taken in qed_sp_init_request()*/ /* Return spq entry which is taken in qed_sp_init_request()*/
...@@ -423,12 +398,11 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, ...@@ -423,12 +398,11 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
return qed_spq_post(p_hwfn, p_ent, NULL); return qed_spq_post(p_hwfn, p_ent, NULL);
} }
static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
u16 opaque_fid,
u8 vport_id)
{ {
struct vport_stop_ramrod_data *p_ramrod; struct vport_stop_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data; struct qed_sp_init_data init_data;
...@@ -436,6 +410,9 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, ...@@ -436,6 +410,9 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0; u8 abs_vport_id = 0;
int rc; int rc;
if (IS_VF(p_hwfn->cdev))
return qed_vf_pf_vport_stop(p_hwfn);
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
if (rc != 0) if (rc != 0)
return rc; return rc;
...@@ -457,6 +434,19 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, ...@@ -457,6 +434,19 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL); return qed_spq_post(p_hwfn, p_ent, NULL);
} }
static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
struct qed_filter_accept_flags *p_accept_flags)
{
struct qed_sp_vport_update_params s_params;
memset(&s_params, 0, sizeof(s_params));
memcpy(&s_params.accept_flags, p_accept_flags,
sizeof(struct qed_filter_accept_flags));
return qed_vf_pf_vport_update(p_hwfn, &s_params);
}
static int qed_filter_accept_cmd(struct qed_dev *cdev, static int qed_filter_accept_cmd(struct qed_dev *cdev,
u8 vport, u8 vport,
struct qed_filter_accept_flags accept_flags, struct qed_filter_accept_flags accept_flags,
...@@ -480,6 +470,13 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, ...@@ -480,6 +470,13 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
if (IS_VF(cdev)) {
rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
if (rc)
return rc;
continue;
}
rc = qed_sp_vport_update(p_hwfn, &vport_update_params, rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data); comp_mode, p_comp_data);
if (rc != 0) { if (rc != 0) {
...@@ -514,16 +511,14 @@ static int qed_sp_release_queue_cid( ...@@ -514,16 +511,14 @@ static int qed_sp_release_queue_cid(
return 0; return 0;
} }
static int int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid, u16 opaque_fid,
u32 cid, u32 cid,
struct qed_queue_start_common_params *params, struct qed_queue_start_common_params *params,
u8 stats_id, u8 stats_id,
u16 bd_max_bytes, u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr, dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
u16 cqe_pbl_size)
{ {
struct rx_queue_start_ramrod_data *p_ramrod = NULL; struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
...@@ -592,8 +587,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -592,8 +587,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 bd_max_bytes, u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr, dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr, dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size, u16 cqe_pbl_size, void __iomem **pp_prod)
void __iomem **pp_prod)
{ {
struct qed_hw_cid_data *p_rx_cid; struct qed_hw_cid_data *p_rx_cid;
u64 init_prod_val = 0; u64 init_prod_val = 0;
...@@ -601,6 +595,16 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -601,6 +595,16 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u8 abs_stats_id = 0; u8 abs_stats_id = 0;
int rc; int rc;
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_rxq_start(p_hwfn,
params->queue_id,
params->sb,
params->sb_idx,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr, cqe_pbl_size, pp_prod);
}
rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue); rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
if (rc != 0) if (rc != 0)
return rc; return rc;
...@@ -643,10 +647,59 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -643,10 +647,59 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc; return rc;
} }
static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
u16 rx_queue_id,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
struct rx_queue_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_rx_cid;
u16 qid, abs_rx_q_id = 0;
int rc = -EINVAL;
u8 i;
memset(&init_data, 0, sizeof(init_data));
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_data;
for (i = 0; i < num_rxqs; i++) {
qid = rx_queue_id + i;
p_rx_cid = &p_hwfn->p_rx_cids[qid];
/* Get SPQ entry */
init_data.cid = p_rx_cid->cid;
init_data.opaque_fid = p_rx_cid->opaque_fid;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_RX_QUEUE_UPDATE,
PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_update;
qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
p_ramrod->complete_cqe_flg = complete_cqe_flg;
p_ramrod->complete_event_flg = complete_event_flg;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (rc)
return rc;
}
return rc;
}
int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 rx_queue_id, u16 rx_queue_id,
bool eq_completion_only, bool eq_completion_only, bool cqe_completion)
bool cqe_completion)
{ {
struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = NULL; struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
...@@ -655,6 +708,9 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, ...@@ -655,6 +708,9 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 abs_rx_q_id = 0; u16 abs_rx_q_id = 0;
int rc = -EINVAL; int rc = -EINVAL;
if (IS_VF(p_hwfn->cdev))
return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
/* Get SPQ entry */ /* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data)); memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_rx_cid->cid; init_data.cid = p_rx_cid->cid;
...@@ -690,8 +746,7 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, ...@@ -690,8 +746,7 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
return qed_sp_release_queue_cid(p_hwfn, p_rx_cid); return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
} }
static int int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid, u16 opaque_fid,
u32 cid, u32 cid,
struct qed_queue_start_common_params *p_params, struct qed_queue_start_common_params *p_params,
...@@ -752,14 +807,21 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -752,14 +807,21 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
u16 opaque_fid, u16 opaque_fid,
struct qed_queue_start_common_params *p_params, struct qed_queue_start_common_params *p_params,
dma_addr_t pbl_addr, dma_addr_t pbl_addr,
u16 pbl_size, u16 pbl_size, void __iomem **pp_doorbell)
void __iomem **pp_doorbell)
{ {
struct qed_hw_cid_data *p_tx_cid; struct qed_hw_cid_data *p_tx_cid;
union qed_qm_pq_params pq_params; union qed_qm_pq_params pq_params;
u8 abs_stats_id = 0; u8 abs_stats_id = 0;
int rc; int rc;
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_txq_start(p_hwfn,
p_params->queue_id,
p_params->sb,
p_params->sb_idx,
pbl_addr, pbl_size, pp_doorbell);
}
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
if (rc) if (rc)
return rc; return rc;
...@@ -800,14 +862,16 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -800,14 +862,16 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
return rc; return rc;
} }
static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
u16 tx_queue_id)
{ {
struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data; struct qed_sp_init_data init_data;
int rc = -EINVAL; int rc = -EINVAL;
if (IS_VF(p_hwfn->cdev))
return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
/* Get SPQ entry */ /* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data)); memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_tx_cid->cid; init_data.cid = p_tx_cid->cid;
...@@ -1003,7 +1067,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, ...@@ -1003,7 +1067,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
return 0; return 0;
} }
static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
u16 opaque_fid, u16 opaque_fid,
struct qed_filter_ucast *p_filter_cmd, struct qed_filter_ucast *p_filter_cmd,
enum spq_mode comp_mode, enum spq_mode comp_mode,
...@@ -1105,7 +1169,7 @@ static inline u32 qed_crc32c_le(u32 seed, ...@@ -1105,7 +1169,7 @@ static inline u32 qed_crc32c_le(u32 seed,
return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0); return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
} }
static u8 qed_mcast_bin_from_mac(u8 *mac) u8 qed_mcast_bin_from_mac(u8 *mac)
{ {
u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
mac, ETH_ALEN); mac, ETH_ALEN);
...@@ -1188,8 +1252,7 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, ...@@ -1188,8 +1252,7 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL); return qed_spq_post(p_hwfn, p_ent, NULL);
} }
static int static int qed_filter_mcast_cmd(struct qed_dev *cdev,
qed_filter_mcast_cmd(struct qed_dev *cdev,
struct qed_filter_mcast *p_filter_cmd, struct qed_filter_mcast *p_filter_cmd,
enum spq_mode comp_mode, enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data) struct qed_spq_comp_cb *p_comp_data)
...@@ -1208,8 +1271,10 @@ qed_filter_mcast_cmd(struct qed_dev *cdev, ...@@ -1208,8 +1271,10 @@ qed_filter_mcast_cmd(struct qed_dev *cdev,
u16 opaque_fid; u16 opaque_fid;
if (rc != 0) if (IS_VF(cdev)) {
break; qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
continue;
}
opaque_fid = p_hwfn->hw_info.opaque_fid; opaque_fid = p_hwfn->hw_info.opaque_fid;
...@@ -1234,8 +1299,10 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, ...@@ -1234,8 +1299,10 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u16 opaque_fid; u16 opaque_fid;
if (rc != 0) if (IS_VF(cdev)) {
break; rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
continue;
}
opaque_fid = p_hwfn->hw_info.opaque_fid; opaque_fid = p_hwfn->hw_info.opaque_fid;
...@@ -1244,6 +1311,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, ...@@ -1244,6 +1311,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
p_filter_cmd, p_filter_cmd,
comp_mode, comp_mode,
p_comp_data); p_comp_data);
if (rc != 0)
break;
} }
return rc; return rc;
...@@ -1252,12 +1321,19 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, ...@@ -1252,12 +1321,19 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
/* Statistics related code */ /* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr, u32 *p_addr,
u32 *p_len, u32 *p_len, u16 statistics_bin)
u16 statistics_bin)
{ {
if (IS_PF(p_hwfn->cdev)) {
*p_addr = BAR0_MAP_REG_PSDM_RAM + *p_addr = BAR0_MAP_REG_PSDM_RAM +
PSTORM_QUEUE_STAT_OFFSET(statistics_bin); PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_pstorm_per_queue_stat); *p_len = sizeof(struct eth_pstorm_per_queue_stat);
} else {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
*p_len = p_resp->pfdev_info.stats_info.pstats.len;
}
} }
static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
...@@ -1272,32 +1348,15 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, ...@@ -1272,32 +1348,15 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
statistics_bin); statistics_bin);
memset(&pstats, 0, sizeof(pstats)); memset(&pstats, 0, sizeof(pstats));
qed_memcpy_from(p_hwfn, p_ptt, &pstats, qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
pstats_addr, pstats_len);
p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
p_stats->tx_ucast_bytes += p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
HILO_64_REGPAIR(pstats.sent_ucast_bytes); p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
p_stats->tx_mcast_bytes += p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
HILO_64_REGPAIR(pstats.sent_mcast_bytes); p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
p_stats->tx_bcast_bytes += p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
HILO_64_REGPAIR(pstats.sent_bcast_bytes); p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
p_stats->tx_ucast_pkts +=
HILO_64_REGPAIR(pstats.sent_ucast_pkts);
p_stats->tx_mcast_pkts +=
HILO_64_REGPAIR(pstats.sent_mcast_pkts);
p_stats->tx_bcast_pkts +=
HILO_64_REGPAIR(pstats.sent_bcast_pkts);
p_stats->tx_err_drop_pkts +=
HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
u32 *p_len)
{
*p_addr = BAR0_MAP_REG_TSDM_RAM +
TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
*p_len = sizeof(struct tstorm_per_port_stat);
} }
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
...@@ -1305,14 +1364,23 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, ...@@ -1305,14 +1364,23 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
struct qed_eth_stats *p_stats, struct qed_eth_stats *p_stats,
u16 statistics_bin) u16 statistics_bin)
{ {
u32 tstats_addr = 0, tstats_len = 0;
struct tstorm_per_port_stat tstats; struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
__qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len); if (IS_PF(p_hwfn->cdev)) {
tstats_addr = BAR0_MAP_REG_TSDM_RAM +
TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
tstats_len = sizeof(struct tstorm_per_port_stat);
} else {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
}
memset(&tstats, 0, sizeof(tstats)); memset(&tstats, 0, sizeof(tstats));
qed_memcpy_from(p_hwfn, p_ptt, &tstats, qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
tstats_addr, tstats_len);
p_stats->mftag_filter_discards += p_stats->mftag_filter_discards +=
HILO_64_REGPAIR(tstats.mftag_filter_discard); HILO_64_REGPAIR(tstats.mftag_filter_discard);
...@@ -1322,12 +1390,19 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, ...@@ -1322,12 +1390,19 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr, u32 *p_addr,
u32 *p_len, u32 *p_len, u16 statistics_bin)
u16 statistics_bin)
{ {
if (IS_PF(p_hwfn->cdev)) {
*p_addr = BAR0_MAP_REG_USDM_RAM + *p_addr = BAR0_MAP_REG_USDM_RAM +
USTORM_QUEUE_STAT_OFFSET(statistics_bin); USTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_ustorm_per_queue_stat); *p_len = sizeof(struct eth_ustorm_per_queue_stat);
} else {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
*p_len = p_resp->pfdev_info.stats_info.ustats.len;
}
} }
static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
...@@ -1342,31 +1417,31 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, ...@@ -1342,31 +1417,31 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
statistics_bin); statistics_bin);
memset(&ustats, 0, sizeof(ustats)); memset(&ustats, 0, sizeof(ustats));
qed_memcpy_from(p_hwfn, p_ptt, &ustats, qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
ustats_addr, ustats_len);
p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
p_stats->rx_ucast_bytes += p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
HILO_64_REGPAIR(ustats.rcv_ucast_bytes); p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
p_stats->rx_mcast_bytes += p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
HILO_64_REGPAIR(ustats.rcv_mcast_bytes); p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
p_stats->rx_bcast_bytes += p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
p_stats->rx_ucast_pkts +=
HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
p_stats->rx_mcast_pkts +=
HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
p_stats->rx_bcast_pkts +=
HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
} }
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr, u32 *p_addr,
u32 *p_len, u32 *p_len, u16 statistics_bin)
u16 statistics_bin)
{ {
if (IS_PF(p_hwfn->cdev)) {
*p_addr = BAR0_MAP_REG_MSDM_RAM + *p_addr = BAR0_MAP_REG_MSDM_RAM +
MSTORM_QUEUE_STAT_OFFSET(statistics_bin); MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_mstorm_per_queue_stat); *p_len = sizeof(struct eth_mstorm_per_queue_stat);
} else {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
*p_len = p_resp->pfdev_info.stats_info.mstats.len;
}
} }
static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
...@@ -1381,21 +1456,17 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, ...@@ -1381,21 +1456,17 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
statistics_bin); statistics_bin);
memset(&mstats, 0, sizeof(mstats)); memset(&mstats, 0, sizeof(mstats));
qed_memcpy_from(p_hwfn, p_ptt, &mstats, qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
mstats_addr, mstats_len);
p_stats->no_buff_discards += p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
HILO_64_REGPAIR(mstats.no_buff_discard);
p_stats->packet_too_big_discard += p_stats->packet_too_big_discard +=
HILO_64_REGPAIR(mstats.packet_too_big_discard); HILO_64_REGPAIR(mstats.packet_too_big_discard);
p_stats->ttl0_discard += p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
HILO_64_REGPAIR(mstats.ttl0_discard);
p_stats->tpa_coalesced_pkts += p_stats->tpa_coalesced_pkts +=
HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
p_stats->tpa_coalesced_events += p_stats->tpa_coalesced_events +=
HILO_64_REGPAIR(mstats.tpa_coalesced_events); HILO_64_REGPAIR(mstats.tpa_coalesced_events);
p_stats->tpa_aborts_num += p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
HILO_64_REGPAIR(mstats.tpa_aborts_num);
p_stats->tpa_coalesced_bytes += p_stats->tpa_coalesced_bytes +=
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
} }
...@@ -1468,14 +1539,14 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, ...@@ -1468,14 +1539,14 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
struct qed_eth_stats *stats, struct qed_eth_stats *stats,
u16 statistics_bin) u16 statistics_bin, bool b_get_port_stats)
{ {
__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
if (p_hwfn->mcp_info) if (b_get_port_stats && p_hwfn->mcp_info)
__qed_get_vport_port_stats(p_hwfn, p_ptt, stats); __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
} }
...@@ -1489,22 +1560,27 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, ...@@ -1489,22 +1560,27 @@ static void _qed_get_vport_stats(struct qed_dev *cdev,
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct qed_ptt *p_ptt; struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
: NULL;
if (IS_PF(cdev)) {
/* The main vport index is relative first */ /* The main vport index is relative first */
if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
DP_ERR(p_hwfn, "No vport available!\n"); DP_ERR(p_hwfn, "No vport available!\n");
continue; goto out;
}
} }
p_ptt = qed_ptt_acquire(p_hwfn); if (IS_PF(cdev) && !p_ptt) {
if (!p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n"); DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue; continue;
} }
__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport); __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
IS_PF(cdev) ? true : false);
out:
if (IS_PF(cdev) && p_ptt)
qed_ptt_release(p_hwfn, p_ptt); qed_ptt_release(p_hwfn, p_ptt);
} }
} }
...@@ -1539,10 +1615,11 @@ void qed_reset_vport_stats(struct qed_dev *cdev) ...@@ -1539,10 +1615,11 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
struct eth_mstorm_per_queue_stat mstats; struct eth_mstorm_per_queue_stat mstats;
struct eth_ustorm_per_queue_stat ustats; struct eth_ustorm_per_queue_stat ustats;
struct eth_pstorm_per_queue_stat pstats; struct eth_pstorm_per_queue_stat pstats;
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
: NULL;
u32 addr = 0, len = 0; u32 addr = 0, len = 0;
if (!p_ptt) { if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n"); DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue; continue;
} }
...@@ -1559,6 +1636,7 @@ void qed_reset_vport_stats(struct qed_dev *cdev) ...@@ -1559,6 +1636,7 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
if (IS_PF(cdev))
qed_ptt_release(p_hwfn, p_ptt); qed_ptt_release(p_hwfn, p_ptt);
} }
...@@ -1580,12 +1658,14 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, ...@@ -1580,12 +1658,14 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_tc = 1; info->num_tc = 1;
if (IS_PF(cdev)) {
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
for_each_hwfn(cdev, i) for_each_hwfn(cdev, i)
info->num_queues += FEAT_NUM(&cdev->hwfns[i], info->num_queues +=
QED_PF_L2_QUE); FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
if (cdev->int_params.fp_msix_cnt) if (cdev->int_params.fp_msix_cnt)
info->num_queues = min_t(u8, info->num_queues, info->num_queues =
min_t(u8, info->num_queues,
cdev->int_params.fp_msix_cnt); cdev->int_params.fp_msix_cnt);
} else { } else {
info->num_queues = cdev->num_hwfns; info->num_queues = cdev->num_hwfns;
...@@ -1594,18 +1674,45 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, ...@@ -1594,18 +1674,45 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
ether_addr_copy(info->port_mac, ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr); cdev->hwfns[0].hw_info.hw_mac_addr);
} else {
qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
if (cdev->num_hwfns > 1) {
u8 queues = 0;
qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
info->num_queues += queues;
}
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
&info->num_vlan_filters);
qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
}
qed_fill_dev_info(cdev, &info->common); qed_fill_dev_info(cdev, &info->common);
if (IS_VF(cdev))
memset(info->common.hw_mac, 0, ETH_ALEN);
return 0; return 0;
} }
static void qed_register_eth_ops(struct qed_dev *cdev, static void qed_register_eth_ops(struct qed_dev *cdev,
struct qed_eth_cb_ops *ops, struct qed_eth_cb_ops *ops, void *cookie)
void *cookie)
{ {
cdev->protocol_ops.eth = ops; cdev->protocol_ops.eth = ops;
cdev->ops_cookie = cookie; cdev->ops_cookie = cookie;
/* For VF, we start bulletin reading */
if (IS_VF(cdev))
qed_vf_start_iov_wq(cdev);
}
static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
if (IS_PF(cdev))
return true;
return qed_vf_check_mac(&cdev->hwfns[0], mac);
} }
static int qed_start_vport(struct qed_dev *cdev, static int qed_start_vport(struct qed_dev *cdev,
...@@ -1620,6 +1727,7 @@ static int qed_start_vport(struct qed_dev *cdev, ...@@ -1620,6 +1727,7 @@ static int qed_start_vport(struct qed_dev *cdev,
start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO : start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
QED_TPA_MODE_NONE; QED_TPA_MODE_NONE;
start.remove_inner_vlan = params->remove_inner_vlan; start.remove_inner_vlan = params->remove_inner_vlan;
start.only_untagged = true; /* untagged only */
start.drop_ttl0 = params->drop_ttl0; start.drop_ttl0 = params->drop_ttl0;
start.opaque_fid = p_hwfn->hw_info.opaque_fid; start.opaque_fid = p_hwfn->hw_info.opaque_fid;
start.concrete_fid = p_hwfn->hw_info.concrete_fid; start.concrete_fid = p_hwfn->hw_info.concrete_fid;
...@@ -1686,6 +1794,8 @@ static int qed_update_vport(struct qed_dev *cdev, ...@@ -1686,6 +1794,8 @@ static int qed_update_vport(struct qed_dev *cdev,
params->update_vport_active_flg; params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg; sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg; sp_params.vport_active_tx_flg = params->vport_active_flg;
sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
sp_params.tx_switching_flg = params->tx_switching_flg;
sp_params.accept_any_vlan = params->accept_any_vlan; sp_params.accept_any_vlan = params->accept_any_vlan;
sp_params.update_accept_any_vlan_flg = sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg; params->update_accept_any_vlan_flg;
...@@ -1890,6 +2000,9 @@ static int qed_tunn_configure(struct qed_dev *cdev, ...@@ -1890,6 +2000,9 @@ static int qed_tunn_configure(struct qed_dev *cdev,
struct qed_tunn_update_params tunn_info; struct qed_tunn_update_params tunn_info;
int i, rc; int i, rc;
if (IS_VF(cdev))
return 0;
memset(&tunn_info, 0, sizeof(tunn_info)); memset(&tunn_info, 0, sizeof(tunn_info));
if (tunn_params->update_vxlan_port == 1) { if (tunn_params->update_vxlan_port == 1) {
tunn_info.update_vxlan_udp_port = 1; tunn_info.update_vxlan_udp_port = 1;
...@@ -2041,10 +2154,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, ...@@ -2041,10 +2154,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe); cqe);
} }
#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif
static const struct qed_eth_ops qed_eth_ops_pass = { static const struct qed_eth_ops qed_eth_ops_pass = {
.common = &qed_common_ops_pass, .common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
.iov = &qed_iov_ops_pass,
#endif
.fill_dev_info = &qed_fill_eth_dev_info, .fill_dev_info = &qed_fill_eth_dev_info,
.register_ops = &qed_register_eth_ops, .register_ops = &qed_register_eth_ops,
.check_mac = &qed_check_mac,
.vport_start = &qed_start_vport, .vport_start = &qed_start_vport,
.vport_stop = &qed_stop_vport, .vport_stop = &qed_stop_vport,
.vport_update = &qed_update_vport, .vport_update = &qed_update_vport,
......
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_L2_H
#define _QED_L2_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_eth_if.h>
#include "qed.h"
#include "qed_hw.h"
#include "qed_sp.h"
struct qed_sge_tpa_params {
u8 max_buffers_per_cqe;
u8 update_tpa_en_flg;
u8 tpa_ipv4_en_flg;
u8 tpa_ipv6_en_flg;
u8 tpa_ipv4_tunn_en_flg;
u8 tpa_ipv6_tunn_en_flg;
u8 update_tpa_param_flg;
u8 tpa_pkt_split_flg;
u8 tpa_hdr_data_split_flg;
u8 tpa_gro_consistent_flg;
u8 tpa_max_aggs_num;
u16 tpa_max_size;
u16 tpa_min_size_to_start;
u16 tpa_min_size_to_cont;
};
enum qed_filter_opcode {
QED_FILTER_ADD,
QED_FILTER_REMOVE,
QED_FILTER_MOVE,
QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
QED_FILTER_FLUSH, /* Removes all filters */
};
enum qed_filter_ucast_type {
QED_FILTER_MAC,
QED_FILTER_VLAN,
QED_FILTER_MAC_VLAN,
QED_FILTER_INNER_MAC,
QED_FILTER_INNER_VLAN,
QED_FILTER_INNER_PAIR,
QED_FILTER_INNER_MAC_VNI_PAIR,
QED_FILTER_MAC_VNI_PAIR,
QED_FILTER_VNI,
};
struct qed_filter_ucast {
enum qed_filter_opcode opcode;
enum qed_filter_ucast_type type;
u8 is_rx_filter;
u8 is_tx_filter;
u8 vport_to_add_to;
u8 vport_to_remove_from;
unsigned char mac[ETH_ALEN];
u8 assert_on_error;
u16 vlan;
u32 vni;
};
struct qed_filter_mcast {
/* MOVE is not supported for multicast */
enum qed_filter_opcode opcode;
u8 vport_to_add_to;
u8 vport_to_remove_from;
u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS 64
unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};
int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 rx_queue_id,
bool eq_completion_only, bool cqe_completion);
int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
enum qed_tpa_mode {
QED_TPA_MODE_NONE,
QED_TPA_MODE_UNUSED,
QED_TPA_MODE_GRO,
QED_TPA_MODE_MAX
};
struct qed_sp_vport_start_params {
enum qed_tpa_mode tpa_mode;
bool remove_inner_vlan;
bool tx_switching;
bool only_untagged;
bool drop_ttl0;
u8 max_buffers_per_cqe;
u32 concrete_fid;
u16 opaque_fid;
u8 vport_id;
u16 mtu;
};
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
struct qed_rss_params {
u8 update_rss_config;
u8 rss_enable;
u8 rss_eng_id;
u8 update_rss_capabilities;
u8 update_rss_ind_table;
u8 update_rss_key;
u8 rss_caps;
u8 rss_table_size_log;
u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
u32 rss_key[QED_RSS_KEY_SIZE];
};
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
u8 update_tx_mode_config;
u8 rx_accept_filter;
u8 tx_accept_filter;
#define QED_ACCEPT_NONE 0x01
#define QED_ACCEPT_UCAST_MATCHED 0x02
#define QED_ACCEPT_UCAST_UNMATCHED 0x04
#define QED_ACCEPT_MCAST_MATCHED 0x08
#define QED_ACCEPT_MCAST_UNMATCHED 0x10
#define QED_ACCEPT_BCAST 0x20
};
struct qed_sp_vport_update_params {
u16 opaque_fid;
u8 vport_id;
u8 update_vport_active_rx_flg;
u8 vport_active_rx_flg;
u8 update_vport_active_tx_flg;
u8 vport_active_tx_flg;
u8 update_inner_vlan_removal_flg;
u8 inner_vlan_removal_flg;
u8 silent_vlan_removal_flg;
u8 update_default_vlan_enable_flg;
u8 default_vlan_enable_flg;
u8 update_default_vlan_flg;
u16 default_vlan;
u8 update_tx_switching_flg;
u8 tx_switching_flg;
u8 update_approx_mcast_flg;
u8 update_anti_spoofing_en_flg;
u8 anti_spoofing_en;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
unsigned long bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
struct qed_sge_tpa_params *sge_tpa_params;
};
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
/**
* @brief qed_sp_vport_stop -
*
* This ramrod closes a VPort after all its RX and TX queues are terminated.
* An Assert is generated if any queues are left open.
*
* @param p_hwfn
* @param opaque_fid
* @param vport_id VPort ID
*
* @return int
*/
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
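A hedged usage sketch (assuming every Rx/Tx queue of the vport has already been stopped, as required above; the error message is illustrative):

/* Illustrative only: close vport 0 of this function. */
rc = qed_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
if (rc)
	DP_NOTICE(p_hwfn, "Failed to close vport\n");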
int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_filter_ucast *p_filter_cmd,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
/**
* @brief qed_sp_rx_eth_queues_update -
*
* This ramrod updates an RX queue. It is used for setting the active state
* of the queue and updating the TPA and SGE parameters.
*
* @note At the moment this is only used by non-Linux VFs.
*
* @param p_hwfn
* @param rx_queue_id RX Queue ID
* @param num_rxqs Allows updating multiple Rx
* queues, from rx_queue_id to
* (rx_queue_id + num_rxqs - 1)
* @param complete_cqe_flg Post completion to the CQE Ring if set
* @param complete_event_flg Post completion to the Event Ring if set
*
* @return int
*/
int
qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
u16 rx_queue_id,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
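As an illustration, a PF servicing a VF request could update two consecutive Rx queues and ask for a completion on the Event Ring; the queue numbers below are example values, not taken from this patch:

/* Illustrative only: update 2 Rx queues starting at queue 4, posting
 * the completion to the Event Ring and blocking until it arrives.
 */
rc = qed_sp_eth_rx_queues_update(p_hwfn, 4, 2,
				 0 /* complete_cqe_flg */,
				 1 /* complete_event_flg */,
				 QED_SPQ_MODE_EBLOCK, NULL);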
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
struct qed_queue_start_common_params *params,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
struct qed_queue_start_common_params *p_params,
u8 stats_id,
dma_addr_t pbl_addr,
u16 pbl_size,
union qed_qm_pq_params *p_pq_params);
u8 qed_mcast_bin_from_mac(u8 *mac);
#endif /* _QED_L2_H */
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/qed/qed_if.h> #include <linux/qed/qed_if.h>
#include "qed.h" #include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_dev_api.h" #include "qed_dev_api.h"
#include "qed_mcp.h" #include "qed_mcp.h"
...@@ -125,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev, ...@@ -125,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err1; goto err1;
} }
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
DP_NOTICE(cdev, "No memory region found in bar #2\n"); DP_NOTICE(cdev, "No memory region found in bar #2\n");
rc = -EIO; rc = -EIO;
goto err1; goto err1;
...@@ -175,6 +176,7 @@ static int qed_init_pci(struct qed_dev *cdev, ...@@ -175,6 +176,7 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err2; goto err2;
} }
if (IS_PF(cdev)) {
cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
cdev->db_size = pci_resource_len(cdev->pdev, 2); cdev->db_size = pci_resource_len(cdev->pdev, 2);
cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
...@@ -182,6 +184,7 @@ static int qed_init_pci(struct qed_dev *cdev, ...@@ -182,6 +184,7 @@ static int qed_init_pci(struct qed_dev *cdev,
DP_NOTICE(cdev, "Cannot map doorbell space\n"); DP_NOTICE(cdev, "Cannot map doorbell space\n");
return -ENOMEM; return -ENOMEM;
} }
}
return 0; return 0;
...@@ -207,21 +210,34 @@ int qed_fill_dev_info(struct qed_dev *cdev, ...@@ -207,21 +210,34 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
if (IS_PF(cdev)) {
dev_info->fw_major = FW_MAJOR_VERSION; dev_info->fw_major = FW_MAJOR_VERSION;
dev_info->fw_minor = FW_MINOR_VERSION; dev_info->fw_minor = FW_MINOR_VERSION;
dev_info->fw_rev = FW_REVISION_VERSION; dev_info->fw_rev = FW_REVISION_VERSION;
dev_info->fw_eng = FW_ENGINEERING_VERSION; dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = cdev->mf_mode; dev_info->mf_mode = cdev->mf_mode;
dev_info->tx_switching = true;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
&dev_info->fw_minor, &dev_info->fw_rev,
&dev_info->fw_eng);
}
qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev); if (IS_PF(cdev)) {
ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (ptt) { if (ptt) {
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
&dev_info->mfw_rev, NULL);
qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
&dev_info->flash_size); &dev_info->flash_size);
qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
} }
} else {
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
&dev_info->mfw_rev, NULL);
}
return 0; return 0;
} }
...@@ -257,9 +273,7 @@ static int qed_set_power_state(struct qed_dev *cdev, ...@@ -257,9 +273,7 @@ static int qed_set_power_state(struct qed_dev *cdev,
/* probing */ /* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev, static struct qed_dev *qed_probe(struct pci_dev *pdev,
enum qed_protocol protocol, struct qed_probe_params *params)
u32 dp_module,
u8 dp_level)
{ {
struct qed_dev *cdev; struct qed_dev *cdev;
int rc; int rc;
...@@ -268,9 +282,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, ...@@ -268,9 +282,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev) if (!cdev)
goto err0; goto err0;
cdev->protocol = protocol; cdev->protocol = params->protocol;
if (params->is_vf)
cdev->b_is_vf = true;
qed_init_dp(cdev, dp_module, dp_level); qed_init_dp(cdev, params->dp_module, params->dp_level);
rc = qed_init_pci(cdev, pdev); rc = qed_init_pci(cdev, pdev);
if (rc) { if (rc) {
...@@ -664,6 +681,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, ...@@ -664,6 +681,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
return 0; return 0;
} }
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
int rc;
memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
&cdev->int_params.in.num_vectors);
if (cdev->num_hwfns > 1) {
u8 vectors = 0;
qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
cdev->int_params.in.num_vectors += vectors;
}
/* We want a minimum of one fastpath vector per vf hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
rc = qed_set_int_mode(cdev, true);
if (rc)
return rc;
cdev->int_params.fp_msix_base = 0;
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
return 0;
}
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf) u8 *input_buf, u32 max_size, u8 *unzip_buf)
{ {
...@@ -749,8 +795,12 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -749,8 +795,12 @@ static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_mcp_drv_version drv_version; struct qed_mcp_drv_version drv_version;
const u8 *data = NULL; const u8 *data = NULL;
struct qed_hwfn *hwfn; struct qed_hwfn *hwfn;
int rc; int rc = -EINVAL;
if (qed_iov_wq_start(cdev))
goto err;
if (IS_PF(cdev)) {
rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
&cdev->pdev->dev); &cdev->pdev->dev);
if (rc) { if (rc) {
...@@ -759,15 +809,20 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -759,15 +809,20 @@ static int qed_slowpath_start(struct qed_dev *cdev,
QED_FW_FILE_NAME); QED_FW_FILE_NAME);
goto err; goto err;
} }
}
rc = qed_nic_setup(cdev); rc = qed_nic_setup(cdev);
if (rc) if (rc)
goto err; goto err;
if (IS_PF(cdev))
rc = qed_slowpath_setup_int(cdev, params->int_mode); rc = qed_slowpath_setup_int(cdev, params->int_mode);
else
rc = qed_slowpath_vf_setup_int(cdev);
if (rc) if (rc)
goto err1; goto err1;
if (IS_PF(cdev)) {
/* Allocate stream for unzipping */ /* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(cdev); rc = qed_alloc_stream_mem(cdev);
if (rc) { if (rc) {
...@@ -775,8 +830,8 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -775,8 +830,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
goto err2; goto err2;
} }
/* Start the slowpath */
data = cdev->firmware->data; data = cdev->firmware->data;
}
memset(&tunn_info, 0, sizeof(tunn_info)); memset(&tunn_info, 0, sizeof(tunn_info));
tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN | tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
...@@ -789,6 +844,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -789,6 +844,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN; tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN; tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
/* Start the slowpath */
rc = qed_hw_init(cdev, &tunn_info, true, rc = qed_hw_init(cdev, &tunn_info, true,
cdev->int_params.out.int_mode, cdev->int_params.out.int_mode,
true, data); true, data);
...@@ -798,6 +854,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -798,6 +854,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev, DP_INFO(cdev,
"HW initialization and function start completed successfully\n"); "HW initialization and function start completed successfully\n");
if (IS_PF(cdev)) {
hwfn = QED_LEADING_HWFN(cdev); hwfn = QED_LEADING_HWFN(cdev);
drv_version.version = (params->drv_major << 24) | drv_version.version = (params->drv_major << 24) |
(params->drv_minor << 16) | (params->drv_minor << 16) |
...@@ -811,6 +868,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -811,6 +868,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_NOTICE(cdev, "Failed sending drv version command\n"); DP_NOTICE(cdev, "Failed sending drv version command\n");
return rc; return rc;
} }
}
qed_reset_vport_stats(cdev); qed_reset_vport_stats(cdev);
...@@ -818,14 +876,18 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -818,14 +876,18 @@ static int qed_slowpath_start(struct qed_dev *cdev,
err2: err2:
qed_hw_timers_stop_all(cdev); qed_hw_timers_stop_all(cdev);
if (IS_PF(cdev))
qed_slowpath_irq_free(cdev); qed_slowpath_irq_free(cdev);
qed_free_stream_mem(cdev); qed_free_stream_mem(cdev);
qed_disable_msix(cdev); qed_disable_msix(cdev);
err1: err1:
qed_resc_free(cdev); qed_resc_free(cdev);
err: err:
if (IS_PF(cdev))
release_firmware(cdev->firmware); release_firmware(cdev->firmware);
qed_iov_wq_stop(cdev, false);
return rc; return rc;
} }
...@@ -834,14 +896,20 @@ static int qed_slowpath_stop(struct qed_dev *cdev) ...@@ -834,14 +896,20 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev) if (!cdev)
return -ENODEV; return -ENODEV;
if (IS_PF(cdev)) {
qed_free_stream_mem(cdev); qed_free_stream_mem(cdev);
qed_sriov_disable(cdev, true);
qed_nic_stop(cdev); qed_nic_stop(cdev);
qed_slowpath_irq_free(cdev); qed_slowpath_irq_free(cdev);
}
qed_disable_msix(cdev); qed_disable_msix(cdev);
qed_nic_reset(cdev); qed_nic_reset(cdev);
qed_iov_wq_stop(cdev, true);
if (IS_PF(cdev))
release_firmware(cdev->firmware); release_firmware(cdev->firmware);
return 0; return 0;
...@@ -932,6 +1000,9 @@ static int qed_set_link(struct qed_dev *cdev, ...@@ -932,6 +1000,9 @@ static int qed_set_link(struct qed_dev *cdev,
if (!cdev) if (!cdev)
return -ENODEV; return -ENODEV;
if (IS_VF(cdev))
return 0;
/* The link should be set only once per PF */ /* The link should be set only once per PF */
hwfn = &cdev->hwfns[0]; hwfn = &cdev->hwfns[0];
...@@ -1043,10 +1114,16 @@ static void qed_fill_link(struct qed_hwfn *hwfn, ...@@ -1043,10 +1114,16 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link)); memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */ /* Prepare source inputs */
if (IS_PF(hwfn->cdev)) {
memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params)); memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
sizeof(link_caps)); sizeof(link_caps));
} else {
qed_vf_get_link_params(hwfn, &params);
qed_vf_get_link_state(hwfn, &link);
qed_vf_get_link_caps(hwfn, &link_caps);
}
/* Set the link parameters to pass to protocol driver */ /* Set the link parameters to pass to protocol driver */
if (link.link_up) if (link.link_up)
...@@ -1148,7 +1225,12 @@ static void qed_fill_link(struct qed_hwfn *hwfn, ...@@ -1148,7 +1225,12 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
static void qed_get_current_link(struct qed_dev *cdev, static void qed_get_current_link(struct qed_dev *cdev,
struct qed_link_output *if_link) struct qed_link_output *if_link)
{ {
int i;
qed_fill_link(&cdev->hwfns[0], if_link); qed_fill_link(&cdev->hwfns[0], if_link);
for_each_hwfn(cdev, i)
qed_inform_vf_link_state(&cdev->hwfns[i]);
} }
void qed_link_update(struct qed_hwfn *hwfn) void qed_link_update(struct qed_hwfn *hwfn)
...@@ -1158,6 +1240,7 @@ void qed_link_update(struct qed_hwfn *hwfn) ...@@ -1158,6 +1240,7 @@ void qed_link_update(struct qed_hwfn *hwfn)
struct qed_link_output if_link; struct qed_link_output if_link;
qed_fill_link(hwfn, &if_link); qed_fill_link(hwfn, &if_link);
qed_inform_vf_link_state(hwfn);
if (IS_LEAD_HWFN(hwfn) && cookie) if (IS_LEAD_HWFN(hwfn) && cookie)
op->link_update(cookie, &if_link); op->link_update(cookie, &if_link);
...@@ -1169,6 +1252,9 @@ static int qed_drain(struct qed_dev *cdev) ...@@ -1169,6 +1252,9 @@ static int qed_drain(struct qed_dev *cdev)
struct qed_ptt *ptt; struct qed_ptt *ptt;
int i, rc; int i, rc;
if (IS_VF(cdev))
return 0;
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i]; hwfn = &cdev->hwfns[i];
ptt = qed_ptt_acquire(hwfn); ptt = qed_ptt_acquire(hwfn);
......
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
#include "qed_hw.h" #include "qed_hw.h"
#include "qed_mcp.h" #include "qed_mcp.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sriov.h"
#define CHIP_MCP_RESP_ITER_US 10 #define CHIP_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
...@@ -440,6 +442,75 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, ...@@ -440,6 +442,75 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0; return 0;
} }
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_PATH);
u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
u32 path_addr = SECTION_ADDR(mfw_path_offsize,
QED_PATH_ID(p_hwfn));
u32 disabled_vfs[VF_MAX_STATIC / 32];
int i;
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"Reading Disabled VF information from [offset %08x], path_addr %08x\n",
mfw_path_offsize, path_addr);
for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
path_addr +
offsetof(struct public_path,
mcp_vf_disabled) +
sizeof(u32) * i);
DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
"FLR-ed VFs [%08x,...,%08x] - %08x\n",
i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
}
if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}
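As an illustration of the bitmap layout read above (a standalone sketch, not part of the patch; the VF_MAX_STATIC value of 192 is assumed here): each 32-bit word of mcp_vf_disabled covers 32 VFs, so absolute VF n is flagged by bit (n & 31) of word (n / 32).
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#define VF_MAX_STATIC 192	/* assumed value, for illustration only */
/* Word n/32, bit n%32 of the mcp_vf_disabled shadow flags absolute VF n */
static bool vf_is_flred(const uint32_t *disabled_vfs, unsigned int vf_id)
{
	return disabled_vfs[vf_id / 32] & (1u << (vf_id & 31));
}
int main(void)
{
	uint32_t disabled_vfs[VF_MAX_STATIC / 32] = { 0 };
	disabled_vfs[1] = 1u << 4;	/* pretend the MFW marked VF 36 as FLR-ed */
	printf("VF 36 FLR pending: %d\n", vf_is_flred(disabled_vfs, 36));
	return 0;
}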
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_FUNC);
u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
MCP_PF_ID(p_hwfn));
struct qed_mcp_mb_params mb_params;
union drv_union_data union_data;
int rc;
int i;
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
"Acking VFs [%08x,...,%08x] - %08x\n",
i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
mb_params.p_data_src = &union_data;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
return -EBUSY;
}
/* Clear the ACK bits */
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
qed_wr(p_hwfn, p_ptt,
func_addr +
offsetof(struct public_func, drv_ack_vf_disabled) +
i * sizeof(u32), 0);
return rc;
}
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt) struct qed_ptt *p_ptt)
{ {
...@@ -751,6 +822,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, ...@@ -751,6 +822,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_LINK_CHANGE: case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false); qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
break; break;
case MFW_DRV_MSG_VF_DISABLED:
qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break; break;
...@@ -787,26 +861,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, ...@@ -787,26 +861,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
return rc; return rc;
} }
int qed_mcp_get_mfw_ver(struct qed_dev *cdev, int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
u32 *p_mfw_ver) struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id)
{ {
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
u32 global_offsize; u32 global_offsize;
p_ptt = qed_ptt_acquire(p_hwfn); if (IS_VF(p_hwfn->cdev)) {
if (!p_ptt) if (p_hwfn->vf_iov_info) {
return -EBUSY; struct pfvf_acquire_resp_tlv *p_resp;
p_resp = &p_hwfn->vf_iov_info->acquire_resp;
*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
return 0;
} else {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF requested MFW version prior to ACQUIRE\n");
return -EINVAL;
}
}
global_offsize = qed_rd(p_hwfn, p_ptt, global_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> SECTION_OFFSIZE_ADDR(p_hwfn->
public_base, mcp_info->public_base,
PUBLIC_GLOBAL)); PUBLIC_GLOBAL));
*p_mfw_ver = qed_rd(p_hwfn, p_ptt, *p_mfw_ver =
SECTION_ADDR(global_offsize, 0) + qed_rd(p_hwfn, p_ptt,
offsetof(struct public_global, mfw_ver)); SECTION_ADDR(global_offsize,
0) + offsetof(struct public_global, mfw_ver));
qed_ptt_release(p_hwfn, p_ptt); if (p_running_bundle_id != NULL) {
*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize, 0) +
offsetof(struct public_global,
running_bundle_id));
}
return 0; return 0;
} }
...@@ -817,6 +907,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, ...@@ -817,6 +907,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt; struct qed_ptt *p_ptt;
if (IS_VF(cdev))
return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) { if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
return -EBUSY; return -EBUSY;
...@@ -951,6 +1044,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, ...@@ -951,6 +1044,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
{ {
u32 flash_size; u32 flash_size;
if (IS_VF(p_hwfn->cdev))
return -EINVAL;
flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
...@@ -961,6 +1057,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, ...@@ -961,6 +1057,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
return 0; return 0;
} }
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
u32 resp = 0, param = 0, rc_param = 0;
int rc;
/* Only the leader can configure MSI-X, and needs to take CMT into account */
if (!IS_LEAD_HWFN(p_hwfn))
return 0;
num *= p_hwfn->cdev->num_hwfns;
param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
&resp, &rc_param);
if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
rc = -EINVAL;
} else {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
num, vf_id);
}
return rc;
}
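A standalone sketch of the CMT scaling above (illustration only, not part of the patch; the shift/mask values below are placeholders rather than the real DRV_MB_PARAM_CFG_VF_MSIX_* definitions): on a hypothetical two-hwfn 100G device, a request for 4 status blocks per VF is doubled before being packed into the mailbox parameter.
#include <stdint.h>
#include <stdio.h>
/* Placeholder shift/mask values, not the qed HSI definitions */
#define VF_ID_SHIFT	0
#define VF_ID_MASK	0x000000ff
#define SB_NUM_SHIFT	8
#define SB_NUM_MASK	0x0000ff00
int main(void)
{
	uint32_t vf_id = 3, num_sbs = 4, num_hwfns = 2;	/* hypothetical CMT device */
	uint32_t param = 0;
	num_sbs *= num_hwfns;	/* leader requests SBs on behalf of both engines */
	param |= (vf_id << VF_ID_SHIFT) & VF_ID_MASK;
	param |= (num_sbs << SB_NUM_SHIFT) & SB_NUM_MASK;
	printf("mailbox param = 0x%08x (8 SBs for VF 3)\n", (unsigned int)param);
	return 0;
}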
int int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
......
...@@ -149,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, ...@@ -149,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
/** /**
* @brief Get the management firmware version value * @brief Get the management firmware version value
* *
* @param cdev - qed dev pointer * @param p_hwfn
* @param mfw_ver - mfw version value * @param p_ptt
* @param p_mfw_ver - mfw version value
* @param p_running_bundle_id - image id in nvram; Optional.
* *
* @return int - 0 - operation was successul. * @return int - 0 - operation was successful.
*/ */
int qed_mcp_get_mfw_ver(struct qed_dev *cdev, int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
u32 *mfw_ver); struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id);
/** /**
* @brief Get media type value of the port. * @brief Get media type value of the port.
...@@ -389,6 +392,18 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, ...@@ -389,6 +392,18 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt); struct qed_ptt *p_ptt);
/**
* @brief Ack to mfw that driver finished FLR process for VFs
*
* @param p_hwfn
* @param p_ptt
* @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
*
 * @return int - 0 upon success.
*/
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *vfs_to_ack);
/** /**
* @brief - calls during init to read shmem of all function-related info. * @brief - calls during init to read shmem of all function-related info.
* *
...@@ -418,6 +433,20 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, ...@@ -418,6 +433,20 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
* @return true iff MFW is running and mcp_info is initialized * @return true iff MFW is running and mcp_info is initialized
*/ */
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
/**
* @brief request MFW to configure MSI-X for a VF
*
* @param p_hwfn
* @param p_ptt
* @param vf_id - absolute inside engine
* @param num_sbs - number of entries to request
*
* @return int
*/
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num);
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw); int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw); int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
......
...@@ -39,6 +39,10 @@ ...@@ -39,6 +39,10 @@
0x2aae04UL 0x2aae04UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \ #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
0x2aa16cUL 0x2aa16cUL
#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
0x2aa118UL
#define PSWHST_REG_ZONE_PERMISSION_TABLE \
0x2a0800UL
#define BAR0_MAP_REG_MSDM_RAM \ #define BAR0_MAP_REG_MSDM_RAM \
0x1d00000UL 0x1d00000UL
#define BAR0_MAP_REG_USDM_RAM \ #define BAR0_MAP_REG_USDM_RAM \
...@@ -77,6 +81,8 @@ ...@@ -77,6 +81,8 @@
0x2f2eb0UL 0x2f2eb0UL
#define DORQ_REG_PF_DB_ENABLE \ #define DORQ_REG_PF_DB_ENABLE \
0x100508UL 0x100508UL
#define DORQ_REG_VF_USAGE_CNT \
0x1009c4UL
#define QM_REG_PF_EN \ #define QM_REG_PF_EN \
0x2f2ea4UL 0x2f2ea4UL
#define TCFC_REG_STRONG_ENABLE_PF \ #define TCFC_REG_STRONG_ENABLE_PF \
...@@ -111,6 +117,8 @@ ...@@ -111,6 +117,8 @@
0x009778UL 0x009778UL
#define MISCS_REG_CHIP_METAL \ #define MISCS_REG_CHIP_METAL \
0x009774UL 0x009774UL
#define MISCS_REG_FUNCTION_HIDE \
0x0096f0UL
#define BRB_REG_HEADER_SIZE \ #define BRB_REG_HEADER_SIZE \
0x340804UL 0x340804UL
#define BTB_REG_HEADER_SIZE \ #define BTB_REG_HEADER_SIZE \
...@@ -119,6 +127,8 @@ ...@@ -119,6 +127,8 @@
0x1c0708UL 0x1c0708UL
#define CCFC_REG_ACTIVITY_COUNTER \ #define CCFC_REG_ACTIVITY_COUNTER \
0x2e8800UL 0x2e8800UL
#define CCFC_REG_STRONG_ENABLE_VF \
0x2e070cUL
#define CDU_REG_CID_ADDR_PARAMS \ #define CDU_REG_CID_ADDR_PARAMS \
0x580900UL 0x580900UL
#define DBG_REG_CLIENT_ENABLE \ #define DBG_REG_CLIENT_ENABLE \
...@@ -161,6 +171,10 @@ ...@@ -161,6 +171,10 @@
0x040200UL 0x040200UL
#define PBF_REG_INIT \ #define PBF_REG_INIT \
0xd80000UL 0xd80000UL
#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \
0xd806c8UL
#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \
0xd806ccUL
#define PTU_REG_ATC_INIT_ARRAY \ #define PTU_REG_ATC_INIT_ARRAY \
0x560000UL 0x560000UL
#define PCM_REG_INIT \ #define PCM_REG_INIT \
...@@ -385,6 +399,8 @@ ...@@ -385,6 +399,8 @@
0x1d0000UL 0x1d0000UL
#define IGU_REG_PF_CONFIGURATION \ #define IGU_REG_PF_CONFIGURATION \
0x180800UL 0x180800UL
#define IGU_REG_VF_CONFIGURATION \
0x180804UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ #define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL 0x00849cUL
#define MISC_REG_AEU_AFTER_INVERT_1_IGU \ #define MISC_REG_AEU_AFTER_INVERT_1_IGU \
...@@ -411,6 +427,8 @@ ...@@ -411,6 +427,8 @@
0x1 << 0) 0x1 << 0)
#define IGU_REG_MAPPING_MEMORY \ #define IGU_REG_MAPPING_MEMORY \
0x184000UL 0x184000UL
#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
0x180408UL
#define MISCS_REG_GENERIC_POR_0 \ #define MISCS_REG_GENERIC_POR_0 \
0x0096d4UL 0x0096d4UL
#define MCP_REG_NVM_CFG4 \ #define MCP_REG_NVM_CFG4 \
......
...@@ -62,6 +62,9 @@ union ramrod_data { ...@@ -62,6 +62,9 @@ union ramrod_data {
struct vport_stop_ramrod_data vport_stop; struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update; struct vport_update_ramrod_data vport_update;
struct vport_filter_update_ramrod_data vport_filter_update; struct vport_filter_update_ramrod_data vport_filter_update;
struct vf_start_ramrod_data vf_start;
struct vf_stop_ramrod_data vf_stop;
}; };
#define EQ_MAX_CREDIT 0xffffffff #define EQ_MAX_CREDIT 0xffffffff
...@@ -341,13 +344,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, ...@@ -341,13 +344,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* @param p_hwfn * @param p_hwfn
* @param p_tunn * @param p_tunn
* @param mode * @param mode
* @param allow_npar_tx_switch
* *
* @return int * @return int
*/ */
int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_tunn_start_params *p_tunn, struct qed_tunn_start_params *p_tunn,
enum qed_mf_mode mode); enum qed_mf_mode mode, bool allow_npar_tx_switch);
/** /**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod * @brief qed_sp_pf_stop - PF Function Stop Ramrod
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include "qed_int.h" #include "qed_int.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h"
int qed_sp_init_request(struct qed_hwfn *p_hwfn, int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent, struct qed_spq_entry **pp_ent,
...@@ -298,7 +299,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, ...@@ -298,7 +299,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_tunn_start_params *p_tunn, struct qed_tunn_start_params *p_tunn,
enum qed_mf_mode mode) enum qed_mf_mode mode, bool allow_npar_tx_switch)
{ {
struct pf_start_ramrod_data *p_ramrod = NULL; struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn); u16 sb = qed_int_get_sp_sb_id(p_hwfn);
...@@ -357,6 +358,16 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, ...@@ -357,6 +358,16 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
&p_ramrod->tunnel_config); &p_ramrod->tunnel_config);
p_hwfn->hw_info.personality = PERSONALITY_ETH; p_hwfn->hw_info.personality = PERSONALITY_ETH;
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
p_ramrod->num_vfs = (u8) p_iov->total_vfs;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ, DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index, sb, sb_index,
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include "qed_mcp.h" #include "qed_mcp.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h"
/*************************************************************************** /***************************************************************************
* Structures & Definitions * Structures & Definitions
...@@ -242,10 +243,17 @@ static int ...@@ -242,10 +243,17 @@ static int
qed_async_event_completion(struct qed_hwfn *p_hwfn, qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe) struct event_ring_entry *p_eqe)
{ {
switch (p_eqe->protocol_id) {
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
p_eqe->echo, &p_eqe->data);
default:
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n", "Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id); p_eqe->protocol_id);
return -EINVAL; return -EINVAL;
}
} }
/*************************************************************************** /***************************************************************************
...@@ -379,6 +387,9 @@ static int qed_cqe_completion( ...@@ -379,6 +387,9 @@ static int qed_cqe_completion(
struct eth_slow_path_rx_cqe *cqe, struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol) enum protocol_type protocol)
{ {
if (IS_VF(p_hwfn->cdev))
return 0;
/* @@@tmp - it's possible we'll eventually want to handle some /* @@@tmp - it's possible we'll eventually want to handle some
* actual commands that can arrive here, but for now this is only * actual commands that can arrive here, but for now this is only
* used to complete the ramrod using the echo value on the cqe * used to complete the ramrod using the echo value on the cqe
......
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
u32 concrete_vfid, u16 opaque_vfid)
{
struct vf_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
init_data.opaque_fid = opaque_vfid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_VF_START,
PROTOCOLID_COMMON, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.vf_start;
p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
p_ramrod->personality = PERSONALITY_ETH;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
u32 concrete_vfid, u16 opaque_vfid)
{
struct vf_stop_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
init_data.opaque_fid = opaque_vfid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_VF_STOP,
PROTOCOLID_COMMON, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.vf_stop;
p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
int rel_vf_id, bool b_enabled_only)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
return false;
}
if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
(rel_vf_id < 0))
return false;
if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
b_enabled_only)
return false;
return true;
}
static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
u16 relative_vf_id,
bool b_enabled_only)
{
struct qed_vf_info *vf = NULL;
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
return NULL;
}
if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
else
DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
relative_vf_id);
return vf;
}
int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
int vfid, struct qed_ptt *p_ptt)
{
struct qed_bulletin_content *p_bulletin;
int crc_size = sizeof(p_bulletin->crc);
struct qed_dmae_params params;
struct qed_vf_info *p_vf;
p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!p_vf)
return -EINVAL;
if (!p_vf->vf_bulletin)
return -EINVAL;
p_bulletin = p_vf->bulletin.p_virt;
/* Increment bulletin board version and compute crc */
p_bulletin->version++;
p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
p_vf->bulletin.size - crc_size);
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
/* propagate bulletin board via dmae to vm memory */
memset(&params, 0, sizeof(params));
params.flags = QED_DMAE_FLAG_VF_DST;
params.dst_vfid = p_vf->abs_vf_id;
return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
p_vf->vf_bulletin, p_vf->bulletin.size / 4,
&params);
}
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
struct qed_hw_sriov_info *iov = cdev->p_iov_info;
int pos = iov->pos;
DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
pci_read_config_word(cdev->pdev,
pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
pci_read_config_word(cdev->pdev,
pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
if (iov->num_vfs) {
DP_VERBOSE(cdev,
QED_MSG_IOV,
"Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
iov->num_vfs = 0;
}
pci_read_config_word(cdev->pdev,
pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
pci_read_config_word(cdev->pdev,
pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
pci_read_config_word(cdev->pdev,
pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
pci_read_config_dword(cdev->pdev,
pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
DP_VERBOSE(cdev,
QED_MSG_IOV,
"IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
iov->nres,
iov->cap,
iov->ctrl,
iov->total_vfs,
iov->initial_vfs,
iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
/* Some sanity checks */
if (iov->num_vfs > NUM_OF_VFS(cdev) ||
iov->total_vfs > NUM_OF_VFS(cdev)) {
/* This can happen only due to a bug. In this case we set
* num_vfs to zero to avoid memory corruption in the code that
* assumes max number of vfs
*/
DP_NOTICE(cdev,
"IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
iov->num_vfs);
iov->num_vfs = 0;
iov->total_vfs = 0;
}
return 0;
}
static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_igu_block *p_sb;
u16 sb_id;
u32 val;
if (!p_hwfn->hw_info.p_igu_info) {
DP_ERR(p_hwfn,
"qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
return;
}
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
sb_id++) {
p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
if ((p_sb->status & QED_IGU_STATUS_FREE) &&
!(p_sb->status & QED_IGU_STATUS_PF)) {
val = qed_rd(p_hwfn, p_ptt,
IGU_REG_MAPPING_MEMORY + sb_id * 4);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
qed_wr(p_hwfn, p_ptt,
IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
}
}
}
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
struct qed_bulletin_content *p_bulletin_virt;
dma_addr_t req_p, rply_p, bulletin_p;
union pfvf_tlvs *p_reply_virt_addr;
union vfpf_tlvs *p_req_virt_addr;
u8 idx = 0;
memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
req_p = p_iov_info->mbx_msg_phys_addr;
p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
rply_p = p_iov_info->mbx_reply_phys_addr;
p_bulletin_virt = p_iov_info->p_bulletins;
bulletin_p = p_iov_info->bulletins_phys;
if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
DP_ERR(p_hwfn,
"qed_iov_setup_vfdb called without allocating mem first\n");
return;
}
for (idx = 0; idx < p_iov->total_vfs; idx++) {
struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
u32 concrete;
vf->vf_mbx.req_virt = p_req_virt_addr + idx;
vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
vf->state = VF_STOPPED;
vf->b_init = false;
vf->bulletin.phys = idx *
sizeof(struct qed_bulletin_content) +
bulletin_p;
vf->bulletin.p_virt = p_bulletin_virt + idx;
vf->bulletin.size = sizeof(struct qed_bulletin_content);
vf->relative_vf_id = idx;
vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
vf->concrete_fid = concrete;
vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
(vf->abs_vf_id << 8);
vf->vport_id = idx + 1;
}
}
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
void **p_v_addr;
u16 num_vfs = 0;
num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
/* Allocate PF Mailbox buffer (per-VF) */
p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
p_v_addr = &p_iov_info->mbx_msg_virt_addr;
*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
p_iov_info->mbx_msg_size,
&p_iov_info->mbx_msg_phys_addr,
GFP_KERNEL);
if (!*p_v_addr)
return -ENOMEM;
/* Allocate PF Mailbox Reply buffer (per-VF) */
p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
p_v_addr = &p_iov_info->mbx_reply_virt_addr;
*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
p_iov_info->mbx_reply_size,
&p_iov_info->mbx_reply_phys_addr,
GFP_KERNEL);
if (!*p_v_addr)
return -ENOMEM;
p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
num_vfs;
p_v_addr = &p_iov_info->p_bulletins;
*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
p_iov_info->bulletins_size,
&p_iov_info->bulletins_phys,
GFP_KERNEL);
if (!*p_v_addr)
return -ENOMEM;
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
p_iov_info->mbx_msg_virt_addr,
(u64) p_iov_info->mbx_msg_phys_addr,
p_iov_info->mbx_reply_virt_addr,
(u64) p_iov_info->mbx_reply_phys_addr,
p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
return 0;
}
static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_iov_info->mbx_msg_size,
p_iov_info->mbx_msg_virt_addr,
p_iov_info->mbx_msg_phys_addr);
if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_iov_info->mbx_reply_size,
p_iov_info->mbx_reply_virt_addr,
p_iov_info->mbx_reply_phys_addr);
if (p_iov_info->p_bulletins)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_iov_info->bulletins_size,
p_iov_info->p_bulletins,
p_iov_info->bulletins_phys);
}
int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_pf_iov *p_sriov;
if (!IS_PF_SRIOV(p_hwfn)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"No SR-IOV - no need for IOV db\n");
return 0;
}
p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
if (!p_sriov) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
return -ENOMEM;
}
p_hwfn->pf_iov_info = p_sriov;
return qed_iov_allocate_vfdb(p_hwfn);
}
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return;
qed_iov_setup_vfdb(p_hwfn);
qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}
void qed_iov_free(struct qed_hwfn *p_hwfn)
{
if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
qed_iov_free_vfdb(p_hwfn);
kfree(p_hwfn->pf_iov_info);
}
}
void qed_iov_free_hw_info(struct qed_dev *cdev)
{
kfree(cdev->p_iov_info);
cdev->p_iov_info = NULL;
}
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
struct qed_dev *cdev = p_hwfn->cdev;
int pos;
int rc;
if (IS_VF(p_hwfn->cdev))
return 0;
/* Learn the PCI configuration */
pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
PCI_EXT_CAP_ID_SRIOV);
if (!pos) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
return 0;
}
/* Allocate a new struct for IOV information */
cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
if (!cdev->p_iov_info) {
DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
return -ENOMEM;
}
cdev->p_iov_info->pos = pos;
rc = qed_iov_pci_cfg_info(cdev);
if (rc)
return rc;
/* We want PF IOV to be synonymous with the existence of p_iov_info;
* In case the capability is published but there are no VFs, simply
* de-allocate the struct.
*/
if (!cdev->p_iov_info->total_vfs) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"IOV capabilities, but no VFs are published\n");
kfree(cdev->p_iov_info);
cdev->p_iov_info = NULL;
return 0;
}
/* Calculate the first VF index - this is a bit tricky; Basically,
* VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
* after the first engine's VFs.
*/
cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
p_hwfn->abs_pf_id - 16;
if (QED_PATH_ID(p_hwfn))
cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"First VF in hwfn 0x%08x\n",
cdev->p_iov_info->first_vf_in_pf);
return 0;
}
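A minimal sketch of the "first VF in PF" arithmetic above (illustration only, not part of the patch; the MAX_NUM_VFS_BB value of 120 is assumed): VFs start 16 functions after PF0, and second-engine PFs subtract the first engine's VF range.
#include <stdio.h>
#define MAX_NUM_VFS_BB 120	/* assumed value, for illustration only */
/* offset: PCI_SRIOV_VF_OFFSET read from this PF's SR-IOV capability */
static int first_vf_in_pf(int offset, int abs_pf_id, int path_id)
{
	int first = offset + abs_pf_id - 16;	/* VFs begin 16 functions after PF0 */
	if (path_id)	/* second engine: skip the first engine's VFs */
		first -= MAX_NUM_VFS_BB;
	return first;
}
int main(void)
{
	/* e.g. PF 2 on engine 0 with a VF offset of 16 -> first VF index 2 */
	printf("first VF in PF: %d\n", first_vf_in_pf(16, 2, 0));
	return 0;
}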
static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
/* Check PF supports sriov */
if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return false;
/* Check VF validity */
if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
!IS_PF_SRIOV_ALLOC(p_hwfn))
return false;
return true;
}
static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
u16 rel_vf_id, u8 to_disable)
{
struct qed_vf_info *vf;
int i;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
if (!vf)
continue;
vf->to_disable = to_disable;
}
}
void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
u16 i;
if (!IS_QED_SRIOV(cdev))
return;
for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
qed_iov_set_vf_to_disable(cdev, i, to_disable);
}
static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 abs_vfid)
{
qed_wr(p_hwfn, p_ptt,
PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
1 << (abs_vfid & 0x1f));
}
static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
u16 igu_sb_id;
int i;
/* Set VF masks and configuration - pretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"value in VF_CONFIGURATION of vf %d after write %x\n",
vf->abs_vf_id,
qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));
/* unpretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
/* iterate over all queues, clear sb consumer */
for (i = 0; i < vf->num_sbs; i++) {
igu_sb_id = vf->igu_sbs[i];
/* Set then clear... */
qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
vf->opaque_fid);
qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,
vf->opaque_fid);
}
}
static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf, bool enable)
{
u32 igu_vf_conf;
qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
if (enable)
igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
else
igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
/* unpretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
int rc;
if (vf->to_disable)
return 0;
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"Enable internal access for vf %x [abs %x]\n",
vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
if (rc)
return rc;
qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
p_hwfn->hw_info.hw_mode);
/* unpretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
if (vf->state != VF_STOPPED) {
DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
vf->abs_vf_id);
return -EINVAL;
}
/* Start VF */
rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
if (rc)
DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
vf->state = VF_FREE;
return rc;
}
/**
* @brief qed_iov_config_perm_table - configure the permission
* zone table.
* In E4, queue zone permission table size is 320x9. There
* are 320 VF queues for single engine device (256 for dual
* engine device), and each entry has the following format:
* {Valid, VF[7:0]}
* @param p_hwfn
* @param p_ptt
* @param vf
* @param enable
*/
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf, u8 enable)
{
u32 reg_addr, val;
u16 qzone_id = 0;
int qid;
for (qid = 0; qid < vf->num_rxqs; qid++) {
qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
&qzone_id);
reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
qed_wr(p_hwfn, p_ptt, reg_addr, val);
}
}
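For illustration (a standalone sketch, not part of the patch): each permission-table entry written above packs {Valid, VF[7:0]}, so enabling absolute VF 5 yields 0x105 and disabling a queue zone writes 0.
#include <stdint.h>
#include <stdio.h>
/* Bit 8 is the valid bit, bits 7:0 hold the absolute VF id, mirroring
 * the value written by the function above.
 */
static uint32_t perm_entry(uint8_t abs_vf_id, int enable)
{
	return enable ? ((uint32_t)abs_vf_id | (1u << 8)) : 0;
}
int main(void)
{
	printf("VF 5, enabled : 0x%03x\n", (unsigned int)perm_entry(5, 1)); /* 0x105 */
	printf("VF 5, disabled: 0x%03x\n", (unsigned int)perm_entry(5, 0)); /* 0x000 */
	return 0;
}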
static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
/* Reset vf in IGU - interrupts are still disabled */
qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
/* Permission Table */
qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf, u16 num_rx_queues)
{
struct qed_igu_block *igu_blocks;
int qid = 0, igu_id = 0;
u32 val = 0;
igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
while ((qid < num_rx_queues) &&
(igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
struct cau_sb_entry sb_entry;
vf->igu_sbs[qid] = (u16)igu_id;
igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
qed_wr(p_hwfn, p_ptt,
IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
val);
/* Configure the IGU SB in CAU which was marked valid */
qed_init_cau_sb_entry(p_hwfn, &sb_entry,
p_hwfn->rel_pf_id,
vf->abs_vf_id, 1);
qed_dmae_host2grc(p_hwfn, p_ptt,
(u64)(uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
igu_id * sizeof(u64), 2, 0);
qid++;
}
igu_id++;
}
vf->num_sbs = (u8) num_rx_queues;
return vf->num_sbs;
}
static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
int idx, igu_id;
u32 addr, val;
/* Invalidate igu CAM lines and mark them as free */
for (idx = 0; idx < vf->num_sbs; idx++) {
igu_id = vf->igu_sbs[idx];
addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
val = qed_rd(p_hwfn, p_ptt, addr);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
qed_wr(p_hwfn, p_ptt, addr, val);
p_info->igu_map.igu_blocks[igu_id].status |=
QED_IGU_STATUS_FREE;
p_hwfn->hw_info.p_igu_info->free_blks++;
}
vf->num_sbs = 0;
}
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 rel_vf_id, u16 num_rx_queues)
{
u8 num_of_vf_avaiable_chains = 0;
struct qed_vf_info *vf = NULL;
int rc = 0;
u32 cids;
u8 i;
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
if (!vf) {
DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
return -EINVAL;
}
if (vf->b_init) {
DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
return -EINVAL;
}
/* Limit number of queues according to number of CIDs */
qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
vf->relative_vf_id, num_rx_queues, (u16) cids);
num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
p_ptt,
vf,
num_rx_queues);
if (!num_of_vf_avaiable_chains) {
DP_ERR(p_hwfn, "no available igu sbs\n");
return -ENOMEM;
}
/* Choose queue number and index ranges */
vf->num_rxqs = num_of_vf_avaiable_chains;
vf->num_txqs = num_of_vf_avaiable_chains;
for (i = 0; i < vf->num_rxqs; i++) {
u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
vf->igu_sbs[i]);
if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
DP_NOTICE(p_hwfn,
"VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
vf->relative_vf_id, queue_id);
return -EINVAL;
}
/* CIDs are per-VF, so no problem having them 0-based. */
vf->vf_queues[i].fw_rx_qid = queue_id;
vf->vf_queues[i].fw_tx_qid = queue_id;
vf->vf_queues[i].fw_cid = i;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
}
rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
if (!rc) {
vf->b_init = true;
if (IS_LEAD_HWFN(p_hwfn))
p_hwfn->cdev->p_iov_info->num_vfs++;
}
return rc;
}
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 rel_vf_id)
{
struct qed_vf_info *vf = NULL;
int rc = 0;
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!vf) {
DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
return -EINVAL;
}
if (vf->bulletin.p_virt)
memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
if (vf->state != VF_STOPPED) {
/* Stopping the VF */
rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
if (rc != 0) {
DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
rc);
return rc;
}
vf->state = VF_STOPPED;
}
/* disabling interrupts and resetting permission table was done during
* vf-close, however, we could get here without going through vf_close
*/
/* Disable Interrupts for VF */
qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
/* Reset Permission table */
qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
vf->num_rxqs = 0;
vf->num_txqs = 0;
qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
if (vf->b_init) {
vf->b_init = false;
if (IS_LEAD_HWFN(p_hwfn))
p_hwfn->cdev->p_iov_info->num_vfs--;
}
return 0;
}
static bool qed_iov_tlv_supported(u16 tlvtype)
{
return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
struct channel_tlv *tl = (struct channel_tlv *)*offset;
tl->type = type;
tl->length = length;
/* Offset should keep pointing to next TLV (the end of the last) */
*offset += length;
/* Return a pointer to the start of the added tlv */
return *offset - length;
}
/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
u16 i = 1, total_length = 0;
struct channel_tlv *tlv;
do {
tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
/* output tlv */
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"TLV number %d: type %d, length %d\n",
i, tlv->type, tlv->length);
if (tlv->type == CHANNEL_TLV_LIST_END)
return;
/* Validate entry - protect against malicious VFs */
if (!tlv->length) {
DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
return;
}
total_length += tlv->length;
if (total_length >= sizeof(struct tlv_buffer_size)) {
DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
return;
}
i++;
} while (1);
}
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf,
u16 length, u8 status)
{
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct qed_dmae_params params;
u8 eng_vf_id;
mbx->reply_virt->default_resp.hdr.status = status;
qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
eng_vf_id = p_vf->abs_vf_id;
memset(&params, 0, sizeof(struct qed_dmae_params));
params.flags = QED_DMAE_FLAG_VF_DST;
params.dst_vfid = eng_vf_id;
qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
mbx->req_virt->first_tlv.reply_address +
sizeof(u64),
(sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
&params);
qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
mbx->req_virt->first_tlv.reply_address,
sizeof(u64) / 4, &params);
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}
static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
enum qed_iov_vport_update_flag flag)
{
switch (flag) {
case QED_IOV_VP_UPDATE_ACTIVATE:
return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
case QED_IOV_VP_UPDATE_VLAN_STRIP:
return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
case QED_IOV_VP_UPDATE_TX_SWITCH:
return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
case QED_IOV_VP_UPDATE_MCAST:
return CHANNEL_TLV_VPORT_UPDATE_MCAST;
case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
case QED_IOV_VP_UPDATE_RSS:
return CHANNEL_TLV_VPORT_UPDATE_RSS;
case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
case QED_IOV_VP_UPDATE_SGE_TPA:
return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
default:
return 0;
}
}
static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf,
struct qed_iov_vf_mbx *p_mbx,
u8 status,
u16 tlvs_mask, u16 tlvs_accepted)
{
struct pfvf_def_resp_tlv *resp;
u16 size, total_len, i;
memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
p_mbx->offset = (u8 *)p_mbx->reply_virt;
size = sizeof(struct pfvf_def_resp_tlv);
total_len = size;
qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
if (!(tlvs_mask & (1 << i)))
continue;
resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
qed_iov_vport_to_tlv(p_hwfn, i), size);
if (tlvs_accepted & (1 << i))
resp->hdr.status = status;
else
resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d] - vport_update response: TLV %d, status %02x\n",
p_vf->relative_vf_id,
qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
total_len += size;
}
qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
return total_len;
}
static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf_info,
u16 type, u16 length, u8 status)
{
struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
mbx->offset = (u8 *)mbx->reply_virt;
qed_add_tlv(p_hwfn, &mbx->offset, type, length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
u16 relative_vf_id,
bool b_enabled_only)
{
struct qed_vf_info *vf = NULL;
vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
if (!vf)
return NULL;
return &vf->p_vf_info;
}
void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
struct qed_public_vf_info *vf_info;
vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
if (!vf_info)
return;
/* Clear the VF mac */
memset(vf_info->mac, 0, ETH_ALEN);
}
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf)
{
u32 i;
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
p_vf->num_mac_filters = 0;
p_vf->num_vlan_filters = 0;
p_vf->configured_features = 0;
/* If VF previously requested less resources, go back to default */
p_vf->num_rxqs = p_vf->num_sbs;
p_vf->num_txqs = p_vf->num_sbs;
p_vf->num_active_rxqs = 0;
for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
p_vf->vf_queues[i].rxq_active = 0;
memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
struct pf_vf_resc *resc = &resp->resc;
/* Validate FW compatibility */
if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
DP_INFO(p_hwfn,
"VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
vf->abs_vf_id,
req->vfdev_info.fw_major,
req->vfdev_info.fw_minor,
req->vfdev_info.fw_revision,
req->vfdev_info.fw_engineering,
FW_MAJOR_VERSION,
FW_MINOR_VERSION,
FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
/* On 100g PFs, prevent old VFs from loading */
if ((p_hwfn->cdev->num_hwfns > 1) &&
!(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
DP_INFO(p_hwfn,
"VF[%d] is running an old driver that doesn't support 100g\n",
vf->abs_vf_id);
vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
memset(resp, 0, sizeof(*resp));
/* Fill in vf info stuff */
vf->opaque_fid = req->vfdev_info.opaque_fid;
vf->num_mac_filters = 1;
vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
vf->vf_bulletin = req->bulletin_addr;
vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
vf->bulletin.size : req->bulletin_size;
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->cdev->chip_num;
pfdev_info->db_size = 0;
pfdev_info->indices_per_sb = PIS_PER_SB;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
if (p_hwfn->cdev->num_hwfns > 1)
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
pfdev_info->stats_info.mstats.address =
PXP_VF_BAR0_START_MSDM_ZONE_B +
offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
pfdev_info->stats_info.mstats.len =
sizeof(struct eth_mstorm_per_queue_stat);
pfdev_info->stats_info.ustats.address =
PXP_VF_BAR0_START_USDM_ZONE_B +
offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
pfdev_info->stats_info.ustats.len =
sizeof(struct eth_ustorm_per_queue_stat);
pfdev_info->stats_info.pstats.address =
PXP_VF_BAR0_START_PSDM_ZONE_B +
offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
pfdev_info->stats_info.pstats.len =
sizeof(struct eth_pstorm_per_queue_stat);
pfdev_info->stats_info.tstats.address = 0;
pfdev_info->stats_info.tstats.len = 0;
memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
pfdev_info->fw_major = FW_MAJOR_VERSION;
pfdev_info->fw_minor = FW_MINOR_VERSION;
pfdev_info->fw_rev = FW_REVISION_VERSION;
pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
pfdev_info->dev_type = p_hwfn->cdev->type;
pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
resc->num_rxqs = vf->num_rxqs;
resc->num_txqs = vf->num_txqs;
resc->num_sbs = vf->num_sbs;
for (i = 0; i < resc->num_sbs; i++) {
resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
resc->hw_sbs[i].sb_qid = 0;
}
for (i = 0; i < resc->num_rxqs; i++) {
qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
(u16 *)&resc->hw_qid[i]);
resc->cid[i] = vf->vf_queues[i].fw_cid;
}
resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
req->resc_request.num_mac_filters);
resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
req->resc_request.num_vlan_filters);
/* This isn't really required as VF isn't limited, but some VFs might
* actually test this value, so we need to provide it.
*/
resc->num_mc_filters = req->resc_request.num_mc_filters;
/* Fill agreed size of bulletin board in response */
resp->bulletin_size = vf->bulletin.size;
qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
"resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
vf->abs_vf_id,
resp->pfdev_info.chip_num,
resp->pfdev_info.db_size,
resp->pfdev_info.indices_per_sb,
resp->pfdev_info.capabilities,
resc->num_rxqs,
resc->num_txqs,
resc->num_sbs,
resc->num_mac_filters,
resc->num_vlan_filters);
vf->state = VF_ACQUIRED;
/* Prepare Response */
out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}
static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, bool val)
{
struct qed_sp_vport_update_params params;
int rc;
if (val == p_vf->spoof_chk) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Spoofchk value[%d] is already configured\n", val);
return 0;
}
memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
params.opaque_fid = p_vf->opaque_fid;
params.vport_id = p_vf->vport_id;
params.update_anti_spoofing_en_flg = 1;
params.anti_spoofing_en = val;
rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
if (!rc) {
p_vf->spoof_chk = val;
p_vf->req_spoofchk_val = p_vf->spoof_chk;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Spoofchk val[%d] configured\n", val);
} else {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Spoofchk configuration[val:%d] failed for VF[%d]\n",
val, p_vf->relative_vf_id);
}
return rc;
}
static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf)
{
struct qed_filter_ucast filter;
int rc = 0;
int i;
memset(&filter, 0, sizeof(filter));
filter.is_rx_filter = 1;
filter.is_tx_filter = 1;
filter.vport_to_add_to = p_vf->vport_id;
filter.opcode = QED_FILTER_ADD;
/* Reconfigure vlans */
for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
if (!p_vf->shadow_config.vlans[i].used)
continue;
filter.type = QED_FILTER_VLAN;
filter.vlan = p_vf->shadow_config.vlans[i].vid;
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
filter.vlan, p_vf->relative_vf_id);
rc = qed_sp_eth_filter_ucast(p_hwfn,
p_vf->opaque_fid,
&filter,
QED_SPQ_MODE_CB, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to configure VLAN [%04x] to VF [%04x]\n",
filter.vlan, p_vf->relative_vf_id);
break;
}
}
return rc;
}
static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, u64 events)
{
int rc = 0;
if ((events & (1 << VLAN_ADDR_FORCED)) &&
!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
return rc;
}
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, u64 events)
{
int rc = 0;
struct qed_filter_ucast filter;
if (!p_vf->vport_instance)
return -EINVAL;
if (events & (1 << MAC_ADDR_FORCED)) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
memset(&filter, 0, sizeof(filter));
filter.type = QED_FILTER_MAC;
filter.opcode = QED_FILTER_REPLACE;
filter.is_rx_filter = 1;
filter.is_tx_filter = 1;
filter.vport_to_add_to = p_vf->vport_id;
ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
&filter, QED_SPQ_MODE_CB, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"PF failed to configure MAC for VF\n");
return rc;
}
p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
}
if (events & (1 << VLAN_ADDR_FORCED)) {
struct qed_sp_vport_update_params vport_update;
u8 removal;
int i;
memset(&filter, 0, sizeof(filter));
filter.type = QED_FILTER_VLAN;
filter.is_rx_filter = 1;
filter.is_tx_filter = 1;
filter.vport_to_add_to = p_vf->vport_id;
filter.vlan = p_vf->bulletin.p_virt->pvid;
filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
QED_FILTER_FLUSH;
/* Send the ramrod */
rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
&filter, QED_SPQ_MODE_CB, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"PF failed to configure VLAN for VF\n");
return rc;
}
/* Update the default-vlan & silent vlan stripping */
memset(&vport_update, 0, sizeof(vport_update));
vport_update.opaque_fid = p_vf->opaque_fid;
vport_update.vport_id = p_vf->vport_id;
vport_update.update_default_vlan_enable_flg = 1;
vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
vport_update.update_default_vlan_flg = 1;
vport_update.default_vlan = filter.vlan;
vport_update.update_inner_vlan_removal_flg = 1;
removal = filter.vlan ? 1
: p_vf->shadow_config.inner_vlan_removal;
vport_update.inner_vlan_removal_flg = removal;
vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
rc = qed_sp_vport_update(p_hwfn,
&vport_update,
QED_SPQ_MODE_EBLOCK, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"PF failed to configure VF vport for vlan\n");
return rc;
}
/* Update all the Rx queues */
for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
u16 qid;
if (!p_vf->vf_queues[i].rxq_active)
continue;
qid = p_vf->vf_queues[i].fw_rx_qid;
rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
1, 0, 1,
QED_SPQ_MODE_EBLOCK,
NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to send Rx update fo queue[0x%04x]\n",
qid);
return rc;
}
}
if (filter.vlan)
p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
else
p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
}
/* If forced features are terminated, we need to configure the shadow
* configuration back again.
*/
if (events)
qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
return rc;
}
static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
struct qed_sp_vport_start_params params = { 0 };
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct vfpf_vport_start_tlv *start;
u8 status = PFVF_STATUS_SUCCESS;
struct qed_vf_info *vf_info;
u64 *p_bitmap;
int sb_id;
int rc;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Failed to get VF info, invalid vfid [%d]\n",
vf->relative_vf_id);
return;
}
vf->state = VF_ENABLED;
start = &mbx->req_virt->start_vport;
/* Initialize Status block in CAU */
for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
if (!start->sb_addr[sb_id]) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d] did not fill the address of SB %d\n",
vf->relative_vf_id, sb_id);
break;
}
qed_int_cau_conf_sb(p_hwfn, p_ptt,
start->sb_addr[sb_id],
vf->igu_sbs[sb_id],
vf->abs_vf_id, 1);
}
qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
vf->mtu = start->mtu;
vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
/* Take into consideration configuration forced by hypervisor;
* If none is configured, use the supplied VF values [for old
* vfs that would still be fine, since they passed '0' as padding].
*/
p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
u8 vf_req = start->only_untagged;
vf_info->bulletin.p_virt->default_only_untagged = vf_req;
*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
}
params.tpa_mode = start->tpa_mode;
params.remove_inner_vlan = start->inner_vlan_removal;
params.tx_switching = true;
params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
params.drop_ttl0 = false;
params.concrete_fid = vf->concrete_fid;
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc != 0) {
DP_ERR(p_hwfn,
"qed_iov_vf_mbx_start_vport returned error %d\n", rc);
status = PFVF_STATUS_FAILURE;
} else {
vf->vport_instance++;
/* Force configuration if needed on the newly opened vport */
qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
}
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
u8 status = PFVF_STATUS_SUCCESS;
int rc;
vf->vport_instance--;
vf->spoof_chk = false;
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc != 0) {
DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
rc);
status = PFVF_STATUS_FAILURE;
}
/* Forget the configuration on the vport */
vf->configured_features = 0;
memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
sizeof(struct pfvf_def_resp_tlv), status);
}
#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
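/* The Rx producer address returned to the VF in the START_RXQ response is
* built from these macros: the queue-zone region base, plus the absolute HW
* queue-id times the per-queue zone size, plus the offset of the
* rx_producers field inside struct mstorm_eth_queue_zone.
*/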
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf, u8 status)
{
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
struct vfpf_start_rxq_tlv *req;
mbx->offset = (u8 *)mbx->reply_virt;
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
sizeof(*p_tlv));
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
if (status == PFVF_STATUS_SUCCESS) {
u16 hw_qid = 0;
req = &mbx->req_virt->start_rxq;
qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
&hw_qid);
p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
hw_qid * MSTORM_QZONE_SIZE +
offsetof(struct mstorm_eth_queue_zone,
rx_producers);
}
qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_SUCCESS;
struct vfpf_start_rxq_tlv *req;
int rc;
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_rxq;
params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
params.vport_id = vf->vport_id;
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
vf->vf_queues[req->rx_qid].fw_cid,
&params,
vf->abs_vf_id + 0x10,
req->bd_max_bytes,
req->rxq_addr,
req->cqe_pbl_addr, req->cqe_pbl_size);
if (rc) {
status = PFVF_STATUS_FAILURE;
} else {
vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
union qed_qm_pq_params pq_params;
u8 status = PFVF_STATUS_SUCCESS;
struct vfpf_start_txq_tlv *req;
int rc;
/* Prepare the parameters which would choose the right PQ */
memset(&pq_params, 0, sizeof(pq_params));
pq_params.eth.is_vf = 1;
pq_params.eth.vf_id = vf->relative_vf_id;
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
params.vport_id = vf->vport_id;
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
vf->opaque_fid,
vf->vf_queues[req->tx_qid].fw_cid,
&params,
vf->abs_vf_id + 0x10,
req->pbl_addr,
req->pbl_size, &pq_params);
if (rc)
status = PFVF_STATUS_FAILURE;
else
vf->vf_queues[req->tx_qid].txq_active = true;
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
length, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
u16 rxq_id, u8 num_rxqs, bool cqe_completion)
{
int rc = 0;
int qid;
if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
return -EINVAL;
for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
if (vf->vf_queues[qid].rxq_active) {
rc = qed_sp_eth_rx_queue_stop(p_hwfn,
vf->vf_queues[qid].
fw_rx_qid, false,
cqe_completion);
if (rc)
return rc;
}
vf->vf_queues[qid].rxq_active = false;
vf->num_active_rxqs--;
}
return rc;
}
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
{
int rc = 0;
int qid;
if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
return -EINVAL;
for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
if (vf->vf_queues[qid].txq_active) {
rc = qed_sp_eth_tx_queue_stop(p_hwfn,
vf->vf_queues[qid].
fw_tx_qid);
if (rc)
return rc;
}
vf->vf_queues[qid].txq_active = false;
}
return rc;
}
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_SUCCESS;
struct vfpf_stop_rxqs_tlv *req;
int rc;
/* We give the option of starting from qid != 0; in this case we
* need to make sure that qid + num_qs doesn't exceed the actual
* number of queues that exist.
*/
req = &mbx->req_virt->stop_rxqs;
rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
req->num_rxqs, req->cqe_completion);
if (rc)
status = PFVF_STATUS_FAILURE;
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
length, status);
}
static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_SUCCESS;
struct vfpf_stop_txqs_tlv *req;
int rc;
/* We give the option of starting from qid != 0; in this case we
* need to make sure that qid + num_qs doesn't exceed the actual
* number of queues that exist.
*/
req = &mbx->req_virt->stop_txqs;
rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
if (rc)
status = PFVF_STATUS_FAILURE;
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
length, status);
}
static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct vfpf_update_rxq_tlv *req;
u8 status = PFVF_STATUS_SUCCESS;
u8 complete_event_flg;
u8 complete_cqe_flg;
u16 qid;
int rc;
u8 i;
req = &mbx->req_virt->update_rxq;
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
for (i = 0; i < req->num_rxqs; i++) {
qid = req->rx_qid + i;
if (!vf->vf_queues[qid].rxq_active) {
DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n",
qid);
status = PFVF_STATUS_FAILURE;
break;
}
rc = qed_sp_eth_rx_queues_update(p_hwfn,
vf->vf_queues[qid].fw_rx_qid,
1,
complete_cqe_flg,
complete_event_flg,
QED_SPQ_MODE_EBLOCK, NULL);
if (rc) {
status = PFVF_STATUS_FAILURE;
break;
}
}
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
length, status);
}
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type)
{
struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
int len = 0;
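/* Walk the TLV chain: advance by each TLV's length until either the
* requested type or CHANNEL_TLV_LIST_END is found, bailing out on a
* zero-length TLV or if the accumulated length overruns TLV_BUFFER_SIZE.
*/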
do {
if (!p_tlv->length) {
DP_NOTICE(p_hwfn, "Zero length TLV found\n");
return NULL;
}
if (p_tlv->type == req_type) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Extended tlv type %d, length %d found\n",
p_tlv->type, p_tlv->length);
return p_tlv;
}
len += p_tlv->length;
p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
return NULL;
}
} while (p_tlv->type != CHANNEL_TLV_LIST_END);
return NULL;
}
static void
qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_data,
struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
struct vfpf_vport_update_activate_tlv *p_act_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
if (!p_act_tlv)
return;
p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
p_data->vport_active_rx_flg = p_act_tlv->active_rx;
p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
p_data->vport_active_tx_flg = p_act_tlv->active_tx;
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
}
static void
qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_data,
struct qed_vf_info *p_vf,
struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
if (!p_vlan_tlv)
return;
p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
/* Ignore the VF request if we're forcing a vlan */
if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
p_data->update_inner_vlan_removal_flg = 1;
p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
}
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
}
static void
qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_data,
struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
tlv);
if (!p_tx_switch_tlv)
return;
p_data->update_tx_switching_flg = 1;
p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
}
static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_data,
struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
if (!p_mcast_tlv)
return;
p_data->update_approx_mcast_flg = 1;
memcpy(p_data->bins, p_mcast_tlv->bins,
sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}
static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_data,
struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
if (!p_accept_tlv)
return;
p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}
static void
qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_data,
struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
tlv);
if (!p_accept_any_vlan)
return;
p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
p_data->update_accept_any_vlan_flg =
p_accept_any_vlan->update_accept_any_vlan_flg;
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}
static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
struct qed_sp_vport_update_params *p_data,
struct qed_rss_params *p_rss,
struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
u16 i, q_idx, max_q_idx;
u16 table_size;
p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
if (!p_rss_tlv) {
p_data->rss_params = NULL;
return;
}
memset(p_rss, 0, sizeof(struct qed_rss_params));
p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
VFPF_UPDATE_RSS_CONFIG_FLAG);
p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
VFPF_UPDATE_RSS_CAPS_FLAG);
p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
VFPF_UPDATE_RSS_IND_TABLE_FLAG);
p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
VFPF_UPDATE_RSS_KEY_FLAG);
p_rss->rss_enable = p_rss_tlv->rss_enable;
p_rss->rss_eng_id = vf->relative_vf_id + 1;
p_rss->rss_caps = p_rss_tlv->rss_caps;
p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
sizeof(p_rss->rss_ind_table));
memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
(1 << p_rss_tlv->rss_table_size_log));
max_q_idx = ARRAY_SIZE(vf->vf_queues);
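/* Translate the VF-relative queue indices in the indirection table into
* absolute FW rx queue-ids; invalid or inactive entries fall back to the
* VF's first rx queue.
*/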
for (i = 0; i < table_size; i++) {
u16 index = vf->vf_queues[0].fw_rx_qid;
q_idx = p_rss->rss_ind_table[i];
if (q_idx >= max_q_idx)
DP_NOTICE(p_hwfn,
"rss_ind_table[%d] = %d, rxq is out of range\n",
i, q_idx);
else if (!vf->vf_queues[q_idx].rxq_active)
DP_NOTICE(p_hwfn,
"rss_ind_table[%d] = %d, rxq is not active\n",
i, q_idx);
else
index = vf->vf_queues[q_idx].fw_rx_qid;
p_rss->rss_ind_table[i] = index;
}
p_data->rss_params = p_rss;
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
}
static void
qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
struct qed_sp_vport_update_params *p_data,
struct qed_sge_tpa_params *p_sge_tpa,
struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
if (!p_sge_tpa_tlv) {
p_data->sge_tpa_params = NULL;
return;
}
memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
p_sge_tpa->update_tpa_en_flg =
!!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
p_sge_tpa->update_tpa_param_flg =
!!(p_sge_tpa_tlv->update_sge_tpa_flags &
VFPF_UPDATE_TPA_PARAM_FLAG);
p_sge_tpa->tpa_ipv4_en_flg =
!!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
p_sge_tpa->tpa_ipv6_en_flg =
!!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
p_sge_tpa->tpa_pkt_split_flg =
!!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
p_sge_tpa->tpa_hdr_data_split_flg =
!!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
p_sge_tpa->tpa_gro_consistent_flg =
!!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
p_data->sge_tpa_params = p_sge_tpa;
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
struct qed_sp_vport_update_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct qed_sge_tpa_params sge_tpa_params;
struct qed_rss_params rss_params;
u8 status = PFVF_STATUS_SUCCESS;
u16 tlvs_mask = 0;
u16 length;
int rc;
memset(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
params.rss_params = NULL;
/* Search for extended tlvs list and update values
* from VF in struct qed_sp_vport_update_params.
*/
qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
mbx, &tlvs_mask);
qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
&sge_tpa_params, mbx, &tlvs_mask);
/* All features of the vport-update ramrod are requested by the VF as
* extended TLVs; if the buffer contains no extended TLV at all, log a
* message and fail the request in the response.
*/
if (!tlvs_mask) {
DP_NOTICE(p_hwfn,
"No feature tlvs found for vport update\n");
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
if (rc)
status = PFVF_STATUS_FAILURE;
out:
length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
tlvs_mask, tlvs_mask);
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf,
struct qed_filter_ucast *p_params)
{
int i;
if (p_params->type == QED_FILTER_MAC)
return 0;
/* First remove entries and then add new ones */
if (p_params->opcode == QED_FILTER_REMOVE) {
for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
if (p_vf->shadow_config.vlans[i].used &&
p_vf->shadow_config.vlans[i].vid ==
p_params->vlan) {
p_vf->shadow_config.vlans[i].used = false;
break;
}
if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF [%d] - Tries to remove a non-existing vlan\n",
p_vf->relative_vf_id);
return -EINVAL;
}
} else if (p_params->opcode == QED_FILTER_REPLACE ||
p_params->opcode == QED_FILTER_FLUSH) {
for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
p_vf->shadow_config.vlans[i].used = false;
}
/* In forced mode, we're willing to remove entries - but we don't add
* new ones.
*/
if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
return 0;
if (p_params->opcode == QED_FILTER_ADD ||
p_params->opcode == QED_FILTER_REPLACE) {
for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
if (p_vf->shadow_config.vlans[i].used)
continue;
p_vf->shadow_config.vlans[i].used = true;
p_vf->shadow_config.vlans[i].vid = p_params->vlan;
break;
}
if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF [%d] - Tries to configure more than %d vlan filters\n",
p_vf->relative_vf_id,
QED_ETH_VF_NUM_VLAN_FILTERS + 1);
return -EINVAL;
}
}
return 0;
}
int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
int vfid, struct qed_filter_ucast *params)
{
struct qed_public_vf_info *vf;
vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
if (!vf)
return -EINVAL;
/* No real decision to make; Store the configured MAC */
if (params->type == QED_FILTER_MAC ||
params->type == QED_FILTER_MAC_VLAN)
ether_addr_copy(vf->mac, params->mac);
return 0;
}
static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct vfpf_ucast_filter_tlv *req;
u8 status = PFVF_STATUS_SUCCESS;
struct qed_filter_ucast params;
int rc;
/* Prepare the unicast filter params */
memset(&params, 0, sizeof(struct qed_filter_ucast));
req = &mbx->req_virt->ucast_filter;
params.opcode = (enum qed_filter_opcode)req->opcode;
params.type = (enum qed_filter_ucast_type)req->type;
params.is_rx_filter = 1;
params.is_tx_filter = 1;
params.vport_to_remove_from = vf->vport_id;
params.vport_to_add_to = vf->vport_id;
memcpy(params.mac, req->mac, ETH_ALEN);
params.vlan = req->vlan;
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
vf->abs_vf_id, params.opcode, params.type,
params.is_rx_filter ? "RX" : "",
params.is_tx_filter ? "TX" : "",
params.vport_to_add_to,
params.mac[0], params.mac[1],
params.mac[2], params.mac[3],
params.mac[4], params.mac[5], params.vlan);
if (!vf->vport_instance) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
vf->abs_vf_id);
status = PFVF_STATUS_FAILURE;
goto out;
}
/* Update shadow copy of the VF configuration */
if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
status = PFVF_STATUS_FAILURE;
goto out;
}
/* Determine if the unicast filtering is acceptable to the PF */
if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
/* Once VLAN is forced or PVID is set, do not allow
* to add/replace any further VLANs.
*/
if (params.opcode == QED_FILTER_ADD ||
params.opcode == QED_FILTER_REPLACE)
status = PFVF_STATUS_FORCED;
goto out;
}
if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
(params.type == QED_FILTER_MAC ||
params.type == QED_FILTER_MAC_VLAN)) {
if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
(params.opcode != QED_FILTER_ADD &&
params.opcode != QED_FILTER_REPLACE))
status = PFVF_STATUS_FORCED;
goto out;
}
rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
if (rc) {
status = PFVF_STATUS_FAILURE;
goto out;
}
rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
QED_SPQ_MODE_CB, NULL);
if (rc)
status = PFVF_STATUS_FAILURE;
out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
int i;
/* Reset the SBs */
for (i = 0; i < vf->num_sbs; i++)
qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
vf->igu_sbs[i],
vf->opaque_fid, false);
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
sizeof(struct pfvf_def_resp_tlv),
PFVF_STATUS_SUCCESS);
}
static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
u8 status = PFVF_STATUS_SUCCESS;
/* Disable Interrupts for VF */
qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
/* Reset Permission table */
qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
length, status);
}
static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf)
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
qed_iov_vf_cleanup(p_hwfn, p_vf);
qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
length, PFVF_STATUS_SUCCESS);
}
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
int cnt;
u32 val;
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
for (cnt = 0; cnt < 50; cnt++) {
val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
if (!val)
break;
msleep(20);
}
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
if (cnt == 50) {
DP_ERR(p_hwfn,
"VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
p_vf->abs_vf_id, val);
return -EBUSY;
}
return 0;
}
static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
int i, cnt;
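/* Sample how far each VOQ's producer is ahead of its consumer, then poll
* until every consumer has advanced by at least that distance, i.e. all
* blocks queued on behalf of the VF have drained.
*/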
/* Read initial consumers & producers */
for (i = 0; i < MAX_NUM_VOQS; i++) {
u32 prod;
cons[i] = qed_rd(p_hwfn, p_ptt,
PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
i * 0x40);
prod = qed_rd(p_hwfn, p_ptt,
PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
i * 0x40);
distance[i] = prod - cons[i];
}
/* Wait for consumers to pass the producers */
i = 0;
for (cnt = 0; cnt < 50; cnt++) {
for (; i < MAX_NUM_VOQS; i++) {
u32 tmp;
tmp = qed_rd(p_hwfn, p_ptt,
PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
i * 0x40);
if (distance[i] > tmp - cons[i])
break;
}
if (i == MAX_NUM_VOQS)
break;
msleep(20);
}
if (cnt == 50) {
DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
p_vf->abs_vf_id, i);
return -EBUSY;
}
return 0;
}
static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
int rc;
rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
if (rc)
return rc;
rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
if (rc)
return rc;
return 0;
}
static int
qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 rel_vf_id, u32 *ack_vfs)
{
struct qed_vf_info *p_vf;
int rc = 0;
p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
if (!p_vf)
return 0;
if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
(1ULL << (rel_vf_id % 64))) {
u16 vfid = p_vf->abs_vf_id;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d] - Handling FLR\n", vfid);
qed_iov_vf_cleanup(p_hwfn, p_vf);
/* If VF isn't active, no need for anything but SW */
if (!p_vf->b_init)
goto cleanup;
rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
if (rc)
goto cleanup;
rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
if (rc) {
DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
return rc;
}
/* VF_STOPPED has to be set only after final cleanup
* but prior to re-enabling the VF.
*/
p_vf->state = VF_STOPPED;
rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
if (rc) {
DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
vfid);
return rc;
}
cleanup:
/* Mark VF for ack and clean pending state */
if (p_vf->state == VF_RESET)
p_vf->state = VF_STOPPED;
ack_vfs[vfid / 32] |= (1 << (vfid % 32));
p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
~(1ULL << (rel_vf_id % 64));
p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
~(1ULL << (rel_vf_id % 64));
}
return rc;
}
int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 ack_vfs[VF_MAX_STATIC / 32];
int rc = 0;
u16 i;
memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
/* Since BRB <-> PRS interface can't be tested as part of the flr
* polling due to HW limitations, simply sleep a bit. And since
* there's no need to wait per-vf, do it before looping.
*/
msleep(100);
for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
return rc;
}
int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
u16 i, found = 0;
DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"[%08x,...,%08x]: %08x\n",
i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
if (!p_hwfn->cdev->p_iov_info) {
DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
return 0;
}
/* Mark VFs */
for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
struct qed_vf_info *p_vf;
u8 vfid;
p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
if (!p_vf)
continue;
vfid = p_vf->abs_vf_id;
if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
u16 rel_vf_id = p_vf->relative_vf_id;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d] [rel %d] got FLR-ed\n",
vfid, rel_vf_id);
p_vf->state = VF_RESET;
/* No need to lock here, since pending_flr should
* only change here and before ACKing the MFW. Since
* the MFW will not trigger an additional attention for
* VF flr until we ACK, we're safe.
*/
p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
found = 1;
}
}
return found;
}
void qed_iov_set_link(struct qed_hwfn *p_hwfn,
u16 vfid,
struct qed_mcp_link_params *params,
struct qed_mcp_link_state *link,
struct qed_mcp_link_capabilities *p_caps)
{
struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
vfid,
false);
struct qed_bulletin_content *p_bulletin;
if (!p_vf)
return;
p_bulletin = p_vf->bulletin.p_virt;
p_bulletin->req_autoneg = params->speed.autoneg;
p_bulletin->req_adv_speed = params->speed.advertised_speeds;
p_bulletin->req_forced_speed = params->speed.forced_speed;
p_bulletin->req_autoneg_pause = params->pause.autoneg;
p_bulletin->req_forced_rx = params->pause.forced_rx;
p_bulletin->req_forced_tx = params->pause.forced_tx;
p_bulletin->req_loopback = params->loopback_mode;
p_bulletin->link_up = link->link_up;
p_bulletin->speed = link->speed;
p_bulletin->full_duplex = link->full_duplex;
p_bulletin->autoneg = link->an;
p_bulletin->autoneg_complete = link->an_complete;
p_bulletin->parallel_detection = link->parallel_detection;
p_bulletin->pfc_enabled = link->pfc_enabled;
p_bulletin->partner_adv_speed = link->partner_adv_speed;
p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
p_bulletin->partner_adv_pause = link->partner_adv_pause;
p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
p_bulletin->capability_speed = p_caps->speed_capabilities;
}
static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
u16 vfid,
struct qed_mcp_link_params *p_params,
struct qed_mcp_link_state *p_link,
struct qed_mcp_link_capabilities *p_caps)
{
struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
vfid,
false);
struct qed_bulletin_content *p_bulletin;
if (!p_vf)
return;
p_bulletin = p_vf->bulletin.p_virt;
if (p_params)
__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
if (p_link)
__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
if (p_caps)
__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid)
{
struct qed_iov_vf_mbx *mbx;
struct qed_vf_info *p_vf;
int i;
p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!p_vf)
return;
mbx = &p_vf->vf_mbx;
/* qed_iov_process_mbx_request */
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
mbx->first_tlv = mbx->req_virt->first_tlv;
/* check if tlv type is known */
if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
switch (mbx->first_tlv.tl.type) {
case CHANNEL_TLV_ACQUIRE:
qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_VPORT_START:
qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_VPORT_TEARDOWN:
qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_START_RXQ:
qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_START_TXQ:
qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_STOP_RXQS:
qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_STOP_TXQS:
qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_UPDATE_RXQ:
qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_VPORT_UPDATE:
qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_UCAST_FILTER:
qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_CLOSE:
qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_INT_CLEANUP:
qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_RELEASE:
qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
break;
}
} else {
/* unknown TLV - this may belong to a VF driver from the future
* - a version written after this PF driver was written, which
* supports features unknown as of yet. Too bad since we don't
* support them. Or this may be because someone wrote a crappy
* VF driver and is sending garbage over the channel.
*/
DP_ERR(p_hwfn,
"unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
for (i = 0; i < 20; i++) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"%x ",
mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
}
}
}
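/* Each VF owns a single bit in the pending-events bitmap: word vfid / 64,
* bit vfid % 64. The bit is set from the EQE handling path and consumed by
* the IOV workqueue task.
*/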
void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
u64 add_bit = 1ULL << (vfid % 64);
p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}
static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
u64 *events)
{
u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
u16 abs_vfid, struct regpair *vf_msg)
{
u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
struct qed_vf_info *p_vf;
if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
abs_vfid);
return 0;
}
p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
/* Record the physical address of the request so that the handler
* can later copy the message from it.
*/
p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
/* Mark the event and schedule the workqueue */
qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
return 0;
}
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode, __le16 echo, union event_ring_data *data)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
&data->vf_pf_channel.msg_addr);
default:
DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
opcode);
return -EINVAL;
}
}
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
u16 i;
if (!p_iov)
goto out;
for (i = rel_vf_id; i < p_iov->total_vfs; i++)
if (qed_iov_is_valid_vfid(p_hwfn, i, true))
return i;
out:
return MAX_NUM_VFS;
}
static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
int vfid)
{
struct qed_dmae_params params;
struct qed_vf_info *vf_info;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!vf_info)
return -EINVAL;
memset(&params, 0, sizeof(struct qed_dmae_params));
params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
params.src_vfid = vf_info->abs_vf_id;
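/* Copy the entire request mailbox from the VF into the PF's per-VF request
* buffer; the DMAE size argument is presumably expressed in dwords, hence
* the division by 4.
*/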
if (qed_dmae_host2host(p_hwfn, ptt,
vf_info->vf_mbx.pending_req,
vf_info->vf_mbx.req_phys,
sizeof(union vfpf_tlvs) / 4, &params)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Failed to copy message from VF 0x%02x\n", vfid);
return -EIO;
}
return 0;
}
static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
u8 *mac, int vfid)
{
struct qed_vf_info *vf_info;
u64 feature;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Can not set forced MAC, invalid vfid [%d]\n", vfid);
return;
}
feature = 1 << MAC_ADDR_FORCED;
memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
vf_info->bulletin.p_virt->valid_bitmap |= feature;
/* Forced MAC will disable MAC_ADDR */
vf_info->bulletin.p_virt->valid_bitmap &=
~(1 << VFPF_BULLETIN_MAC_ADDR);
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
u16 pvid, int vfid)
{
struct qed_vf_info *vf_info;
u64 feature;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Can not set forced MAC, invalid vfid [%d]\n", vfid);
return;
}
feature = 1 << VLAN_ADDR_FORCED;
vf_info->bulletin.p_virt->pvid = pvid;
if (pvid)
vf_info->bulletin.p_virt->valid_bitmap |= feature;
else
vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!p_vf_info)
return false;
return !!p_vf_info->vport_instance;
}
bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!p_vf_info)
return true;
return p_vf_info->state == VF_STOPPED;
}
static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *vf_info;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!vf_info)
return false;
return vf_info->spoof_chk;
}
int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
struct qed_vf_info *vf;
int rc = -EINVAL;
if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
DP_NOTICE(p_hwfn,
"SR-IOV sanity check failed, can't set spoofchk\n");
goto out;
}
vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!vf)
goto out;
if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
/* After VF VPORT start PF will configure spoof check */
vf->req_spoofchk_val = val;
rc = 0;
goto out;
}
rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
out:
return rc;
}
static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
{
struct qed_vf_info *p_vf;
p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!p_vf || !p_vf->bulletin.p_virt)
return NULL;
if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
return NULL;
return p_vf->bulletin.p_virt->mac;
}
u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
struct qed_vf_info *p_vf;
p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!p_vf || !p_vf->bulletin.p_virt)
return 0;
if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
return 0;
return p_vf->bulletin.p_virt->pvid;
}
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid, int val)
{
struct qed_vf_info *vf;
u8 abs_vp_id = 0;
int rc;
vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf)
return -EINVAL;
rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
if (rc)
return rc;
return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
struct qed_vf_info *vf;
u8 vport_id;
int i;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
DP_NOTICE(p_hwfn,
"SR-IOV sanity check failed, can't set min rate\n");
return -EINVAL;
}
}
vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
vport_id = vf->vport_id;
return qed_configure_vport_wfq(cdev, vport_id, rate);
}
static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_wfq_data *vf_vp_wfq;
struct qed_vf_info *vf_info;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!vf_info)
return 0;
vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
if (vf_vp_wfq->configured)
return vf_vp_wfq->min_speed;
else
return 0;
}
/**
* qed_schedule_iov - schedules IOV task for VF and PF
* @hwfn: hardware function pointer
* @flag: IOV flag for VF/PF
*/
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
smp_mb__before_atomic();
set_bit(flag, &hwfn->iov_task_flags);
smp_mb__after_atomic();
DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}
void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
int i;
for_each_hwfn(cdev, i)
queue_delayed_work(cdev->hwfns[i].iov_wq,
&cdev->hwfns[i].iov_task, 0);
}
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
int i, j;
for_each_hwfn(cdev, i)
if (cdev->hwfns[i].iov_wq)
flush_workqueue(cdev->hwfns[i].iov_wq);
/* Mark VFs for disablement */
qed_iov_set_vfs_to_disable(cdev, true);
if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
pci_disable_sriov(cdev->pdev);
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
/* Failure to acquire the ptt in 100g creates an odd error
* where the first engine has already released IOV.
*/
if (!ptt) {
DP_ERR(hwfn, "Failed to acquire ptt\n");
return -EBUSY;
}
/* Clean WFQ db and configure equal weight for all vports */
qed_clean_wfq_db(hwfn, ptt);
qed_for_each_vf(hwfn, j) {
int k;
if (!qed_iov_is_valid_vfid(hwfn, j, true))
continue;
/* Wait until VF is disabled before releasing */
for (k = 0; k < 100; k++) {
if (!qed_iov_is_vf_stopped(hwfn, j))
msleep(20);
else
break;
}
if (k < 100)
qed_iov_release_hw_for_vf(&cdev->hwfns[i],
ptt, j);
else
DP_ERR(hwfn,
"Timeout waiting for VF's FLR to end\n");
}
qed_ptt_release(hwfn, ptt);
}
qed_iov_set_vfs_to_disable(cdev, false);
return 0;
}
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
struct qed_sb_cnt_info sb_cnt_info;
int i, j, rc;
if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
DP_NOTICE(cdev, "Can start at most %d VFs\n",
RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
return -EINVAL;
}
/* Initialize HW for VF access */
for_each_hwfn(cdev, j) {
struct qed_hwfn *hwfn = &cdev->hwfns[j];
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
int num_sbs = 0, limit = 16;
if (!ptt) {
DP_ERR(hwfn, "Failed to acquire ptt\n");
rc = -EBUSY;
goto err;
}
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(hwfn, &sb_cnt_info);
num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
for (i = 0; i < num; i++) {
if (!qed_iov_is_valid_vfid(hwfn, i, false))
continue;
rc = qed_iov_init_hw_for_vf(hwfn,
ptt, i, num_sbs / num);
if (rc) {
DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
qed_ptt_release(hwfn, ptt);
goto err;
}
}
qed_ptt_release(hwfn, ptt);
}
/* Enable SRIOV PCIe functions */
rc = pci_enable_sriov(cdev->pdev, num);
if (rc) {
DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
goto err;
}
return num;
err:
qed_sriov_disable(cdev, false);
return rc;
}
static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
if (!IS_QED_SRIOV(cdev)) {
DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
return -EOPNOTSUPP;
}
if (num_vfs_param)
return qed_sriov_enable(cdev, num_vfs_param);
else
return qed_sriov_disable(cdev, true);
}
static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
int i;
if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
DP_VERBOSE(cdev, QED_MSG_IOV,
"Cannot set a VF MAC; Sriov is not enabled\n");
return -EINVAL;
}
if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
DP_VERBOSE(cdev, QED_MSG_IOV,
"Cannot set VF[%d] MAC (VF is not active)\n", vfid);
return -EINVAL;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
struct qed_public_vf_info *vf_info;
vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
if (!vf_info)
continue;
/* Set the forced MAC, and schedule the IOV task */
ether_addr_copy(vf_info->forced_mac, mac);
qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
}
return 0;
}
static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
int i;
if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
DP_VERBOSE(cdev, QED_MSG_IOV,
"Cannot set a VF MAC; Sriov is not enabled\n");
return -EINVAL;
}
if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
DP_VERBOSE(cdev, QED_MSG_IOV,
"Cannot set VF[%d] MAC (VF is not active)\n", vfid);
return -EINVAL;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
struct qed_public_vf_info *vf_info;
vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
if (!vf_info)
continue;
/* Set the forced vlan, and schedule the IOV task */
vf_info->forced_vlan = vid;
qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
}
return 0;
}
static int qed_get_vf_config(struct qed_dev *cdev,
int vf_id, struct ifla_vf_info *ivi)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_public_vf_info *vf_info;
struct qed_mcp_link_state link;
u32 tx_rate;
/* Sanitize request */
if (IS_VF(cdev))
return -EINVAL;
if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
DP_VERBOSE(cdev, QED_MSG_IOV,
"VF index [%d] isn't active\n", vf_id);
return -EINVAL;
}
vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
/* Fill information about VF */
ivi->vf = vf_id;
if (is_valid_ether_addr(vf_info->forced_mac))
ether_addr_copy(ivi->mac, vf_info->forced_mac);
else
ether_addr_copy(ivi->mac, vf_info->mac);
ivi->vlan = vf_info->forced_vlan;
ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
ivi->linkstate = vf_info->link_state;
tx_rate = vf_info->tx_rate;
ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
return 0;
}
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
struct qed_mcp_link_capabilities caps;
struct qed_mcp_link_params params;
struct qed_mcp_link_state link;
int i;
if (!hwfn->pf_iov_info)
return;
/* Update bulletin of all future possible VFs with link configuration */
for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
struct qed_public_vf_info *vf_info;
vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
if (!vf_info)
continue;
memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
sizeof(caps));
/* Modify link according to the VF's configured link state */
switch (vf_info->link_state) {
case IFLA_VF_LINK_STATE_DISABLE:
link.link_up = false;
break;
case IFLA_VF_LINK_STATE_ENABLE:
link.link_up = true;
/* Set speed according to the maximum supported by HW;
* that is 40G for regular devices and 100G for CMT
* mode devices.
*/
link.speed = (hwfn->cdev->num_hwfns > 1) ?
100000 : 40000;
default:
/* In auto mode pass PF link image to VF */
break;
}
if (link.link_up && vf_info->tx_rate) {
struct qed_ptt *ptt;
int rate;
rate = min_t(int, vf_info->tx_rate, link.speed);
ptt = qed_ptt_acquire(hwfn);
if (!ptt) {
DP_NOTICE(hwfn, "Failed to acquire PTT\n");
return;
}
if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
vf_info->tx_rate = rate;
link.speed = rate;
}
qed_ptt_release(hwfn, ptt);
}
qed_iov_set_link(hwfn, i, &params, &link, &caps);
}
qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}
static int qed_set_vf_link_state(struct qed_dev *cdev,
int vf_id, int link_state)
{
int i;
/* Sanitize request */
if (IS_VF(cdev))
return -EINVAL;
if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
DP_VERBOSE(cdev, QED_MSG_IOV,
"VF index [%d] isn't active\n", vf_id);
return -EINVAL;
}
/* Handle configuration of link state */
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
struct qed_public_vf_info *vf;
vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
if (!vf)
continue;
if (vf->link_state == link_state)
continue;
vf->link_state = link_state;
qed_inform_vf_link_state(&cdev->hwfns[i]);
}
return 0;
}
static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
int i, rc = -EINVAL;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
if (rc)
break;
}
return rc;
}
static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
int i;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct qed_public_vf_info *vf;
if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
DP_NOTICE(p_hwfn,
"SR-IOV sanity check failed, can't set tx rate\n");
return -EINVAL;
}
vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
vf->tx_rate = rate;
qed_inform_vf_link_state(p_hwfn);
}
return 0;
}
static int qed_set_vf_rate(struct qed_dev *cdev,
int vfid, u32 min_rate, u32 max_rate)
{
int rc_min = 0, rc_max = 0;
if (max_rate)
rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
if (min_rate)
rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
if (rc_max | rc_min)
return -EINVAL;
return 0;
}
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
u64 events[QED_VF_ARRAY_LENGTH];
struct qed_ptt *ptt;
int i;
ptt = qed_ptt_acquire(hwfn);
if (!ptt) {
DP_VERBOSE(hwfn, QED_MSG_IOV,
"Can't acquire PTT; re-scheduling\n");
qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
return;
}
qed_iov_pf_get_and_clear_pending_events(hwfn, events);
DP_VERBOSE(hwfn, QED_MSG_IOV,
"Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
events[0], events[1], events[2]);
qed_for_each_vf(hwfn, i) {
/* Skip VFs with no pending messages */
if (!(events[i / 64] & (1ULL << (i % 64))))
continue;
DP_VERBOSE(hwfn, QED_MSG_IOV,
"Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
/* Copy VF's message to PF's request buffer for that VF */
if (qed_iov_copy_vf_msg(hwfn, ptt, i))
continue;
qed_iov_process_mbx_req(hwfn, ptt, i);
}
qed_ptt_release(hwfn, ptt);
}
static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
int i;
qed_for_each_vf(hwfn, i) {
struct qed_public_vf_info *info;
bool update = false;
u8 *mac;
info = qed_iov_get_public_vf_info(hwfn, i, true);
if (!info)
continue;
/* Update data on bulletin board */
mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
if (is_valid_ether_addr(info->forced_mac) &&
(!mac || !ether_addr_equal(mac, info->forced_mac))) {
DP_VERBOSE(hwfn,
QED_MSG_IOV,
"Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
i,
hwfn->cdev->p_iov_info->first_vf_in_pf + i);
/* Update bulletin board with forced MAC */
qed_iov_bulletin_set_forced_mac(hwfn,
info->forced_mac, i);
update = true;
}
if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
info->forced_vlan) {
DP_VERBOSE(hwfn,
QED_MSG_IOV,
"Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
info->forced_vlan,
i,
hwfn->cdev->p_iov_info->first_vf_in_pf + i);
qed_iov_bulletin_set_forced_vlan(hwfn,
info->forced_vlan, i);
update = true;
}
if (update)
qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}
}
static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
struct qed_ptt *ptt;
int i;
ptt = qed_ptt_acquire(hwfn);
if (!ptt) {
DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
return;
}
qed_for_each_vf(hwfn, i)
qed_iov_post_vf_bulletin(hwfn, i, ptt);
qed_ptt_release(hwfn, ptt);
}
void qed_iov_pf_task(struct work_struct *work)
{
struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
iov_task.work);
int rc;
if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
return;
if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
if (!ptt) {
qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
return;
}
rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
if (rc)
qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
qed_ptt_release(hwfn, ptt);
}
if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
qed_handle_vf_msg(hwfn);
if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
&hwfn->iov_task_flags))
qed_handle_pf_set_vf_unicast(hwfn);
if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
&hwfn->iov_task_flags))
qed_handle_bulletin_post(hwfn);
}
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
int i;
for_each_hwfn(cdev, i) {
if (!cdev->hwfns[i].iov_wq)
continue;
if (schedule_first) {
qed_schedule_iov(&cdev->hwfns[i],
QED_IOV_WQ_STOP_WQ_FLAG);
cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
}
flush_workqueue(cdev->hwfns[i].iov_wq);
destroy_workqueue(cdev->hwfns[i].iov_wq);
}
}
int qed_iov_wq_start(struct qed_dev *cdev)
{
char name[NAME_SIZE];
int i;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
/* PFs need a dedicated workqueue only if they support IOV.
* VFs always require one.
*/
if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
continue;
snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
p_hwfn->iov_wq = create_singlethread_workqueue(name);
if (!p_hwfn->iov_wq) {
DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
return -ENOMEM;
}
if (IS_PF(cdev))
INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
else
INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
}
return 0;
}
const struct qed_iov_hv_ops qed_iov_ops_pass = {
.configure = &qed_sriov_configure,
.set_mac = &qed_sriov_pf_set_mac,
.set_vlan = &qed_sriov_pf_set_vlan,
.get_config = &qed_get_vf_config,
.set_link_state = &qed_set_vf_link_state,
.set_spoof = &qed_spoof_configure,
.set_rate = &qed_set_vf_rate,
};
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"
#define QED_VF_ARRAY_LENGTH (3)
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
#define QED_MAX_VF_CHAINS_PER_PF 16
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
enum qed_iov_vport_update_flag {
QED_IOV_VP_UPDATE_ACTIVATE,
QED_IOV_VP_UPDATE_VLAN_STRIP,
QED_IOV_VP_UPDATE_TX_SWITCH,
QED_IOV_VP_UPDATE_MCAST,
QED_IOV_VP_UPDATE_ACCEPT_PARAM,
QED_IOV_VP_UPDATE_RSS,
QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
QED_IOV_VP_UPDATE_SGE_TPA,
QED_IOV_VP_UPDATE_MAX,
};
struct qed_public_vf_info {
/* These copies will later be reflected in the bulletin board,
* but this copy should be newer.
*/
u8 forced_mac[ETH_ALEN];
u16 forced_vlan;
u8 mac[ETH_ALEN];
/* IFLA_VF_LINK_STATE_<X> */
int link_state;
/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
int tx_rate;
};
/* This struct is part of qed_dev and contains data relevant to all hwfns;
* Initialized only if SR-IOV capability is exposed in PCIe config space.
*/
struct qed_hw_sriov_info {
int pos; /* capability position */
int nres; /* number of resources */
u32 cap; /* SR-IOV Capabilities */
u16 ctrl; /* SR-IOV Control */
u16 total_vfs; /* total VFs associated with the PF */
u16 num_vfs; /* number of vfs that have been started */
u16 initial_vfs; /* initial VFs associated with the PF */
u16 nr_virtfn; /* number of VFs available */
u16 offset; /* first VF Routing ID offset */
u16 stride; /* following VF stride */
u16 vf_device_id; /* VF device id */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
u32 first_vf_in_pf;
};
/* This mailbox is maintained per VF in its PF and contains all information
* required for sending / receiving a message.
*/
struct qed_iov_vf_mbx {
union vfpf_tlvs *req_virt;
dma_addr_t req_phys;
union pfvf_tlvs *reply_virt;
dma_addr_t reply_phys;
/* Address in VF where a pending message is located */
dma_addr_t pending_req;
u8 *offset;
/* saved VF request header */
struct vfpf_first_tlv first_tlv;
};
struct qed_vf_q_info {
u16 fw_rx_qid;
u16 fw_tx_qid;
u8 fw_cid;
u8 rxq_active;
u8 txq_active;
};
enum vf_state {
VF_FREE = 0, /* VF ready to be acquired - holds no resc */
VF_ACQUIRED, /* VF, acquired, but not initialized */
VF_ENABLED, /* VF, Enabled */
VF_RESET, /* VF, FLR'd, pending cleanup */
VF_STOPPED /* VF, Stopped */
};
struct qed_vf_vlan_shadow {
bool used;
u16 vid;
};
struct qed_vf_shadow_config {
/* Shadow copy of all guest vlans */
struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
u8 inner_vlan_removal;
};
/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
struct qed_iov_vf_mbx vf_mbx;
enum vf_state state;
bool b_init;
u8 to_disable;
struct qed_bulletin bulletin;
dma_addr_t vf_bulletin;
u32 concrete_fid;
u16 opaque_fid;
u16 mtu;
u8 vport_id;
u8 relative_vf_id;
u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
(p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
(p_vf)->abs_vf_id)
u8 vport_instance;
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
u8 num_active_rxqs;
struct qed_public_vf_info p_vf_info;
bool spoof_chk;
bool req_spoofchk_val;
/* Stores the configuration requested by VF */
struct qed_vf_shadow_config shadow_config;
/* A bitfield using bulletin's valid-map bits, used to indicate
* which of the bulletin board features have been configured.
*/
u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
(1 << VLAN_ADDR_FORCED))
};
/* This structure is part of qed_hwfn and used only for PFs that have sriov
* capability enabled.
*/
struct qed_pf_iov {
struct qed_vf_info vfs_array[MAX_NUM_VFS];
u64 pending_events[QED_VF_ARRAY_LENGTH];
u64 pending_flr[QED_VF_ARRAY_LENGTH];
/* Allocate message address continuously and split to each VF */
void *mbx_msg_virt_addr;
dma_addr_t mbx_msg_phys_addr;
u32 mbx_msg_size;
void *mbx_reply_virt_addr;
dma_addr_t mbx_reply_phys_addr;
u32 mbx_reply_size;
void *p_bulletins;
dma_addr_t bulletins_phys;
u32 bulletins_size;
};
enum qed_iov_wq_flag {
QED_IOV_WQ_MSG_FLAG,
QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
QED_IOV_WQ_STOP_WQ_FLAG,
QED_IOV_WQ_FLR_FLAG,
};
#ifdef CONFIG_QED_SRIOV
/**
* @brief - Given a VF index, return index of next [including that] active VF.
*
* @param p_hwfn
* @param rel_vf_id
*
* @return MAX_NUM_VFS in case no further active VFs, otherwise index.
*/
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief Read sriov related information and allocated resources -
* reads from configuration space, shmem, etc.
*
* @param p_hwfn
*
* @return int
*/
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
/**
* @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
*
* @param p_hwfn
* @param p_iov
* @param type
* @param length
*
* @return pointer to the newly placed tlv
*/
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
/**
* @brief list the types and lengths of the tlvs on the buffer
*
* @param p_hwfn
* @param tlvs_list
*/
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
/**
* @brief qed_iov_alloc - allocate sriov related resources
*
* @param p_hwfn
*
* @return int
*/
int qed_iov_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_iov_setup - setup sriov related resources
*
* @param p_hwfn
* @param p_ptt
*/
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief qed_iov_free - free sriov related resources
*
* @param p_hwfn
*/
void qed_iov_free(struct qed_hwfn *p_hwfn);
/**
* @brief free sriov related memory that was allocated during hw_prepare
*
* @param cdev
*/
void qed_iov_free_hw_info(struct qed_dev *cdev);
/**
* @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
*
* @param p_hwfn
* @param opcode
* @param echo
* @param data
*/
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode, __le16 echo, union event_ring_data *data);
/**
* @brief Mark structs of vfs that have been FLR-ed.
*
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
* @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
*/
int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
*
* @param p_hwfn
* @param p_tlvs_list - Pointer to tlvs list
* @param req_type - Type of TLV
*
* @return pointer to tlv type if found, otherwise returns NULL.
*/
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type);
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
{
return MAX_NUM_VFS;
}
static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
return 0;
}
static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
return 0;
}
static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
}
static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}
static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}
static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode,
__le16 echo, union event_ring_data *data)
{
return -EINVAL;
}
static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
u32 *disabled_vfs)
{
return 0;
}
static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}
static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
return 0;
}
static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
enum qed_iov_wq_flag flag)
{
}
static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}
static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
return 0;
}
static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
#endif
#define qed_for_each_vf(_p_hwfn, _i) \
for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
_i < MAX_NUM_VFS; \
_i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
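/* Example usage (illustrative) - post the bulletin of every active VF,
* as done by qed_handle_bulletin_post():
*
*	int i;
*
*	qed_for_each_vf(p_hwfn, i)
*		qed_iov_post_vf_bulletin(p_hwfn, i, p_ptt);
*/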
#endif
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"
static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
void *p_tlv;
/* This lock is released when we receive PF's response
* in qed_send_msg2pf().
* So, qed_vf_pf_prep() and qed_send_msg2pf()
* must come in sequence.
*/
mutex_lock(&(p_iov->mutex));
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"preparing to send 0x%04x tlv over vf pf channel\n",
type);
/* Reset Request offset */
p_iov->offset = (u8 *)p_iov->vf2pf_request;
/* Clear mailbox - both request and reply */
memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
/* Init type and length */
p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
/* Init first tlv header */
((struct vfpf_first_tlv *)p_tlv)->reply_address =
(u64)p_iov->pf2vf_reply_phys;
return p_tlv;
}
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
struct ustorm_vf_zone *zone_data;
int rc = 0, time = 100;
zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
/* output tlvs list */
qed_dp_tlv_list(p_hwfn, p_req);
/* need to add the END TLV to the message size */
resp_size += sizeof(struct channel_list_end_tlv);
/* Send TLVs over HW channel */
memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
trigger.vf_pf_msg_valid = 1;
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
GET_FIELD(p_hwfn->hw_info.concrete_fid,
PXP_CONCRETE_FID_PFID),
upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
&zone_data->non_trigger.vf_pf_msg_addr,
*((u32 *)&trigger), &zone_data->trigger);
REG_WR(p_hwfn,
(uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
REG_WR(p_hwfn,
(uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
/* The message data must be written first, to prevent trigger before
* data is written.
*/
wmb();
REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
/* When the PF is done with the response, it writes back to the
* `done' address. Poll until then.
*/
while ((!*done) && time) {
msleep(25);
time--;
}
if (!*done) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF <-- PF Timeout [Type %d]\n",
p_req->first_tlv.tl.type);
rc = -EBUSY;
goto exit;
} else {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"PF response: %d [Type %d]\n",
*done, p_req->first_tlv.tl.type);
}
exit:
mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
return rc;
}
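/* Illustrative calling sequence for a simple request, as used by
* e.g. qed_vf_pf_reset() below:
*
*	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
*	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
*		    sizeof(struct channel_list_end_tlv));
*	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
*
* qed_vf_pf_prep() takes the channel mutex and clears both mailboxes;
* qed_send_msg2pf() releases it once the PF has written its status back.
*/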
#define VF_ACQUIRE_THRESH 3
#define VF_ACQUIRE_MAC_FILTERS 1
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
u8 rx_count = 1, tx_count = 1, num_sbs = 1;
u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
bool resources_acquired = false;
struct vfpf_acquire_tlv *req;
int rc = 0, attempts = 0;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
/* start filling the request */
req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
req->resc_request.num_rxqs = rx_count;
req->resc_request.num_txqs = tx_count;
req->resc_request.num_sbs = num_sbs;
req->resc_request.num_mac_filters = num_mac;
req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
req->vfdev_info.fw_major = FW_MAJOR_VERSION;
req->vfdev_info.fw_minor = FW_MINOR_VERSION;
req->vfdev_info.fw_revision = FW_REVISION_VERSION;
req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
/* Fill capability field with any non-deprecated config we support */
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
/* pf 2 vf bulletin board address */
req->bulletin_addr = p_iov->bulletin.phys;
req->bulletin_size = p_iov->bulletin.size;
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
while (!resources_acquired) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV, "attempting to acquire resources\n");
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
return rc;
/* copy acquire response from buffer to p_hwfn */
memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
attempts++;
if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
/* PF agrees to allocate our resources */
if (!(resp->pfdev_info.capabilities &
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
DP_INFO(p_hwfn,
"PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
return -EINVAL;
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
resources_acquired = true;
} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
attempts < VF_ACQUIRE_THRESH) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"PF unwilling to fullfill resource request. Try PF recommended amount\n");
/* humble our request */
req->resc_request.num_txqs = resp->resc.num_txqs;
req->resc_request.num_rxqs = resp->resc.num_rxqs;
req->resc_request.num_sbs = resp->resc.num_sbs;
req->resc_request.num_mac_filters =
resp->resc.num_mac_filters;
req->resc_request.num_vlan_filters =
resp->resc.num_vlan_filters;
/* Clear response buffer */
memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
} else {
DP_ERR(p_hwfn,
"PF returned error %d to VF acquisition request\n",
resp->hdr.status);
return -EAGAIN;
}
}
/* Update bulletin board size with response from PF */
p_iov->bulletin.size = resp->bulletin_size;
/* get HW info */
p_hwfn->cdev->type = resp->pfdev_info.dev_type;
p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;
p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;
/* Learn of the possibility of CMT */
if (IS_LEAD_HWFN(p_hwfn)) {
if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
DP_NOTICE(p_hwfn, "100g VF\n");
p_hwfn->cdev->num_hwfns = 2;
}
}
return 0;
}
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
struct qed_vf_iov *p_iov;
u32 reg;
/* Set number of hwfns - might be overridden once leading hwfn learns
* actual configuration from PF.
*/
if (IS_LEAD_HWFN(p_hwfn))
p_hwfn->cdev->num_hwfns = 1;
/* Set the doorbell bar. Assumption: regview is set */
p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
PXP_VF_BAR0_START_DQ;
reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
/* Allocate vf sriov info */
p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
if (!p_iov) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
return -ENOMEM;
}
/* Allocate vf2pf msg */
p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
&p_iov->vf2pf_request_phys,
GFP_KERNEL);
if (!p_iov->vf2pf_request) {
DP_NOTICE(p_hwfn,
"Failed to allocate `vf2pf_request' DMA memory\n");
goto free_p_iov;
}
p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union pfvf_tlvs),
&p_iov->pf2vf_reply_phys,
GFP_KERNEL);
if (!p_iov->pf2vf_reply) {
DP_NOTICE(p_hwfn,
"Failed to allocate `pf2vf_reply' DMA memory\n");
goto free_vf2pf_request;
}
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
p_iov->vf2pf_request,
(u64) p_iov->vf2pf_request_phys,
p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
/* Allocate Bulletin board */
p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
p_iov->bulletin.size,
&p_iov->bulletin.phys,
GFP_KERNEL);
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
p_iov->bulletin.p_virt,
(u64)p_iov->bulletin.phys, p_iov->bulletin.size);
mutex_init(&p_iov->mutex);
p_hwfn->vf_iov_info = p_iov;
p_hwfn->hw_info.personality = QED_PCI_ETH;
return qed_vf_pf_acquire(p_hwfn);
free_vf2pf_request:
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
kfree(p_iov);
return -ENOMEM;
}
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_qid,
u16 sb,
u8 sb_index,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size, void __iomem **pp_prod)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_rxq_tlv *req;
int rc;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
req->rx_qid = rx_qid;
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
req->hw_sb = sb;
req->sb_index = sb_index;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1;
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->queue_start;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
return rc;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
return -EINVAL;
/* Learn the address of the producer from the response */
if (pp_prod) {
u64 init_prod_val = 0;
*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
rx_qid, *pp_prod, resp->offset);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
(u32 *)&init_prod_val);
}
return rc;
}
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_rxqs_tlv *req;
struct pfvf_def_resp_tlv *resp;
int rc;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
req->rx_qid = rx_qid;
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
return rc;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
return -EINVAL;
return rc;
}
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 tx_queue_id,
u16 sb,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_start_txq_tlv *req;
struct pfvf_def_resp_tlv *resp;
int rc;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
req->tx_qid = tx_queue_id;
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
req->hw_sb = sb;
req->sb_index = sb_index;
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
return rc;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
return -EINVAL;
if (pp_doorbell) {
u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
qed_db_addr(cid, DQ_DEMS_LEGACY);
}
return rc;
}
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_txqs_tlv *req;
struct pfvf_def_resp_tlv *resp;
int rc;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
req->tx_qid = tx_qid;
req->num_txqs = 1;
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
return rc;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
return -EINVAL;
return rc;
}
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 vport_id,
u16 mtu,
u8 inner_vlan_removal,
enum qed_tpa_mode tpa_mode,
u8 max_buffers_per_cqe, u8 only_untagged)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_vport_start_tlv *req;
struct pfvf_def_resp_tlv *resp;
int rc, i;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
req->mtu = mtu;
req->vport_id = vport_id;
req->inner_vlan_removal = inner_vlan_removal;
req->tpa_mode = tpa_mode;
req->max_buffers_per_cqe = max_buffers_per_cqe;
req->only_untagged = only_untagged;
/* status blocks */
for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
if (p_hwfn->sbs_info[i])
req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
return rc;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
return -EINVAL;
return rc;
}
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
int rc;
/* clear mailbox and prep first tlv */
qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
sizeof(struct vfpf_first_tlv));
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
return rc;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
return -EINVAL;
return rc;
}
static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_data,
u16 tlv)
{
switch (tlv) {
case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
return !!(p_data->update_vport_active_rx_flg ||
p_data->update_vport_active_tx_flg);
case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
return !!p_data->update_tx_switching_flg;
case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
return !!p_data->update_inner_vlan_removal_flg;
case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
return !!p_data->update_accept_any_vlan_flg;
case CHANNEL_TLV_VPORT_UPDATE_MCAST:
return !!p_data->update_approx_mcast_flg;
case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
return !!(p_data->accept_flags.update_rx_mode_config ||
p_data->accept_flags.update_tx_mode_config);
case CHANNEL_TLV_VPORT_UPDATE_RSS:
return !!p_data->rss_params;
case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
return !!p_data->sge_tpa_params;
default:
DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
tlv);
return false;
}
}
static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_data)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *p_resp;
u16 tlv;
for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
continue;
p_resp = (struct pfvf_def_resp_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
tlv);
if (p_resp && p_resp->hdr.status)
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"TLV[%d] Configuration %s\n",
tlv,
(p_resp && p_resp->hdr.status) ? "succeeded"
: "failed");
}
}
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_vport_update_tlv *req;
struct pfvf_def_resp_tlv *resp;
u8 update_rx, update_tx;
u32 resp_size = 0;
u16 size, tlv;
int rc;
resp = &p_iov->pf2vf_reply->default_resp;
resp_size = sizeof(*resp);
update_rx = p_params->update_vport_active_rx_flg;
update_tx = p_params->update_vport_active_tx_flg;
/* clear mailbox and prep header tlv */
qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
/* Prepare extended tlvs */
if (update_rx || update_tx) {
struct vfpf_vport_update_activate_tlv *p_act_tlv;
size = sizeof(struct vfpf_vport_update_activate_tlv);
p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
if (update_rx) {
p_act_tlv->update_rx = update_rx;
p_act_tlv->active_rx = p_params->vport_active_rx_flg;
}
if (update_tx) {
p_act_tlv->update_tx = update_tx;
p_act_tlv->active_tx = p_params->vport_active_tx_flg;
}
}
if (p_params->update_tx_switching_flg) {
struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
}
if (p_params->update_approx_mcast_flg) {
struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
memcpy(p_mcast_tlv->bins, p_params->bins,
sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
}
update_rx = p_params->accept_flags.update_rx_mode_config;
update_tx = p_params->accept_flags.update_tx_mode_config;
if (update_rx || update_tx) {
struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
size = sizeof(struct vfpf_vport_update_accept_param_tlv);
p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
if (update_rx) {
p_accept_tlv->update_rx_mode = update_rx;
p_accept_tlv->rx_accept_filter =
p_params->accept_flags.rx_accept_filter;
}
if (update_tx) {
p_accept_tlv->update_tx_mode = update_tx;
p_accept_tlv->tx_accept_filter =
p_params->accept_flags.tx_accept_filter;
}
}
if (p_params->rss_params) {
struct qed_rss_params *rss_params = p_params->rss_params;
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
size = sizeof(struct vfpf_vport_update_rss_tlv);
p_rss_tlv = qed_add_tlv(p_hwfn,
&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_RSS, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
if (rss_params->update_rss_config)
p_rss_tlv->update_rss_flags |=
VFPF_UPDATE_RSS_CONFIG_FLAG;
if (rss_params->update_rss_capabilities)
p_rss_tlv->update_rss_flags |=
VFPF_UPDATE_RSS_CAPS_FLAG;
if (rss_params->update_rss_ind_table)
p_rss_tlv->update_rss_flags |=
VFPF_UPDATE_RSS_IND_TABLE_FLAG;
if (rss_params->update_rss_key)
p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
p_rss_tlv->rss_enable = rss_params->rss_enable;
p_rss_tlv->rss_caps = rss_params->rss_caps;
p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
sizeof(rss_params->rss_ind_table));
memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
sizeof(rss_params->rss_key));
}
if (p_params->update_accept_any_vlan_flg) {
struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
p_any_vlan_tlv->update_accept_any_vlan_flg =
p_params->update_accept_any_vlan_flg;
}
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
if (rc)
return rc;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
return -EINVAL;
qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
return rc;
}
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
int rc;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
return rc;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
return -EAGAIN;
p_hwfn->b_int_enabled = 0;
return 0;
}
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
u32 size;
int rc;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
rc = -EAGAIN;
p_hwfn->b_int_enabled = 0;
if (p_iov->vf2pf_request)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
p_iov->vf2pf_request,
p_iov->vf2pf_request_phys);
if (p_iov->pf2vf_reply)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union pfvf_tlvs),
p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
if (p_iov->bulletin.p_virt) {
size = sizeof(struct qed_bulletin_content);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
size,
p_iov->bulletin.p_virt, p_iov->bulletin.phys);
}
kfree(p_hwfn->vf_iov_info);
p_hwfn->vf_iov_info = NULL;
return rc;
}
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd)
{
struct qed_sp_vport_update_params sp_params;
int i;
memset(&sp_params, 0, sizeof(sp_params));
sp_params.update_approx_mcast_flg = 1;
if (p_filter_cmd->opcode == QED_FILTER_ADD) {
for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
u32 bit;
bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
__set_bit(bit, sp_params.bins);
}
}
qed_vf_pf_vport_update(p_hwfn, &sp_params);
}
int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
struct qed_filter_ucast *p_ucast)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_ucast_filter_tlv *req;
struct pfvf_def_resp_tlv *resp;
int rc;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
req->opcode = (u8) p_ucast->opcode;
req->type = (u8) p_ucast->type;
memcpy(req->mac, p_ucast->mac, ETH_ALEN);
req->vlan = p_ucast->vlan;
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
return rc;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
return -EAGAIN;
return 0;
}
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
int rc;
/* clear mailbox and prep first tlv */
qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
sizeof(struct vfpf_first_tlv));
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
return rc;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
return -EINVAL;
return 0;
}
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
if (!p_iov) {
DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
return 0;
}
return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct qed_bulletin_content shadow;
u32 crc, crc_size;
crc_size = sizeof(p_iov->bulletin.p_virt->crc);
*p_change = 0;
/* Need to guarantee PF is not in the middle of writing it */
memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
/* If version did not update, no need to do anything */
if (shadow.version == p_iov->bulletin_shadow.version)
return 0;
/* Verify the bulletin we see is valid */
crc = crc32(0, (u8 *)&shadow + crc_size,
p_iov->bulletin.size - crc_size);
if (crc != shadow.crc)
return -EAGAIN;
/* Set the shadow bulletin and process it */
memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Read a bulletin update %08x\n", shadow.version);
*p_change = 1;
return 0;
}
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *p_params,
struct qed_bulletin_content *p_bulletin)
{
memset(p_params, 0, sizeof(*p_params));
p_params->speed.autoneg = p_bulletin->req_autoneg;
p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
p_params->speed.forced_speed = p_bulletin->req_forced_speed;
p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
p_params->pause.forced_rx = p_bulletin->req_forced_rx;
p_params->pause.forced_tx = p_bulletin->req_forced_tx;
p_params->loopback_mode = p_bulletin->req_loopback;
}
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
{
__qed_vf_get_link_params(p_hwfn, params,
&(p_hwfn->vf_iov_info->bulletin_shadow));
}
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
struct qed_bulletin_content *p_bulletin)
{
memset(p_link, 0, sizeof(*p_link));
p_link->link_up = p_bulletin->link_up;
p_link->speed = p_bulletin->speed;
p_link->full_duplex = p_bulletin->full_duplex;
p_link->an = p_bulletin->autoneg;
p_link->an_complete = p_bulletin->autoneg_complete;
p_link->parallel_detection = p_bulletin->parallel_detection;
p_link->pfc_enabled = p_bulletin->pfc_enabled;
p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *link)
{
__qed_vf_get_link_state(p_hwfn, link,
&(p_hwfn->vf_iov_info->bulletin_shadow));
}
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps,
struct qed_bulletin_content *p_bulletin)
{
memset(p_link_caps, 0, sizeof(*p_link_caps));
p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps)
{
__qed_vf_get_link_caps(p_hwfn, p_link_caps,
&(p_hwfn->vf_iov_info->bulletin_shadow));
}
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
memcpy(port_mac,
p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
struct qed_vf_iov *p_vf;
p_vf = p_hwfn->vf_iov_info;
*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
struct qed_bulletin_content *bulletin;
bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
return true;
/* Forbid VF from changing a MAC enforced by PF */
if (ether_addr_equal(bulletin->mac, mac))
return false;
return false;
}
bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
u8 *dst_mac, u8 *p_is_forced)
{
struct qed_bulletin_content *bulletin;
bulletin = &hwfn->vf_iov_info->bulletin_shadow;
if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
if (p_is_forced)
*p_is_forced = 1;
} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
if (p_is_forced)
*p_is_forced = 0;
} else {
return false;
}
ether_addr_copy(dst_mac, bulletin->mac);
return true;
}
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng)
{
struct pf_vf_pfdev_info *info;
info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
*fw_major = info->fw_major;
*fw_minor = info->fw_minor;
*fw_rev = info->fw_rev;
*fw_eng = info->fw_eng;
}
static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
void *cookie = hwfn->cdev->ops_cookie;
is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
&is_mac_forced);
if (is_mac_exist && is_mac_forced && cookie)
ops->force_mac(cookie, mac);
/* Always update link configuration according to bulletin */
qed_link_update(hwfn);
}
void qed_iov_vf_task(struct work_struct *work)
{
struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
iov_task.work);
u8 change = 0;
if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
return;
/* Handle bulletin board changes */
qed_vf_read_bulletin(hwfn, &change);
if (change)
qed_handle_bulletin_change(hwfn);
/* As VF is polling bulletin board, need to constantly re-schedule */
queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_VF_H
#define _QED_VF_H
#include "qed_l2.h"
#include "qed_mcp.h"
#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10
struct vf_pf_resc_request {
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u16 padding;
};
struct hw_sb_info {
u16 hw_sb_id;
u8 sb_qid;
u8 padding[5];
};
#define TLV_BUFFER_SIZE 1024
enum {
PFVF_STATUS_WAITING,
PFVF_STATUS_SUCCESS,
PFVF_STATUS_FAILURE,
PFVF_STATUS_NOT_SUPPORTED,
PFVF_STATUS_NO_RESOURCE,
PFVF_STATUS_FORCED,
};
/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
u16 type;
u16 length;
};
/* header of first vf->pf tlv carries the offset used to calculate response
* buffer address
*/
struct vfpf_first_tlv {
struct channel_tlv tl;
u32 padding;
u64 reply_address;
};
/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
struct channel_tlv tl;
u8 status;
u8 padding[3];
};
/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
struct pfvf_tlv hdr;
};
/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
struct channel_tlv tl;
u8 padding[4];
};
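/* A vf->pf message is a chain of TLVs packed back-to-back in the request
* buffer. Illustrative layout of a vport-update request:
*
*	+------------------------------+
*	| vfpf_vport_update_tlv        | first tlv, carries reply_address
*	+------------------------------+
*	| extended vport-update tlvs   | zero or more (activate, rss, ...)
*	+------------------------------+
*	| channel_list_end_tlv         | terminates the list
*	+------------------------------+
*/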
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
u64 capabilities;
u8 fw_major;
u8 fw_minor;
u8 fw_revision;
u8 fw_engineering;
u32 driver_version;
u16 opaque_fid; /* ME register value */
u8 os_type; /* VFPF_ACQUIRE_OS_* value */
u8 padding[5];
} vfdev_info;
struct vf_pf_resc_request resc_request;
u64 bulletin_addr;
u32 bulletin_size;
u32 padding;
};
/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
struct channel_tlv tl;
u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3)
u8 rss_enable;
u8 rss_caps;
u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
struct pfvf_storm_stats {
u32 address;
u32 len;
};
struct pfvf_stats_info {
struct pfvf_storm_stats mstats;
struct pfvf_storm_stats pstats;
struct pfvf_storm_stats tstats;
struct pfvf_storm_stats ustats;
};
struct pfvf_acquire_resp_tlv {
struct pfvf_tlv hdr;
struct pf_vf_pfdev_info {
u32 chip_num;
u32 mfw_ver;
u16 fw_major;
u16 fw_minor;
u16 fw_rev;
u16 fw_eng;
u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0)
#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
* mechanism [version-based] and allow a VF that can't be supported to pass
* the acquisition phase.
* To overcome this, PFs now indicate that they're past that point and the new
* VFs would fail probe on the older PFs that fail to do so.
*/
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
u16 db_size;
u8 indices_per_sb;
u8 os_type;
/* These should match the PF's qed_dev values */
u16 chip_rev;
u8 dev_type;
u8 padding;
struct pfvf_stats_info stats_info;
u8 port_mac[ETH_ALEN];
u8 padding2[2];
} pfdev_info;
struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
u8 cid[PFVF_MAX_QUEUES_PER_VF];
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u8 padding[2];
} resc;
u32 bulletin_size;
u32 padding;
};
struct pfvf_start_queue_resp_tlv {
struct pfvf_tlv hdr;
u32 offset; /* offset to consumer/producer of queue */
u8 padding[4];
};
/* Setup Queue */
struct vfpf_start_rxq_tlv {
struct vfpf_first_tlv first_tlv;
/* physical addresses */
u64 rxq_addr;
u64 deprecated_sge_addr;
u64 cqe_pbl_addr;
u16 cqe_pbl_size;
u16 hw_sb;
u16 rx_qid;
u16 hc_rate; /* desired interrupts per sec. */
u16 bd_max_bytes;
u16 stat_id;
u8 sb_index;
u8 padding[3];
};
struct vfpf_start_txq_tlv {
struct vfpf_first_tlv first_tlv;
/* physical addresses */
u64 pbl_addr;
u16 pbl_size;
u16 stat_id;
u16 tx_qid;
u16 hw_sb;
u32 flags; /* VFPF_QUEUE_FLG_X flags */
u16 hc_rate; /* desired interrupts per sec. */
u8 sb_index;
u8 padding[3];
};
/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 rx_qid;
u8 num_rxqs;
u8 cqe_completion;
u8 padding[4];
};
/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 tx_qid;
u8 num_txqs;
u8 padding[5];
};
struct vfpf_update_rxq_tlv {
struct vfpf_first_tlv first_tlv;
u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
u16 rx_qid;
u8 num_rxqs;
u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2)
u8 padding[4];
};
/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
u8 mac[ETH_ALEN];
u16 vlan_tag;
u8 padding[4];
};
/* Start a vport */
struct vfpf_vport_start_tlv {
struct vfpf_first_tlv first_tlv;
u64 sb_addr[PFVF_MAX_SBS_PER_VF];
u32 tpa_mode;
u16 dep1;
u16 mtu;
u8 vport_id;
u8 inner_vlan_removal;
u8 only_untagged;
u8 max_buffers_per_cqe;
u8 padding[4];
};
/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
struct channel_tlv tl;
u8 update_rx;
u8 update_tx;
u8 active_rx;
u8 active_tx;
};
struct vfpf_vport_update_tx_switch_tlv {
struct channel_tlv tl;
u8 tx_switching;
u8 padding[3];
};
struct vfpf_vport_update_vlan_strip_tlv {
struct channel_tlv tl;
u8 remove_vlan;
u8 padding[3];
};
struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
u64 bins[8];
};
struct vfpf_vport_update_accept_param_tlv {
struct channel_tlv tl;
u8 update_rx_mode;
u8 update_tx_mode;
u8 rx_accept_filter;
u8 tx_accept_filter;
};
struct vfpf_vport_update_accept_any_vlan_tlv {
struct channel_tlv tl;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
u8 padding[2];
};
struct vfpf_vport_update_sge_tpa_tlv {
struct channel_tlv tl;
u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG BIT(4)
u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2)
u8 max_buffers_per_cqe;
u16 deprecated_sge_buff_size;
u16 tpa_max_size;
u16 tpa_min_size_to_start;
u16 tpa_min_size_to_cont;
u8 tpa_max_aggs_num;
u8 padding[7];
};
/* Primary tlv as a header for various extended tlvs for
* various functionalities in vport update ramrod.
*/
struct vfpf_vport_update_tlv {
struct vfpf_first_tlv first_tlv;
};
struct vfpf_ucast_filter_tlv {
struct vfpf_first_tlv first_tlv;
u8 opcode;
u8 type;
u8 mac[ETH_ALEN];
u16 vlan;
u16 padding[3];
};
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
struct vfpf_start_rxq_tlv start_rxq;
struct vfpf_start_txq_tlv start_txq;
struct vfpf_stop_rxqs_tlv stop_rxqs;
struct vfpf_stop_txqs_tlv stop_txqs;
struct vfpf_update_rxq_tlv update_rxq;
struct vfpf_vport_start_tlv start_vport;
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
union pfvf_tlvs {
struct pfvf_def_resp_tlv default_resp;
struct pfvf_acquire_resp_tlv acquire_resp;
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
};
enum qed_bulletin_bit {
/* Alert the VF that a forced MAC was set by the PF */
MAC_ADDR_FORCED = 0,
/* Alert the VF that a forced VLAN was set by the PF */
VLAN_ADDR_FORCED = 2,
/* Indicate that `default_only_untagged' contains actual data */
VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
/* Alert the VF that suggested mac was sent by the PF.
* MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
*/
VFPF_BULLETIN_MAC_ADDR = 5
};
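/* Example (illustrative) - test a bulletin bit before using its field,
* as done by qed_vf_bulletin_get_forced_mac():
*
*	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))
*		ether_addr_copy(dst_mac, bulletin->mac);
*/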
struct qed_bulletin_content {
/* crc of structure to ensure it is not read mid-update */
u32 crc;
u32 version;
/* bitmap indicating which fields hold valid values */
u64 valid_bitmap;
/* used for MAC_ADDR or MAC_ADDR_FORCED */
u8 mac[ETH_ALEN];
/* If valid, 1 => only untagged Rx if no vlan is configured */
u8 default_only_untagged;
u8 padding;
/* The following is a 'copy' of qed_mcp_link_state,
* qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
* possible the structs will increase further along the road we cannot
* have it here; Instead we need to have all of its fields.
*/
u8 req_autoneg;
u8 req_autoneg_pause;
u8 req_forced_rx;
u8 req_forced_tx;
u8 padding2[4];
u32 req_adv_speed;
u32 req_forced_speed;
u32 req_loopback;
u32 padding3;
u8 link_up;
u8 full_duplex;
u8 autoneg;
u8 autoneg_complete;
u8 parallel_detection;
u8 pfc_enabled;
u8 partner_tx_flow_ctrl_en;
u8 partner_rx_flow_ctrl_en;
u8 partner_adv_pause;
u8 sfp_tx_fault;
u8 padding4[6];
u32 speed;
u32 partner_adv_speed;
u32 capability_speed;
/* Forced vlan */
u16 pvid;
u16 padding5;
};
struct qed_bulletin {
dma_addr_t phys;
struct qed_bulletin_content *p_virt;
u32 size;
};
enum {
CHANNEL_TLV_NONE, /* ends tlv sequence */
CHANNEL_TLV_ACQUIRE,
CHANNEL_TLV_VPORT_START,
CHANNEL_TLV_VPORT_UPDATE,
CHANNEL_TLV_VPORT_TEARDOWN,
CHANNEL_TLV_START_RXQ,
CHANNEL_TLV_START_TXQ,
CHANNEL_TLV_STOP_RXQS,
CHANNEL_TLV_STOP_TXQS,
CHANNEL_TLV_UPDATE_RXQ,
CHANNEL_TLV_INT_CLEANUP,
CHANNEL_TLV_CLOSE,
CHANNEL_TLV_RELEASE,
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_UCAST_FILTER,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
CHANNEL_TLV_VPORT_UPDATE_MCAST,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
* Will break in case of non-sequential vport-update tlvs.
*/
CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
union vfpf_tlvs *vf2pf_request;
dma_addr_t vf2pf_request_phys;
union pfvf_tlvs *pf2vf_reply;
dma_addr_t pf2vf_reply_phys;
/* Should be taken whenever the mailbox buffers are accessed */
struct mutex mutex;
u8 *offset;
/* Bulletin Board */
struct qed_bulletin bulletin;
struct qed_bulletin_content bulletin_shadow;
/* we set aside a copy of the acquire response */
struct pfvf_acquire_resp_tlv acquire_resp;
};
#ifdef CONFIG_QED_SRIOV
/**
* @brief Read the VF bulletin and act on it if needed
*
* @param p_hwfn
* @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
*
* @return enum _qed_status
*/
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
/**
* @brief Get link parameters for VF from qed
*
* @param p_hwfn
* @param params - the link params structure to be filled for the VF
*/
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params);
/**
* @brief Get link state for VF from qed
*
* @param p_hwfn
* @param link - the link state structure to be filled for the VF
*/
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *link);
/**
* @brief Get link capabilities for VF from qed
*
* @param p_hwfn
* @param p_link_caps - the link capabilities structure to be filled for the VF
*/
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps);
/**
* @brief Get number of Rx queues allocated for VF by qed
*
* @param p_hwfn
* @param num_rxqs - allocated RX queues
*/
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
/**
* @brief Get port mac address for VF
*
* @param p_hwfn
* @param port_mac - destination location for port mac
*/
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
/**
* @brief Get number of VLAN filters allocated for VF by qed
*
* @param p_hwfn
* @param num_vlan_filters - allocated VLAN filters
*/
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
u8 *num_vlan_filters);
/**
* @brief Check if VF can set a MAC address
*
* @param p_hwfn
* @param mac
*
* @return bool
*/
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
/**
* @brief Set firmware version information in dev_info from VFs acquire response tlv
*
* @param p_hwfn
* @param fw_major
* @param fw_minor
* @param fw_rev
* @param fw_eng
*/
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng);
/**
* @brief hw preparation for VF
* sends ACQUIRE message
*
* @param p_hwfn
*
* @return int
*/
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
/**
* @brief VF - start the RX Queue by sending a message to the PF
* @param p_hwfn
* @param cid - zero based within the VF
* @param rx_queue_id - zero based within the VF
* @param sb - VF status block for this queue
* @param sb_index - Index within the status block
* @param bd_max_bytes - maximum number of bytes per bd
* @param bd_chain_phys_addr - physical address of bd chain
* @param cqe_pbl_addr - physical address of pbl
* @param cqe_pbl_size - pbl size
* @param pp_prod - pointer to the producer to be
* used in fastpath
*
* @return int
*/
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_queue_id,
u16 sb,
u8 sb_index,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size, void __iomem **pp_prod);
/**
* @brief VF - start the TX queue by sending a message to the
* PF.
*
* @param p_hwfn
* @param tx_queue_id - zero based within the VF
* @param sb - status block for this queue
* @param sb_index - index within the status block
* @param bd_chain_phys_addr - physical address of tx chain
* @param pp_doorbell - pointer to address to which to
* write the doorbell to.
*
* @return int
*/
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 tx_queue_id,
u16 sb,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell);
/**
* @brief VF - stop the RX queue by sending a message to the PF
*
* @param p_hwfn
* @param rx_qid
* @param cqe_completion
*
* @return int
*/
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
u16 rx_qid, bool cqe_completion);
/**
* @brief VF - stop the TX queue by sending a message to the PF
*
* @param p_hwfn
* @param tx_qid
*
* @return int
*/
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
/**
* @brief VF - send a vport update command
*
* @param p_hwfn
* @param params
*
* @return int
*/
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params);
/**
*
* @brief VF - send a close message to PF
*
* @param p_hwfn
*
* @return enum _qed_status
*/
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
/**
* @brief VF - free vf`s memories
*
* @param p_hwfn
*
* @return enum _qed_status
*/
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
/**
* @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
* sb_id. For VFs igu sbs don't have to be contiguous
*
* @param p_hwfn
* @param sb_id
*
* @return INLINE u16
*/
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
* @brief qed_vf_pf_vport_start - perform vport start for VF.
*
* @param p_hwfn
* @param vport_id
* @param mtu
* @param inner_vlan_removal
* @param tpa_mode
* @param max_buffers_per_cqe
* @param only_untagged - default behavior regarding vlan acceptance
*
* @return enum _qed_status
*/
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 vport_id,
u16 mtu,
u8 inner_vlan_removal,
enum qed_tpa_mode tpa_mode,
u8 max_buffers_per_cqe, u8 only_untagged);
/**
* @brief qed_vf_pf_vport_stop - stop the VF's vport
*
* @param p_hwfn
*
* @return enum _qed_status
*/
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
struct qed_filter_ucast *p_param);
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd);
/**
* @brief qed_vf_pf_int_cleanup - clean the SB of the VF
*
* @param p_hwfn
*
* @return enum _qed_status
*/
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
/**
* @brief - return the link params in a given bulletin board
*
* @param p_hwfn
* @param p_params - pointer to a struct to fill with link params
* @param p_bulletin
*/
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *p_params,
struct qed_bulletin_content *p_bulletin);
/**
* @brief - return the link state in a given bulletin board
*
* @param p_hwfn
* @param p_link - pointer to a struct to fill with link state
* @param p_bulletin
*/
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
struct qed_bulletin_content *p_bulletin);
/**
* @brief - return the link capabilities in a given bulletin board
*
* @param p_hwfn
* @param p_link - pointer to a struct to fill with link capabilities
* @param p_bulletin
*/
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps,
struct qed_bulletin_content *p_bulletin);
void qed_iov_vf_task(struct work_struct *work);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
{
}
static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *link)
{
}
static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps)
{
}
static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}
static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}
static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
u8 *num_vlan_filters)
{
}
static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
return false;
}
static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng)
{
}
static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_queue_id,
u16 sb,
u8 sb_index,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_adr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size, void __iomem **pp_prod)
{
return -EINVAL;
}
static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 tx_queue_id,
u16 sb,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell)
{
return -EINVAL;
}
static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
u16 rx_qid, bool cqe_completion)
{
return -EINVAL;
}
static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
return -EINVAL;
}
static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params)
{
return -EINVAL;
}
static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
return 0;
}
static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 vport_id,
u16 mtu,
u8 inner_vlan_removal,
enum qed_tpa_mode tpa_mode,
u8 max_buffers_per_cqe,
u8 only_untagged)
{
return -EINVAL;
}
static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
struct qed_filter_ucast *p_param)
{
return -EINVAL;
}
static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd)
{
}
static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params
*p_params,
struct qed_bulletin_content
*p_bulletin)
{
}
static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
struct qed_bulletin_content
*p_bulletin)
{
}
static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps,
struct qed_bulletin_content *p_bulletin)
{
}
static inline void qed_iov_vf_task(struct work_struct *work)
{
}
#endif
#endif
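
When CONFIG_QED_SRIOV is disabled, the inline stubs above make every VF->PF channel request collapse to -EINVAL (or a no-op for the void helpers), so shared code can call them without its own #ifdefs. Below is a minimal sketch of such a caller, assuming kernel context; example_start_vport() and its b_is_vf/tpa_mode parameters are illustrative and not part of this series:

/* Illustrative only: the VF branch issues the request over the HW channel;
 * in a non-SRIOV build the stub above returns -EINVAL and the caller fails
 * cleanly without any conditional compilation of its own.
 */
static int example_start_vport(struct qed_hwfn *p_hwfn, bool b_is_vf,
			       u16 mtu, enum qed_tpa_mode tpa_mode)
{
	if (!b_is_vf)
		return 0;	/* PF path would program the vport directly */

	return qed_vf_pf_vport_start(p_hwfn, 0 /* vport_id */, mtu,
				     1 /* inner_vlan_removal */, tpa_mode,
				     1 /* max_buffers_per_cqe */,
				     0 /* only_untagged */);
}
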
...@@ -112,6 +112,10 @@ struct qede_dev { ...@@ -112,6 +112,10 @@ struct qede_dev {
u32 dp_module; u32 dp_module;
u8 dp_level; u8 dp_level;
u32 flags;
#define QEDE_FLAG_IS_VF BIT(0)
#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
const struct qed_eth_ops *ops; const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info; struct qed_dev_eth_info dev_info;
......
...@@ -151,6 +151,8 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) ...@@ -151,6 +151,8 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
int i, j, k; int i, j, k;
for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) { for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
if (IS_VF(edev) && qede_stats_arr[i].pf_only)
continue;
strcpy(buf + j * ETH_GSTRING_LEN, strcpy(buf + j * ETH_GSTRING_LEN,
qede_stats_arr[i].string); qede_stats_arr[i].string);
j++; j++;
...@@ -194,8 +196,11 @@ static void qede_get_ethtool_stats(struct net_device *dev, ...@@ -194,8 +196,11 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock); mutex_lock(&edev->qede_lock);
for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
continue;
buf[cnt++] = QEDE_STATS_DATA(edev, sidx); buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
}
for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) { for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
buf[cnt] = 0; buf[cnt] = 0;
...@@ -214,6 +219,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) ...@@ -214,6 +219,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) { switch (stringset) {
case ETH_SS_STATS: case ETH_SS_STATS:
if (IS_VF(edev)) {
int i;
for (i = 0; i < QEDE_NUM_STATS; i++)
if (qede_stats_arr[i].pf_only)
num_stats--;
}
return num_stats + QEDE_NUM_RQSTATS; return num_stats + QEDE_NUM_RQSTATS;
case ETH_SS_PRIV_FLAGS: case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN; return QEDE_PRI_FLAG_LEN;
...@@ -1142,7 +1154,34 @@ static const struct ethtool_ops qede_ethtool_ops = { ...@@ -1142,7 +1154,34 @@ static const struct ethtool_ops qede_ethtool_ops = {
.self_test = qede_self_test, .self_test = qede_self_test,
}; };
static const struct ethtool_ops qede_vf_ethtool_ops = {
.get_settings = qede_get_settings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
.get_link = qede_get_link,
.get_ringparam = qede_get_ringparam,
.set_ringparam = qede_set_ringparam,
.get_strings = qede_get_strings,
.get_ethtool_stats = qede_get_ethtool_stats,
.get_priv_flags = qede_get_priv_flags,
.get_sset_count = qede_get_sset_count,
.get_rxnfc = qede_get_rxnfc,
.set_rxnfc = qede_set_rxnfc,
.get_rxfh_indir_size = qede_get_rxfh_indir_size,
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
};
void qede_set_ethtool_ops(struct net_device *dev) void qede_set_ethtool_ops(struct net_device *dev)
{ {
struct qede_dev *edev = netdev_priv(dev);
if (IS_VF(edev))
dev->ethtool_ops = &qede_vf_ethtool_ops;
else
dev->ethtool_ops = &qede_ethtool_ops; dev->ethtool_ops = &qede_ethtool_ops;
} }
...@@ -63,6 +63,7 @@ static const struct qed_eth_ops *qed_ops; ...@@ -63,6 +63,7 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_100 0x1644 #define CHIP_NUM_57980S_100 0x1644
#define CHIP_NUM_57980S_50 0x1654 #define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656 #define CHIP_NUM_57980S_25 0x1656
#define CHIP_NUM_57980S_IOV 0x1664
#ifndef PCI_DEVICE_ID_NX2_57980E #ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40 #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
...@@ -71,15 +72,22 @@ static const struct qed_eth_ops *qed_ops; ...@@ -71,15 +72,22 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100 #define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50 #define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25 #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
#endif #endif
enum qede_pci_private {
QEDE_PRIVATE_PF,
QEDE_PRIVATE_VF
};
static const struct pci_device_id qede_pci_tbl[] = { static const struct pci_device_id qede_pci_tbl[] = {
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 }, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 }, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 }, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 }, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 }, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 }, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
{ 0 } { 0 }
}; };
...@@ -94,17 +102,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, ...@@ -94,17 +102,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq); struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link); static void qede_link_update(void *dev, struct qed_link_output *link);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
{
struct qede_dev *edev = netdev_priv(ndev);
if (vlan > 4095) {
DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
return -EINVAL;
}
DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
vlan, vf);
return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}
static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
struct qede_dev *edev = netdev_priv(ndev);
DP_VERBOSE(edev, QED_MSG_IOV,
"Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
if (!is_valid_ether_addr(mac)) {
DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
return -EINVAL;
}
return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}
static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
struct qed_dev_info *qed_info = &edev->dev_info.common;
int rc;
DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
/* Enable/Disable Tx switching for PF */
if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
struct qed_update_vport_params params;
memset(&params, 0, sizeof(params));
params.vport_id = 0;
params.update_tx_switching_flg = 1;
params.tx_switching_flg = num_vfs_param ? 1 : 0;
edev->ops->vport_update(edev->cdev, &params);
}
return rc;
}
#endif
static struct pci_driver qede_pci_driver = { static struct pci_driver qede_pci_driver = {
.name = "qede", .name = "qede",
.id_table = qede_pci_tbl, .id_table = qede_pci_tbl,
.probe = qede_probe, .probe = qede_probe,
.remove = qede_remove, .remove = qede_remove,
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
}; };
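
For reference: VFs are instantiated through the standard PCI sysfs interface. Writing a count to the PF's sriov_numvfs file makes the PCI core invoke the .sriov_configure hook registered above, which lands in qede_sriov_configure() and forwards the request to qed via iov->configure(). On success, and when the device is not in NPAR mode but supports Tx switching, the handler also toggles Tx switching on the PF vport to match whether VFs are present, so traffic between functions can be switched inside the adapter.
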
static void qede_force_mac(void *dev, u8 *mac)
{
struct qede_dev *edev = dev;
ether_addr_copy(edev->ndev->dev_addr, mac);
ether_addr_copy(edev->primary_mac, mac);
}
static struct qed_eth_cb_ops qede_ll_ops = { static struct qed_eth_cb_ops qede_ll_ops = {
{ {
.link_update = qede_link_update, .link_update = qede_link_update,
}, },
.force_mac = qede_force_mac,
}; };
static int qede_netdev_event(struct notifier_block *this, unsigned long event, static int qede_netdev_event(struct notifier_block *this, unsigned long event,
...@@ -1730,6 +1808,49 @@ static struct rtnl_link_stats64 *qede_get_stats64( ...@@ -1730,6 +1808,49 @@ static struct rtnl_link_stats64 *qede_get_stats64(
return stats; return stats;
} }
#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
struct ifla_vf_info *ivi)
{
struct qede_dev *edev = netdev_priv(dev);
if (!edev->ops)
return -EINVAL;
return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}
static int qede_set_vf_rate(struct net_device *dev, int vfidx,
int min_tx_rate, int max_tx_rate)
{
struct qede_dev *edev = netdev_priv(dev);
return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
max_tx_rate);
}
static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
struct qede_dev *edev = netdev_priv(dev);
if (!edev->ops)
return -EINVAL;
return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}
static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
int link_state)
{
struct qede_dev *edev = netdev_priv(dev);
if (!edev->ops)
return -EINVAL;
return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}
#endif
static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{ {
struct qed_update_vport_params params; struct qed_update_vport_params params;
...@@ -2049,9 +2170,19 @@ static const struct net_device_ops qede_netdev_ops = { ...@@ -2049,9 +2170,19 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr, .ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu, .ndo_change_mtu = qede_change_mtu,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
.ndo_set_vf_vlan = qede_set_vf_vlan,
#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
.ndo_get_stats64 = qede_get_stats64, .ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_link_state = qede_set_vf_link_state,
.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
.ndo_get_vf_config = qede_get_vf_config,
.ndo_set_vf_rate = qede_set_vf_rate,
#endif
#ifdef CONFIG_QEDE_VXLAN #ifdef CONFIG_QEDE_VXLAN
.ndo_add_vxlan_port = qede_add_vxlan_port, .ndo_add_vxlan_port = qede_add_vxlan_port,
.ndo_del_vxlan_port = qede_del_vxlan_port, .ndo_del_vxlan_port = qede_del_vxlan_port,
...@@ -2283,8 +2414,9 @@ enum qede_probe_mode { ...@@ -2283,8 +2414,9 @@ enum qede_probe_mode {
}; };
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
enum qede_probe_mode mode) bool is_vf, enum qede_probe_mode mode)
{ {
struct qed_probe_params probe_params;
struct qed_slowpath_params params; struct qed_slowpath_params params;
struct qed_dev_eth_info dev_info; struct qed_dev_eth_info dev_info;
struct qede_dev *edev; struct qede_dev *edev;
...@@ -2294,8 +2426,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, ...@@ -2294,8 +2426,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (unlikely(dp_level & QED_LEVEL_INFO)) if (unlikely(dp_level & QED_LEVEL_INFO))
pr_notice("Starting qede probe\n"); pr_notice("Starting qede probe\n");
cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH, memset(&probe_params, 0, sizeof(probe_params));
dp_module, dp_level); probe_params.protocol = QED_PROTOCOL_ETH;
probe_params.dp_module = dp_module;
probe_params.dp_level = dp_level;
probe_params.is_vf = is_vf;
cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) { if (!cdev) {
rc = -ENODEV; rc = -ENODEV;
goto err0; goto err0;
...@@ -2329,6 +2465,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, ...@@ -2329,6 +2465,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
goto err2; goto err2;
} }
if (is_vf)
edev->flags |= QEDE_FLAG_IS_VF;
qede_init_ndev(edev); qede_init_ndev(edev);
rc = register_netdev(edev->ndev); rc = register_netdev(edev->ndev);
...@@ -2360,12 +2499,24 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, ...@@ -2360,12 +2499,24 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{ {
bool is_vf = false;
u32 dp_module = 0; u32 dp_module = 0;
u8 dp_level = 0; u8 dp_level = 0;
switch ((enum qede_pci_private)id->driver_data) {
case QEDE_PRIVATE_VF:
if (debug & QED_LOG_VERBOSE_MASK)
dev_err(&pdev->dev, "Probing a VF\n");
is_vf = true;
break;
default:
if (debug & QED_LOG_VERBOSE_MASK)
dev_err(&pdev->dev, "Probing a PF\n");
}
qede_config_debug(debug, &dp_module, &dp_level); qede_config_debug(debug, &dp_module, &dp_level);
return __qede_probe(pdev, dp_module, dp_level, return __qede_probe(pdev, dp_module, dp_level, is_vf,
QEDE_PROBE_NORMAL); QEDE_PROBE_NORMAL);
} }
...@@ -3062,6 +3213,7 @@ static int qede_start_queues(struct qede_dev *edev) ...@@ -3062,6 +3213,7 @@ static int qede_start_queues(struct qede_dev *edev)
struct qed_dev *cdev = edev->cdev; struct qed_dev *cdev = edev->cdev;
struct qed_update_vport_params vport_update_params; struct qed_update_vport_params vport_update_params;
struct qed_queue_start_common_params q_params; struct qed_queue_start_common_params q_params;
struct qed_dev_info *qed_info = &edev->dev_info.common;
struct qed_start_vport_params start = {0}; struct qed_start_vport_params start = {0};
bool reset_rss_indir = false; bool reset_rss_indir = false;
...@@ -3155,6 +3307,12 @@ static int qede_start_queues(struct qede_dev *edev) ...@@ -3155,6 +3307,12 @@ static int qede_start_queues(struct qede_dev *edev)
vport_update_params.update_vport_active_flg = 1; vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1; vport_update_params.vport_active_flg = 1;
if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
qed_info->tx_switching) {
vport_update_params.update_tx_switching_flg = 1;
vport_update_params.tx_switching_flg = 1;
}
/* Fill struct with RSS params */ /* Fill struct with RSS params */
if (QEDE_RSS_CNT(edev) > 1) { if (QEDE_RSS_CNT(edev) > 1) {
vport_update_params.update_rss_flg = 1; vport_update_params.update_rss_flg = 1;
...@@ -3451,6 +3609,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p) ...@@ -3451,6 +3609,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
return -EFAULT; return -EFAULT;
} }
if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
DP_NOTICE(edev, "qed prevents setting MAC\n");
return -EINVAL;
}
ether_addr_copy(ndev->dev_addr, addr->sa_data); ether_addr_copy(ndev->dev_addr, addr->sa_data);
if (!netif_running(ndev)) { if (!netif_running(ndev)) {
......
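
The check_mac callout added above gives qed a veto over MAC changes requested through ndo_set_mac_address; on a VF this is the natural place to enforce a MAC the PF has forced. The sketch below is purely hypothetical, since the qed-side implementation is not in this hunk; struct example_vf_mac_policy and its fields are invented placeholders:

/* Hypothetical policy check, not the series' actual implementation. */
struct example_vf_mac_policy {
	bool forced_mac_valid;
	u8 forced_mac[ETH_ALEN];
};

static bool example_check_mac(struct example_vf_mac_policy *p, u8 *mac)
{
	/* Refuse any change while the PF has forced a different MAC;
	 * qede then fails ndo_set_mac_address with -EINVAL.
	 */
	if (p->forced_mac_valid && !ether_addr_equal(p->forced_mac, mac))
		return false;

	return true;
}
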
...@@ -285,6 +285,63 @@ ...@@ -285,6 +285,63 @@
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 #define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
#define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
PXP_VF_BAR0_IGU_LENGTH - 1)
#define PXP_VF_BAR0_START_DQ 0x3000
#define PXP_VF_BAR0_DQ_LENGTH 0x200
#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ 4)
#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
PXP_VF_BAR0_DQ_LENGTH - 1)
#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
/* ILT Records */ /* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000 #define PXP_NUM_ILT_RECORDS_K2 11000
...@@ -327,9 +384,14 @@ struct regpair { ...@@ -327,9 +384,14 @@ struct regpair {
__le32 hi; __le32 hi;
}; };
struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
/* Event Data Union */ /* Event Data Union */
union event_ring_data { union event_ring_data {
u8 bytes[8]; u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
struct async_data async_info; struct async_data async_info;
}; };
......
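
The new vf_pf_channel member lets a VF->PF request ride the event ring: the EQE carries the address of the VF's request buffer as a regpair. A minimal sketch, assuming kernel context, of how a PF-side handler could rebuild the 64-bit address; the function name is illustrative:

/* Illustrative only: recombine the lo/hi halves of the mailbox address
 * carried in a VF<->PF channel event-queue element.
 */
static u64 example_vf_msg_addr(union event_ring_data *data)
{
	struct vf_pf_channel_eqe_data *p = &data->vf_pf_channel;

	return ((u64)le32_to_cpu(p->msg_addr.hi) << 32) |
		le32_to_cpu(p->msg_addr.lo);
}
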
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/if_link.h> #include <linux/if_link.h>
#include <linux/qed/eth_common.h> #include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h> #include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>
struct qed_dev_eth_info { struct qed_dev_eth_info {
struct qed_dev_info common; struct qed_dev_info common;
...@@ -34,6 +35,8 @@ struct qed_update_vport_params { ...@@ -34,6 +35,8 @@ struct qed_update_vport_params {
u8 vport_id; u8 vport_id;
u8 update_vport_active_flg; u8 update_vport_active_flg;
u8 vport_active_flg; u8 vport_active_flg;
u8 update_tx_switching_flg;
u8 tx_switching_flg;
u8 update_accept_any_vlan_flg; u8 update_accept_any_vlan_flg;
u8 accept_any_vlan; u8 accept_any_vlan;
u8 update_rss_flg; u8 update_rss_flg;
...@@ -121,10 +124,14 @@ struct qed_tunn_params { ...@@ -121,10 +124,14 @@ struct qed_tunn_params {
struct qed_eth_cb_ops { struct qed_eth_cb_ops {
struct qed_common_cb_ops common; struct qed_common_cb_ops common;
void (*force_mac) (void *dev, u8 *mac);
}; };
struct qed_eth_ops { struct qed_eth_ops {
const struct qed_common_ops *common; const struct qed_common_ops *common;
#ifdef CONFIG_QED_SRIOV
const struct qed_iov_hv_ops *iov;
#endif
int (*fill_dev_info)(struct qed_dev *cdev, int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_eth_info *info); struct qed_dev_eth_info *info);
...@@ -133,6 +140,8 @@ struct qed_eth_ops { ...@@ -133,6 +140,8 @@ struct qed_eth_ops {
struct qed_eth_cb_ops *ops, struct qed_eth_cb_ops *ops,
void *cookie); void *cookie);
bool(*check_mac) (struct qed_dev *cdev, u8 *mac);
int (*vport_start)(struct qed_dev *cdev, int (*vport_start)(struct qed_dev *cdev,
struct qed_start_vport_params *params); struct qed_start_vport_params *params);
......
...@@ -93,6 +93,7 @@ struct qed_dev_info { ...@@ -93,6 +93,7 @@ struct qed_dev_info {
u32 flash_size; u32 flash_size;
u8 mf_mode; u8 mf_mode;
bool tx_switching;
}; };
enum qed_sb_type { enum qed_sb_type {
...@@ -140,6 +141,13 @@ struct qed_link_output { ...@@ -140,6 +141,13 @@ struct qed_link_output {
u32 pause_config; u32 pause_config;
}; };
struct qed_probe_params {
enum qed_protocol protocol;
u32 dp_module;
u8 dp_level;
bool is_vf;
};
#define QED_DRV_VER_STR_SIZE 12 #define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params { struct qed_slowpath_params {
u32 int_mode; u32 int_mode;
...@@ -207,8 +215,7 @@ struct qed_common_ops { ...@@ -207,8 +215,7 @@ struct qed_common_ops {
struct qed_selftest_ops *selftest; struct qed_selftest_ops *selftest;
struct qed_dev* (*probe)(struct pci_dev *dev, struct qed_dev* (*probe)(struct pci_dev *dev,
enum qed_protocol protocol, struct qed_probe_params *params);
u32 dp_module, u8 dp_level);
void (*remove)(struct qed_dev *cdev); void (*remove)(struct qed_dev *cdev);
......
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_IOV_IF_H
#define _QED_IOV_IF_H
#include <linux/qed/qed_if.h>
/* Structs used by PF to control and manipulate child VFs */
struct qed_iov_hv_ops {
int (*configure)(struct qed_dev *cdev, int num_vfs_param);
int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid);
int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid);
int (*get_config) (struct qed_dev *cdev, int vf_id,
struct ifla_vf_info *ivi);
int (*set_link_state) (struct qed_dev *cdev, int vf_id,
int link_state);
int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val);
int (*set_rate) (struct qed_dev *cdev, int vfid,
u32 min_rate, u32 max_rate);
};
#endif