Commit 48899291 authored by David S. Miller

Merge branch 'qed-sriov'

Yuval Mintz says:

====================
qed*: Add SR-IOV support

This series adds SR-IOV support to the qed/qede drivers, adding a new PCI
device ID for a VF that is shared between all the various PFs that
support IOV.

This is quite a massive series - the first 7 parts of the series add
the infrastructure for supporting VFs in qed - mainly support for a
HW-based vf<->pf channel, as well as diverging all existing configuration
flows based on the PF/VF decision. I.e., while PF-originated requests
head directly to HW/FW, VF requests first have to traverse to the PF,
which performs the configuration on their behalf.

The 8th patch is the one that adds the support for the VF device in qede.

The remaining 6 patches each add some user-based API support related to
VFs that can be used via the PF - forcing MAC/VLAN, changing speed, etc.

Dave,

Sorry in advance for the length of the series. Most of the bulk here is in
the infrastructure patches that have to go together [or at least, it makes
little sense to try splitting them up].

Please consider applying this to `net-next'.

Thanks,
Yuval

Changes from previous revision:
------------------------------
 - V2 - Replace aligned_u64 with regular u64; this was possible as the
        shared structures [between PF and VF] were already sufficiently
        padded as-is in the API, making the explicit alignment redundant.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 631ad4a3 831bfb0e
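
The PF/VF split described in the cover letter boils down to a dispatch at
each configuration entry point: a PF programs HW/FW directly, while a VF
marshals the same request over the HW-based vf<->pf channel and lets the
PF act on its behalf. A minimal sketch of that shape, with hypothetical
helper names rather than the driver's actual API:

	/* Sketch only - the real series splits this across qed_l2.c and
	 * qed_vf.c; the sketch_* helpers are invented for illustration.
	 */
	static int sketch_vport_start(struct qed_hwfn *p_hwfn, u8 vport_id)
	{
		if (IS_VF(p_hwfn->cdev))
			/* VF: post a request on the vf<->pf channel and
			 * wait for the PF's reply.
			 */
			return sketch_vf_pf_vport_start(p_hwfn, vport_id);

		/* PF: configure HW/FW directly via a ramrod. */
		return sketch_pf_vport_start_ramrod(p_hwfn, vport_id);
	}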
@@ -98,6 +98,16 @@ config QED
 	---help---
 	  This enables the support for ...
 
+config QED_SRIOV
+	bool "QLogic QED 25/40/100Gb SR-IOV support"
+	depends on QED && PCI_IOV
+	default y
+	---help---
+	  This configuration parameter enables Single Root Input Output
+	  Virtualization support for QED devices.
+	  This allows for virtual function acceleration in virtualized
+	  environments.
+
 config QEDE
 	tristate "QLogic QED 25/40/100Gb Ethernet NIC"
 	depends on QED
......
@@ -3,3 +3,4 @@ obj-$(CONFIG_QED) := qed.o
 qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
 	 qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
 	 qed_selftest.o
+qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
@@ -152,6 +152,7 @@ enum QED_RESOURCES {
 enum QED_FEATURE {
 	QED_PF_L2_QUE,
+	QED_VF,
 	QED_MAX_FEATURES,
 };
@@ -310,6 +311,8 @@ struct qed_hwfn {
 	bool first_on_engine;
 	bool hw_init_done;
 
+	u8 num_funcs_on_engine;
+
 	/* BAR access */
 	void __iomem *regview;
 	void __iomem *doorbells;
@@ -360,6 +363,8 @@ struct qed_hwfn {
 	/* True if the driver requests for the link */
 	bool b_drv_link_init;
 
+	struct qed_vf_iov *vf_iov_info;
+	struct qed_pf_iov *pf_iov_info;
 	struct qed_mcp_info *mcp_info;
 
 	struct qed_hw_cid_data *p_tx_cids;
@@ -376,6 +381,12 @@ struct qed_hwfn {
 	struct qed_simd_fp_handler simd_proto_handler[64];
 
+#ifdef CONFIG_QED_SRIOV
+	struct workqueue_struct *iov_wq;
+	struct delayed_work iov_task;
+	unsigned long iov_task_flags;
+#endif
+
 	struct z_stream_s *stream;
 };
@@ -484,7 +495,13 @@ struct qed_dev {
 	u8 num_hwfns;
 	struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
 
+	/* SRIOV */
+	struct qed_hw_sriov_info *p_iov_info;
+#define IS_QED_SRIOV(cdev)	(!!(cdev)->p_iov_info)
+
 	unsigned long tunn_mode;
+
+	bool b_is_vf;
 	u32 drv_type;
 
 	struct qed_eth_stats *reset_stats;
@@ -514,6 +531,8 @@ struct qed_dev {
 	const struct firmware *firmware;
 };
 
+#define NUM_OF_VFS(dev)		MAX_NUM_VFS_BB
+#define NUM_OF_L2_QUEUES(dev)	MAX_NUM_L2_QUEUES_BB
 #define NUM_OF_SBS(dev)		MAX_SB_PER_PATH_BB
 #define NUM_OF_ENG_PFS(dev)	MAX_NUM_PFS_BB
@@ -535,8 +554,10 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 
 #define PURE_LB_TC 8
 
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
 
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 #define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
 
 /* Other Linux specific common definitions */
......
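
The new b_is_vf flag is what the IS_PF()/IS_VF() checks used throughout the
rest of this diff key off. The macro definitions themselves live in a portion
of qed.h not shown above; the assumed semantics are simply:

	#define IS_VF(cdev)	((cdev)->b_is_vf)
	#define IS_PF(cdev)	(!((cdev)->b_is_vf))

IS_QED_SRIOV(cdev), by contrast, is only true on a PF that actually exposes
VFs, since p_iov_info is populated from the device's SR-IOV PCI capability.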
This diff is collapsed.
@@ -51,6 +51,9 @@ enum qed_cxt_elem_type {
 	QED_ELEM_TASK
 };
 
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+				enum protocol_type type, u32 *vf_cid);
+
 /**
  * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
  *
......
This diff is collapsed.
@@ -182,11 +182,15 @@ enum qed_dmae_address_type_t {
  * used mostly to write a zeroed buffer to destination address
  * using DMA
  */
 #define QED_DMAE_FLAG_RW_REPL_SRC	0x00000001
-#define QED_DMAE_FLAG_COMPLETION_DST	0x00000008
+#define QED_DMAE_FLAG_VF_SRC		0x00000002
+#define QED_DMAE_FLAG_VF_DST		0x00000004
+#define QED_DMAE_FLAG_COMPLETION_DST	0x00000008
 
 struct qed_dmae_params {
 	u32 flags; /* consists of QED_DMAE_FLAG_* values */
+	u8 src_vfid;
+	u8 dst_vfid;
 };
 
 /**
@@ -208,6 +212,23 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
 		  u32 size_in_dwords,
 		  u32 flags);
 
+/**
+ * @brief qed_dmae_host2host - copy data from a source address
+ * to a destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param params
+ */
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       dma_addr_t source_addr,
+		       dma_addr_t dest_addr,
+		       u32 size_in_dwords, struct qed_dmae_params *p_params);
+
 /**
  * @brief qed_chain_alloc - Allocate and initialize a chain
  *
@@ -282,11 +303,11 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
  * @param p_hwfn
  * @param p_ptt
  * @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
  *
  * @return int
  */
 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt,
-		      u16 id);
+		      struct qed_ptt *p_ptt, u16 id, bool is_vf);
 
 #endif
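
Together with the new VF flags in qed_dmae_params, qed_dmae_host2host() lets
the PF DMA a buffer directly into (or out of) a VF's memory, e.g. when
completing a request that arrived over the vf<->pf channel. A hedged usage
sketch - the buffer variables are invented for illustration:

	struct qed_dmae_params params;
	int rc;

	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;	/* destination is a VF */
	params.dst_vfid = vfid;			/* relative VF id */

	/* the size argument is given in dwords, hence the division by 4 */
	rc = qed_dmae_host2host(p_hwfn, p_ptt, pf_reply_phys,
				vf_reply_phys, reply_size / 4, &params);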
@@ -29,9 +29,9 @@ struct qed_ptt;
 enum common_event_opcode {
 	COMMON_EVENT_PF_START,
 	COMMON_EVENT_PF_STOP,
-	COMMON_EVENT_RESERVED,
-	COMMON_EVENT_RESERVED2,
-	COMMON_EVENT_RESERVED3,
+	COMMON_EVENT_VF_START,
+	COMMON_EVENT_VF_STOP,
+	COMMON_EVENT_VF_PF_CHANNEL,
 	COMMON_EVENT_RESERVED4,
 	COMMON_EVENT_RESERVED5,
 	COMMON_EVENT_RESERVED6,
@@ -44,8 +44,8 @@ enum common_ramrod_cmd_id {
 	COMMON_RAMROD_UNUSED,
 	COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
 	COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
-	COMMON_RAMROD_RESERVED,
-	COMMON_RAMROD_RESERVED2,
+	COMMON_RAMROD_VF_START,
+	COMMON_RAMROD_VF_STOP,
 	COMMON_RAMROD_PF_UPDATE,
 	COMMON_RAMROD_EMPTY,
 	MAX_COMMON_RAMROD_CMD_ID
@@ -573,6 +573,14 @@ union event_ring_element {
 	struct event_ring_next_addr next_addr;
 };
 
+struct mstorm_non_trigger_vf_zone {
+	struct eth_mstorm_per_queue_stat eth_queue_stat;
+};
+
+struct mstorm_vf_zone {
+	struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
 enum personality_type {
 	BAD_PERSONALITY_TYP,
 	PERSONALITY_RESERVED,
@@ -671,6 +679,16 @@ enum ports_mode {
 	MAX_PORTS_MODE
 };
 
+struct pstorm_non_trigger_vf_zone {
+	struct eth_pstorm_per_queue_stat eth_queue_stat;
+	struct regpair reserved[2];
+};
+
+struct pstorm_vf_zone {
+	struct pstorm_non_trigger_vf_zone non_trigger;
+	struct regpair reserved[7];
+};
+
 /* Ramrod Header of SPQE */
 struct ramrod_header {
 	__le32 cid /* Slowpath Connection CID */;
@@ -700,6 +718,36 @@ struct tstorm_per_port_stat {
 	struct regpair preroce_irregular_pkt;
 };
 
+struct ustorm_non_trigger_vf_zone {
+	struct eth_ustorm_per_queue_stat eth_queue_stat;
+	struct regpair vf_pf_msg_addr;
+};
+
+struct ustorm_trigger_vf_zone {
+	u8 vf_pf_msg_valid;
+	u8 reserved[7];
+};
+
+struct ustorm_vf_zone {
+	struct ustorm_non_trigger_vf_zone non_trigger;
+	struct ustorm_trigger_vf_zone trigger;
+};
+
+struct vf_start_ramrod_data {
+	u8 vf_id;
+	u8 enable_flr_ack;
+	__le16 opaque_fid;
+	u8 personality;
+	u8 reserved[3];
+};
+
+struct vf_stop_ramrod_data {
+	u8 vf_id;
+	u8 reserved0;
+	__le16 reserved1;
+	__le32 reserved2;
+};
+
 struct atten_status_block {
 	__le32 atten_bits;
 	__le32 atten_ack;
@@ -1026,7 +1074,7 @@ enum init_phases {
 	PHASE_ENGINE,
 	PHASE_PORT,
 	PHASE_PF,
-	PHASE_RESERVED,
+	PHASE_VF,
 	PHASE_QM_PF,
 	MAX_INIT_PHASES
 };
......
@@ -23,6 +23,7 @@
 #include "qed_hsi.h"
 #include "qed_hw.h"
 #include "qed_reg_addr.h"
+#include "qed_sriov.h"
 
 #define QED_BAR_ACQUIRE_TIMEOUT 1000
@@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
 		quota = min_t(size_t, n - done,
 			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
 
-		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
-		hw_offset = qed_ptt_get_bar_addr(p_ptt);
+		if (IS_PF(p_hwfn->cdev)) {
+			qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+			hw_offset = qed_ptt_get_bar_addr(p_ptt);
+		} else {
+			hw_offset = hw_addr + done;
+		}
 
 		dw_count = quota / 4;
 		host_addr = (u32 *)((u8 *)addr + done);
@@ -338,14 +343,25 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
 	       *(u32 *)&p_ptt->pxp.pretend);
 }
 
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+	u32 concrete_fid = 0;
+
+	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+	return concrete_fid;
+}
+
 /* DMAE */
 static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
 			    const u8 is_src_type_grc,
 			    const u8 is_dst_type_grc,
 			    struct qed_dmae_params *p_params)
 {
+	u16 opcode_b = 0;
 	u32 opcode = 0;
-	u16 opcodeB = 0;
 
 	/* Whether the source is the PCIe or the GRC.
 	 * 0- The source is the PCIe
@@ -387,14 +403,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
 	opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
 		   DMAE_CMD_DST_ADDR_RESET_SHIFT);
 
-	opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
-		    DMAE_CMD_SRC_VF_ID_SHIFT);
+	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
+	if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
+		opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
+		opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+	} else {
+		opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
+			    DMAE_CMD_SRC_VF_ID_SHIFT;
+	}
 
-	opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
-		    DMAE_CMD_DST_VF_ID_SHIFT);
+	if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
+		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+	} else {
+		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+	}
 
 	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
-	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
 }
 
 u32 qed_dmae_idx_to_go_cmd(u8 idx)
@@ -742,6 +768,28 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
+int
+qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt,
+		   dma_addr_t source_addr,
+		   dma_addr_t dest_addr,
+		   u32 size_in_dwords, struct qed_dmae_params *p_params)
+{
+	int rc;
+
+	mutex_lock(&(p_hwfn->dmae_info.mutex));
+
+	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+				      dest_addr,
+				      QED_DMAE_ADDRESS_HOST_PHYS,
+				      QED_DMAE_ADDRESS_HOST_PHYS,
+				      size_in_dwords, p_params);
+
+	mutex_unlock(&(p_hwfn->dmae_info.mutex));
+
+	return rc;
+}
+
 u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
 		  enum protocol_type proto,
 		  union qed_qm_pq_params *p_params)
@@ -765,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
 		break;
 	case PROTOCOLID_ETH:
 		pq_id = p_params->eth.tc;
+		if (p_params->eth.is_vf)
+			pq_id += p_hwfn->qm_info.vf_queues_offset +
+				 p_params->eth.vf_id;
 		break;
 	default:
 		pq_id = 0;
......
@@ -220,6 +220,16 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
 void qed_port_unpretend(struct qed_hwfn *p_hwfn,
 			struct qed_ptt *p_ptt);
 
+/**
+ * @brief qed_vfid_to_concrete - build a concrete FID for a
+ * given VF ID
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ */
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
+
 /**
  * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
  * this is declared here since other files will require it.
......
@@ -18,6 +18,7 @@
 #include "qed_hw.h"
 #include "qed_init_ops.h"
 #include "qed_reg_addr.h"
+#include "qed_sriov.h"
 
 #define QED_INIT_MAX_POLL_COUNT 100
 #define QED_INIT_POLL_PERIOD_US 500
@@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_rt_data *rt_data = &p_hwfn->rt_data;
 
+	if (IS_VF(p_hwfn->cdev))
+		return 0;
+
 	rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
 				   GFP_KERNEL);
 	if (!rt_data->b_valid)
......
@@ -26,6 +26,8 @@
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
 
 struct qed_pi_info {
 	qed_int_comp_cb_t comp_cb;
@@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
 	u32 sb_offset;
 	u32 pi_offset;
 
+	if (IS_VF(p_hwfn->cdev))
+		return;
+
 	sb_offset = igu_sb_id * PIS_PER_SB;
 	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
@@ -2542,8 +2547,9 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
 	sb_info->sb_ack = 0;
 	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
 
-	qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
-			    sb_info->igu_sb_id, 0, 0);
+	if (IS_PF(p_hwfn->cdev))
+		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+				    sb_info->igu_sb_id, 0, 0);
 }
 
 /**
@@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
 	/* Assuming continuous set of IGU SBs dedicated for given PF */
 	if (sb_id == QED_SP_SB_ID)
 		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
-	else
+	else if (IS_PF(p_hwfn->cdev))
 		igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+	else
+		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
 
 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
 		   (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
@@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
 	/* The igu address will hold the absolute address that needs to be
 	 * written to for a specific status block
 	 */
-	sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
-			    GTT_BAR0_MAP_REG_IGU_CMD +
-			    (sb_info->igu_sb_id << 3);
+	if (IS_PF(p_hwfn->cdev)) {
+		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+				    GTT_BAR0_MAP_REG_IGU_CMD +
+				    (sb_info->igu_sb_id << 3);
+	} else {
+		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+				    PXP_VF_BAR0_START_IGU +
+				    ((IGU_CMD_INT_ACK_BASE +
+				      sb_info->igu_sb_id) << 3);
+	}
 
 	sb_info->flags |= QED_SB_INFO_INIT;
@@ -2783,6 +2798,9 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
 {
 	p_hwfn->b_int_enabled = 0;
 
+	if (IS_VF(p_hwfn->cdev))
+		return;
+
 	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
 }
@@ -2935,9 +2953,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
 			 struct qed_ptt *p_ptt)
 {
 	struct qed_igu_info *p_igu_info;
+	u32 val, min_vf = 0, max_vf = 0;
+	u16 sb_id, last_iov_sb_id = 0;
 	struct qed_igu_block *blk;
-	u32 val;
-	u16 sb_id;
 	u16 prev_sb_id = 0xFF;
 
 	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
@@ -2947,12 +2965,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
 
 	p_igu_info = p_hwfn->hw_info.p_igu_info;
 
-	/* Initialize base sb / sb cnt for PFs */
+	/* Initialize base sb / sb cnt for PFs and VFs */
 	p_igu_info->igu_base_sb = 0xffff;
 	p_igu_info->igu_sb_cnt = 0;
 	p_igu_info->igu_dsb_id = 0xffff;
 	p_igu_info->igu_base_sb_iov = 0xffff;
 
+	if (p_hwfn->cdev->p_iov_info) {
+		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+		min_vf = p_iov->first_vf_in_pf;
+		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
+	}
+
 	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
 	     sb_id++) {
 		blk = &p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2986,14 +3011,43 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
 					(p_igu_info->igu_sb_cnt)++;
 				}
 			}
+		} else {
+			if ((blk->function_id >= min_vf) &&
+			    (blk->function_id < max_vf)) {
+				/* Available for VFs of this PF */
+				if (p_igu_info->igu_base_sb_iov == 0xffff) {
+					p_igu_info->igu_base_sb_iov = sb_id;
+				} else if (last_iov_sb_id != sb_id - 1) {
+					if (!val) {
+						DP_VERBOSE(p_hwfn->cdev,
+							   NETIF_MSG_INTR,
+							   "First uninitialized IGU CAM entry at index 0x%04x\n",
+							   sb_id);
+					} else {
+						DP_NOTICE(p_hwfn->cdev,
+							  "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
+							  p_hwfn->rel_pf_id,
+							  last_iov_sb_id,
+							  sb_id);
+					}
+					break;
+				}
+				blk->status |= QED_IGU_STATUS_FREE;
+				p_hwfn->hw_info.p_igu_info->free_blks++;
+				last_iov_sb_id = sb_id;
+			}
 		}
 	}
-	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
-		   "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
-		   p_igu_info->igu_base_sb,
-		   p_igu_info->igu_sb_cnt,
-		   p_igu_info->igu_dsb_id);
+
+	p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+
+	DP_VERBOSE(
+		p_hwfn,
+		NETIF_MSG_INTR,
+		"IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
+		p_igu_info->igu_base_sb,
+		p_igu_info->igu_base_sb_iov,
+		p_igu_info->igu_sb_cnt,
+		p_igu_info->igu_sb_cnt_iov,
+		p_igu_info->igu_dsb_id);
 
 	if (p_igu_info->igu_base_sb == 0xffff ||
 	    p_igu_info->igu_dsb_id == 0xffff ||
@@ -3116,6 +3170,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
 	p_sb_cnt_info->sb_free_blk = info->free_blks;
 }
 
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+
+	/* Determine origin of SB id */
+	if ((sb_id >= p_info->igu_base_sb) &&
+	    (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
+		return sb_id - p_info->igu_base_sb;
+	} else if ((sb_id >= p_info->igu_base_sb_iov) &&
+		   (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
+		return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+	} else {
+		DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
+		return 0;
+	}
+}
+
 void qed_int_disable_post_isr_release(struct qed_dev *cdev)
 {
 	int i;
......
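
To make the mapping in qed_int_queue_id_from_sb_id() concrete, assume
(values invented for illustration) igu_base_sb = 4 with igu_sb_cnt = 8 and
igu_base_sb_iov = 16 with igu_sb_cnt_iov = 4: PF SBs 4..11 map to queue ids
0..7, and VF SBs 16..19 continue after them as queue ids 8..11 - e.g.
sb_id = 17 yields 17 - 16 + 8 = 9. Any sb_id outside both ranges trips the
DP_NOTICE and falls back to queue 0.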
@@ -20,6 +20,12 @@
 #define IGU_PF_CONF_ATTN_BIT_EN   (0x1 << 3)    /* attention enable */
 #define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)    /* single ISR mode enable */
 #define IGU_PF_CONF_SIMD_MODE     (0x1 << 5)    /* simd all ones mode */
 
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN       (0x1 << 0)    /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN   (0x1 << 1)    /* MSI/MSIX enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4)    /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK   (0xF)         /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT  5             /* Parent PF */
+
 /* Igu control commands
 */
@@ -364,6 +370,16 @@ void qed_int_free(struct qed_hwfn *p_hwfn);
 void qed_int_setup(struct qed_hwfn *p_hwfn,
 		   struct qed_ptt *p_ptt);
 
+/**
+ * @brief - Returns an Rx queue index appropriate for usage with given SB.
+ *
+ * @param p_hwfn
+ * @param sb_id - absolute index of SB
+ *
+ * @return index of Rx queue
+ */
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
 /**
  * @brief - Enable Interrupt & Attention for hw function
  *
......
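
The IGU_VF_CONF_* fields compose the value a PF writes to the new
IGU_REG_VF_CONFIGURATION register when bringing up a VF's interrupt block;
the actual write lives in the IOV code, which is not part of the hunks shown
here. A sketch of such a value, using only the fields defined above:

	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN;

	/* record the owning PF in the parent field */
	igu_vf_conf |= (p_hwfn->rel_pf_id & IGU_VF_CONF_PARENT_MASK) <<
		       IGU_VF_CONF_PARENT_SHIFT;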
This diff is collapsed.
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#ifndef _QED_L2_H
#define _QED_L2_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_eth_if.h>
#include "qed.h"
#include "qed_hw.h"
#include "qed_sp.h"

struct qed_sge_tpa_params {
	u8 max_buffers_per_cqe;

	u8 update_tpa_en_flg;
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	u8 update_tpa_param_flg;
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;
};

enum qed_filter_opcode {
	QED_FILTER_ADD,
	QED_FILTER_REMOVE,
	QED_FILTER_MOVE,
	QED_FILTER_REPLACE,	/* Delete all MACs and add new one instead */
	QED_FILTER_FLUSH,	/* Removes all filters */
};

enum qed_filter_ucast_type {
	QED_FILTER_MAC,
	QED_FILTER_VLAN,
	QED_FILTER_MAC_VLAN,
	QED_FILTER_INNER_MAC,
	QED_FILTER_INNER_VLAN,
	QED_FILTER_INNER_PAIR,
	QED_FILTER_INNER_MAC_VNI_PAIR,
	QED_FILTER_MAC_VNI_PAIR,
	QED_FILTER_VNI,
};

struct qed_filter_ucast {
	enum qed_filter_opcode opcode;
	enum qed_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};

struct qed_filter_mcast {
	/* MOVE is not supported for multicast */
	enum qed_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS        64
	unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};

int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			     u16 rx_queue_id,
			     bool eq_completion_only, bool cqe_completion);

int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);

enum qed_tpa_mode {
	QED_TPA_MODE_NONE,
	QED_TPA_MODE_UNUSED,
	QED_TPA_MODE_GRO,
	QED_TPA_MODE_MAX
};

struct qed_sp_vport_start_params {
	enum qed_tpa_mode tpa_mode;
	bool remove_inner_vlan;
	bool tx_switching;
	bool only_untagged;
	bool drop_ttl0;
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;
	u16 mtu;
};

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params);

struct qed_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps;
	u8 rss_table_size_log;
	u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
	u32 rss_key[QED_RSS_KEY_SIZE];
};

struct qed_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define QED_ACCEPT_NONE                 0x01
#define QED_ACCEPT_UCAST_MATCHED        0x02
#define QED_ACCEPT_UCAST_UNMATCHED      0x04
#define QED_ACCEPT_MCAST_MATCHED        0x08
#define QED_ACCEPT_MCAST_UNMATCHED      0x10
#define QED_ACCEPT_BCAST                0x20
};

struct qed_sp_vport_update_params {
	u16 opaque_fid;
	u8 vport_id;
	u8 update_vport_active_rx_flg;
	u8 vport_active_rx_flg;
	u8 update_vport_active_tx_flg;
	u8 vport_active_tx_flg;
	u8 update_inner_vlan_removal_flg;
	u8 inner_vlan_removal_flg;
	u8 silent_vlan_removal_flg;
	u8 update_default_vlan_enable_flg;
	u8 default_vlan_enable_flg;
	u8 update_default_vlan_flg;
	u16 default_vlan;
	u8 update_tx_switching_flg;
	u8 tx_switching_flg;
	u8 update_approx_mcast_flg;
	u8 update_anti_spoofing_en_flg;
	u8 anti_spoofing_en;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;
	unsigned long bins[8];
	struct qed_rss_params *rss_params;
	struct qed_filter_accept_flags accept_flags;
	struct qed_sge_tpa_params *sge_tpa_params;
};

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data);

/**
 * @brief qed_sp_vport_stop -
 *
 * This ramrod closes a VPort after all its RX and TX queues are terminated.
 * An Assert is generated if any queues are left open.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param vport_id VPort ID
 *
 * @return int
 */
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data);

/**
 * @brief qed_sp_rx_eth_queues_update -
 *
 * This ramrod updates an RX queue. It is used for setting the active state
 * of the queue and updating the TPA and SGE parameters.
 *
 * @note At the moment - only used by non-linux VFs.
 *
 * @param p_hwfn
 * @param rx_queue_id		RX Queue ID
 * @param num_rxqs		Allow to update multiple rx
 *				queues, from rx_queue_id to
 *				(rx_queue_id + num_rxqs)
 * @param complete_cqe_flg	Post completion to the CQE Ring if set
 * @param complete_event_flg	Post completion to the Event Ring if set
 *
 * @return int
 */
int
qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
			    u16 rx_queue_id,
			    u8 num_rxqs,
			    u8 complete_cqe_flg,
			    u8 complete_event_flg,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data);

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params);

int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
				u16 opaque_fid,
				u32 cid,
				struct qed_queue_start_common_params *params,
				u8 stats_id,
				u16 bd_max_bytes,
				dma_addr_t bd_chain_phys_addr,
				dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);

int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
				u16 opaque_fid,
				u32 cid,
				struct qed_queue_start_common_params *p_params,
				u8 stats_id,
				dma_addr_t pbl_addr,
				u16 pbl_size,
				union qed_qm_pq_params *p_pq_params);

u8 qed_mcast_bin_from_mac(u8 *mac);

#endif /* _QED_L2_H */
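
As an illustration of how these structures compose, here is a hedged sketch
of switching a vport into an "accept unmatched unicast" Rx mode via
qed_sp_vport_update(); the accept-flag values are the ones defined above,
while the surrounding variables and the choice of QED_SPQ_MODE_EBLOCK are
illustrative:

	struct qed_sp_vport_update_params update;
	int rc;

	memset(&update, 0, sizeof(update));
	update.opaque_fid = p_hwfn->hw_info.opaque_fid;
	update.vport_id = 0;
	update.accept_flags.update_rx_mode_config = 1;
	update.accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					       QED_ACCEPT_UCAST_UNMATCHED |
					       QED_ACCEPT_MCAST_MATCHED |
					       QED_ACCEPT_BCAST;

	rc = qed_sp_vport_update(p_hwfn, &update, QED_SPQ_MODE_EBLOCK, NULL);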
@@ -24,6 +24,7 @@
 #include <linux/qed/qed_if.h>
 
 #include "qed.h"
+#include "qed_sriov.h"
 #include "qed_sp.h"
 #include "qed_dev_api.h"
 #include "qed_mcp.h"
@@ -125,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev,
 		goto err1;
 	}
 
-	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 		DP_NOTICE(cdev, "No memory region found in bar #2\n");
 		rc = -EIO;
 		goto err1;
@@ -175,12 +176,14 @@ static int qed_init_pci(struct qed_dev *cdev,
 		goto err2;
 	}
 
-	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
-	cdev->db_size = pci_resource_len(cdev->pdev, 2);
-	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
-	if (!cdev->doorbells) {
-		DP_NOTICE(cdev, "Cannot map doorbell space\n");
-		return -ENOMEM;
+	if (IS_PF(cdev)) {
+		cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+		cdev->db_size = pci_resource_len(cdev->pdev, 2);
+		cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+		if (!cdev->doorbells) {
+			DP_NOTICE(cdev, "Cannot map doorbell space\n");
+			return -ENOMEM;
+		}
 	}
 
 	return 0;
@@ -207,20 +210,33 @@ int qed_fill_dev_info(struct qed_dev *cdev,
 	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
 	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
 
-	dev_info->fw_major = FW_MAJOR_VERSION;
-	dev_info->fw_minor = FW_MINOR_VERSION;
-	dev_info->fw_rev = FW_REVISION_VERSION;
-	dev_info->fw_eng = FW_ENGINEERING_VERSION;
-	dev_info->mf_mode = cdev->mf_mode;
+	if (IS_PF(cdev)) {
+		dev_info->fw_major = FW_MAJOR_VERSION;
+		dev_info->fw_minor = FW_MINOR_VERSION;
+		dev_info->fw_rev = FW_REVISION_VERSION;
+		dev_info->fw_eng = FW_ENGINEERING_VERSION;
+		dev_info->mf_mode = cdev->mf_mode;
+		dev_info->tx_switching = true;
+	} else {
+		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
+				      &dev_info->fw_minor, &dev_info->fw_rev,
+				      &dev_info->fw_eng);
+	}
 
-	qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
-
-	ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
-	if (ptt) {
-		qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
-				       &dev_info->flash_size);
+	if (IS_PF(cdev)) {
+		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+		if (ptt) {
+			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
+					    &dev_info->mfw_rev, NULL);
 
-		qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+					       &dev_info->flash_size);
+
+			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+		}
+	} else {
+		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
+				    &dev_info->mfw_rev, NULL);
 	}
 
 	return 0;
@@ -257,9 +273,7 @@ static int qed_set_power_state(struct qed_dev *cdev,
 
 /* probing */
 static struct qed_dev *qed_probe(struct pci_dev *pdev,
-				 enum qed_protocol protocol,
-				 u32 dp_module,
-				 u8 dp_level)
+				 struct qed_probe_params *params)
 {
 	struct qed_dev *cdev;
 	int rc;
@@ -268,9 +282,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
 	if (!cdev)
 		goto err0;
 
-	cdev->protocol = protocol;
+	cdev->protocol = params->protocol;
+
+	if (params->is_vf)
+		cdev->b_is_vf = true;
 
-	qed_init_dp(cdev, dp_module, dp_level);
+	qed_init_dp(cdev, params->dp_module, params->dp_level);
 
 	rc = qed_init_pci(cdev, pdev);
 	if (rc) {
@@ -664,6 +681,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 	return 0;
 }
 
+static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
+{
+	int rc;
+
+	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
+
+	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
+			    &cdev->int_params.in.num_vectors);
+	if (cdev->num_hwfns > 1) {
+		u8 vectors = 0;
+
+		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
+		cdev->int_params.in.num_vectors += vectors;
+	}
+
+	/* We want a minimum of one fastpath vector per vf hwfn */
+	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
+
+	rc = qed_set_int_mode(cdev, true);
+	if (rc)
+		return rc;
+
+	cdev->int_params.fp_msix_base = 0;
+	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
+
+	return 0;
+}
+
 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
 		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
 {
@@ -749,34 +795,43 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 	struct qed_mcp_drv_version drv_version;
 	const u8 *data = NULL;
 	struct qed_hwfn *hwfn;
-	int rc;
+	int rc = -EINVAL;
 
-	rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
-			      &cdev->pdev->dev);
-	if (rc) {
-		DP_NOTICE(cdev,
-			  "Failed to find fw file - /lib/firmware/%s\n",
-			  QED_FW_FILE_NAME);
+	if (qed_iov_wq_start(cdev))
 		goto err;
+
+	if (IS_PF(cdev)) {
+		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+				      &cdev->pdev->dev);
+		if (rc) {
+			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
+				  QED_FW_FILE_NAME);
+			goto err;
+		}
 	}
 
 	rc = qed_nic_setup(cdev);
 	if (rc)
 		goto err;
 
-	rc = qed_slowpath_setup_int(cdev, params->int_mode);
+	if (IS_PF(cdev))
+		rc = qed_slowpath_setup_int(cdev, params->int_mode);
+	else
+		rc = qed_slowpath_vf_setup_int(cdev);
 	if (rc)
 		goto err1;
 
-	/* Allocate stream for unzipping */
-	rc = qed_alloc_stream_mem(cdev);
-	if (rc) {
-		DP_NOTICE(cdev, "Failed to allocate stream memory\n");
-		goto err2;
-	}
+	if (IS_PF(cdev)) {
+		/* Allocate stream for unzipping */
+		rc = qed_alloc_stream_mem(cdev);
+		if (rc) {
+			DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+			goto err2;
+		}
 
-	/* Start the slowpath */
-	data = cdev->firmware->data;
+		data = cdev->firmware->data;
+	}
 
 	memset(&tunn_info, 0, sizeof(tunn_info));
 	tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
@@ -789,6 +844,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
 	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
 
+	/* Start the slowpath */
 	rc = qed_hw_init(cdev, &tunn_info, true,
 			 cdev->int_params.out.int_mode,
 			 true, data);
@@ -798,18 +854,20 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 	DP_INFO(cdev,
 		"HW initialization and function start completed successfully\n");
 
-	hwfn = QED_LEADING_HWFN(cdev);
-	drv_version.version = (params->drv_major << 24) |
-			      (params->drv_minor << 16) |
-			      (params->drv_rev << 8) |
-			      (params->drv_eng);
-	strlcpy(drv_version.name, params->name,
-		MCP_DRV_VER_STR_SIZE - 4);
-	rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
-				      &drv_version);
-	if (rc) {
-		DP_NOTICE(cdev, "Failed sending drv version command\n");
-		return rc;
+	if (IS_PF(cdev)) {
+		hwfn = QED_LEADING_HWFN(cdev);
+		drv_version.version = (params->drv_major << 24) |
+				      (params->drv_minor << 16) |
+				      (params->drv_rev << 8) |
+				      (params->drv_eng);
+		strlcpy(drv_version.name, params->name,
+			MCP_DRV_VER_STR_SIZE - 4);
+		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+					      &drv_version);
+		if (rc) {
+			DP_NOTICE(cdev, "Failed sending drv version command\n");
+			return rc;
+		}
 	}
 
 	qed_reset_vport_stats(cdev);
@@ -818,13 +876,17 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 
 err2:
 	qed_hw_timers_stop_all(cdev);
-	qed_slowpath_irq_free(cdev);
+	if (IS_PF(cdev))
+		qed_slowpath_irq_free(cdev);
 	qed_free_stream_mem(cdev);
 	qed_disable_msix(cdev);
 err1:
 	qed_resc_free(cdev);
 err:
-	release_firmware(cdev->firmware);
+	if (IS_PF(cdev))
+		release_firmware(cdev->firmware);
+
+	qed_iov_wq_stop(cdev, false);
 
 	return rc;
 }
@@ -834,15 +896,21 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
 	if (!cdev)
 		return -ENODEV;
 
-	qed_free_stream_mem(cdev);
+	if (IS_PF(cdev)) {
+		qed_free_stream_mem(cdev);
 
-	qed_nic_stop(cdev);
-	qed_slowpath_irq_free(cdev);
+		qed_sriov_disable(cdev, true);
+
+		qed_nic_stop(cdev);
+		qed_slowpath_irq_free(cdev);
+	}
 
 	qed_disable_msix(cdev);
 	qed_nic_reset(cdev);
 
-	release_firmware(cdev->firmware);
+	qed_iov_wq_stop(cdev, true);
+
+	if (IS_PF(cdev))
+		release_firmware(cdev->firmware);
 
 	return 0;
 }
@@ -932,6 +1000,9 @@ static int qed_set_link(struct qed_dev *cdev,
 	if (!cdev)
 		return -ENODEV;
 
+	if (IS_VF(cdev))
+		return 0;
+
 	/* The link should be set only once per PF */
 	hwfn = &cdev->hwfns[0];
@@ -1043,10 +1114,16 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
 	memset(if_link, 0, sizeof(*if_link));
 
 	/* Prepare source inputs */
-	memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
-	memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
-	memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
-	       sizeof(link_caps));
+	if (IS_PF(hwfn->cdev)) {
+		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+		memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
+		       sizeof(link_caps));
+	} else {
+		qed_vf_get_link_params(hwfn, &params);
+		qed_vf_get_link_state(hwfn, &link);
+		qed_vf_get_link_caps(hwfn, &link_caps);
+	}
 
 	/* Set the link parameters to pass to protocol driver */
 	if (link.link_up)
@@ -1148,7 +1225,12 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
 static void qed_get_current_link(struct qed_dev *cdev,
 				 struct qed_link_output *if_link)
 {
+	int i;
+
 	qed_fill_link(&cdev->hwfns[0], if_link);
+
+	for_each_hwfn(cdev, i)
+		qed_inform_vf_link_state(&cdev->hwfns[i]);
 }
 
 void qed_link_update(struct qed_hwfn *hwfn)
@@ -1158,6 +1240,7 @@ void qed_link_update(struct qed_hwfn *hwfn)
 	struct qed_link_output if_link;
 
 	qed_fill_link(hwfn, &if_link);
+	qed_inform_vf_link_state(hwfn);
 
 	if (IS_LEAD_HWFN(hwfn) && cookie)
 		op->link_update(cookie, &if_link);
@@ -1169,6 +1252,9 @@ static int qed_drain(struct qed_dev *cdev)
 	struct qed_ptt *ptt;
 	int i, rc;
 
+	if (IS_VF(cdev))
+		return 0;
+
 	for_each_hwfn(cdev, i) {
 		hwfn = &cdev->hwfns[i];
 		ptt = qed_ptt_acquire(hwfn);
......
@@ -19,6 +19,8 @@
 #include "qed_hw.h"
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
 #define CHIP_MCP_RESP_ITER_US 10
 
 #define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
@@ -440,6 +442,75 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt)
+{
+	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					PUBLIC_PATH);
+	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+				     QED_PATH_ID(p_hwfn));
+	u32 disabled_vfs[VF_MAX_STATIC / 32];
+	int i;
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_SP,
+		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
+		   mfw_path_offsize, path_addr);
+
+	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
+					 path_addr +
+					 offsetof(struct public_path,
+						  mcp_vf_disabled) +
+					 sizeof(u32) * i);
+		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+	}
+
+	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+}
+
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
+{
+	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					PUBLIC_FUNC);
+	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
+	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+				     MCP_PF_ID(p_hwfn));
+	struct qed_mcp_mb_params mb_params;
+	union drv_union_data union_data;
+	int rc;
+	int i;
+
+	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+			   "Acking VFs [%08x,...,%08x] - %08x\n",
+			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+	memset(&mb_params, 0, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+	memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
+	mb_params.p_data_src = &union_data;
+	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
+		return -EBUSY;
+	}
+
+	/* Clear the ACK bits */
+	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+		qed_wr(p_hwfn, p_ptt,
+		       func_addr +
+		       offsetof(struct public_func, drv_ack_vf_disabled) +
+		       i * sizeof(u32), 0);
+
+	return rc;
+}
+
 static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
 					      struct qed_ptt *p_ptt)
 {
@@ -751,6 +822,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
 		case MFW_DRV_MSG_LINK_CHANGE:
 			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
 			break;
+		case MFW_DRV_MSG_VF_DISABLED:
+			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
+			break;
 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
 			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
 			break;
@@ -787,26 +861,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
-			u32 *p_mfw_ver)
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt,
+			u32 *p_mfw_ver, u32 *p_running_bundle_id)
 {
-	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
-	struct qed_ptt *p_ptt;
 	u32 global_offsize;
 
-	p_ptt = qed_ptt_acquire(p_hwfn);
-	if (!p_ptt)
-		return -EBUSY;
+	if (IS_VF(p_hwfn->cdev)) {
+		if (p_hwfn->vf_iov_info) {
+			struct pfvf_acquire_resp_tlv *p_resp;
+
+			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+			return 0;
+		} else {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "VF requested MFW version prior to ACQUIRE\n");
+			return -EINVAL;
+		}
+	}
 
 	global_offsize = qed_rd(p_hwfn, p_ptt,
-				SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
-						     public_base,
+				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
 						     PUBLIC_GLOBAL));
-	*p_mfw_ver = qed_rd(p_hwfn, p_ptt,
-			    SECTION_ADDR(global_offsize, 0) +
-			    offsetof(struct public_global, mfw_ver));
-
-	qed_ptt_release(p_hwfn, p_ptt);
+	*p_mfw_ver =
+	    qed_rd(p_hwfn, p_ptt,
+		   SECTION_ADDR(global_offsize,
+				0) + offsetof(struct public_global, mfw_ver));
+
+	if (p_running_bundle_id != NULL) {
+		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
+					      SECTION_ADDR(global_offsize, 0) +
+					      offsetof(struct public_global,
+						       running_bundle_id));
+	}
 
 	return 0;
 }
@@ -817,6 +907,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
 	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
 	struct qed_ptt *p_ptt;
 
+	if (IS_VF(cdev))
+		return -EINVAL;
+
 	if (!qed_mcp_is_init(p_hwfn)) {
 		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
 		return -EBUSY;
@@ -951,6 +1044,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
 {
 	u32 flash_size;
 
+	if (IS_VF(p_hwfn->cdev))
+		return -EINVAL;
+
 	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
 	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
 		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
@@ -961,6 +1057,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+	u32 resp = 0, param = 0, rc_param = 0;
+	int rc;
+
+	/* Only Leader can configure MSIX, and need to take CMT into account */
+	if (!IS_LEAD_HWFN(p_hwfn))
+		return 0;
+	num *= p_hwfn->cdev->num_hwfns;
+
+	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+			 &resp, &rc_param);
+
+	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
+		rc = -EINVAL;
+	} else {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+			   num, vf_id);
+	}
+
+	return rc;
+}
+
 int
 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
 			 struct qed_ptt *p_ptt,
......
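
The disabled-VFs state the MFW reports (and the ack the PF sends back) is a
plain bitmap packed into VF_MAX_STATIC / 32 u32 words, so VF n lives in word
n / 32, bit n % 32. As a worked example, if only VF 37 was FLR-ed, word 1
reads 0x00000020 and the loop above logs
"FLR-ed VFs [00000020,...,0000003f] - 00000020".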
@@ -149,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
 /**
  * @brief Get the management firmware version value
  *
- * @param cdev       - qed dev pointer
- * @param mfw_ver    - mfw version value
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mfw_ver    - mfw version value
+ * @param p_running_bundle_id	- image id in nvram; Optional.
  *
- * @return int - 0 - operation was successul.
+ * @return int - 0 - operation was successful.
  */
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
-			u32 *mfw_ver);
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt,
+			u32 *p_mfw_ver, u32 *p_running_bundle_id);
 
 /**
  * @brief Get media type value of the port.
@@ -389,6 +392,18 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
 		     struct qed_ptt *p_ptt);
 
+/**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @return int - 0 upon success.
+ */
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt, u32 *vfs_to_ack);
+
 /**
  * @brief - calls during init to read shmem of all function-related info.
  *
@@ -418,6 +433,20 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
  * @return true iff MFW is running and mcp_info is initialized
  */
 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute inside engine
+ * @param num - number of MSI-X entries (SBs) to request
+ *
+ * @return int
+ */
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+
 int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
 int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
 int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
......
@@ -39,6 +39,10 @@
 	0x2aae04UL
 #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
 	0x2aa16cUL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
+	0x2aa118UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE \
+	0x2a0800UL
 #define BAR0_MAP_REG_MSDM_RAM \
 	0x1d00000UL
 #define BAR0_MAP_REG_USDM_RAM \
@@ -77,6 +81,8 @@
 	0x2f2eb0UL
 #define DORQ_REG_PF_DB_ENABLE \
 	0x100508UL
+#define DORQ_REG_VF_USAGE_CNT \
+	0x1009c4UL
 #define QM_REG_PF_EN \
 	0x2f2ea4UL
 #define TCFC_REG_STRONG_ENABLE_PF \
@@ -111,6 +117,8 @@
 	0x009778UL
 #define MISCS_REG_CHIP_METAL \
 	0x009774UL
+#define MISCS_REG_FUNCTION_HIDE \
+	0x0096f0UL
 #define BRB_REG_HEADER_SIZE \
 	0x340804UL
 #define BTB_REG_HEADER_SIZE \
@@ -119,6 +127,8 @@
 	0x1c0708UL
 #define CCFC_REG_ACTIVITY_COUNTER \
 	0x2e8800UL
+#define CCFC_REG_STRONG_ENABLE_VF \
+	0x2e070cUL
 #define CDU_REG_CID_ADDR_PARAMS \
 	0x580900UL
 #define DBG_REG_CLIENT_ENABLE \
@@ -161,6 +171,10 @@
 	0x040200UL
 #define PBF_REG_INIT \
 	0xd80000UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \
+	0xd806c8UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \
+	0xd806ccUL
 #define PTU_REG_ATC_INIT_ARRAY \
 	0x560000UL
 #define PCM_REG_INIT \
@@ -385,6 +399,8 @@
 	0x1d0000UL
 #define IGU_REG_PF_CONFIGURATION \
 	0x180800UL
+#define IGU_REG_VF_CONFIGURATION \
+	0x180804UL
 #define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
 	0x00849cUL
 #define MISC_REG_AEU_AFTER_INVERT_1_IGU \
@@ -411,6 +427,8 @@
 	0x1 << 0)
 #define IGU_REG_MAPPING_MEMORY \
 	0x184000UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
+	0x180408UL
 #define MISCS_REG_GENERIC_POR_0 \
 	0x0096d4UL
 #define MCP_REG_NVM_CFG4 \
......
@@ -62,6 +62,9 @@ union ramrod_data {
 	struct vport_stop_ramrod_data vport_stop;
 	struct vport_update_ramrod_data vport_update;
 	struct vport_filter_update_ramrod_data vport_filter_update;
+
+	struct vf_start_ramrod_data vf_start;
+	struct vf_stop_ramrod_data vf_stop;
 };
 
 #define EQ_MAX_CREDIT   0xffffffff
@@ -341,13 +344,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
  * @param p_hwfn
  * @param p_tunn
  * @param mode
+ * @param allow_npar_tx_switch
  *
  * @return int
  */
 
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 		    struct qed_tunn_start_params *p_tunn,
-		    enum qed_mf_mode mode);
+		    enum qed_mf_mode mode, bool allow_npar_tx_switch);
 
 /**
  * @brief qed_sp_pf_stop - PF Function Stop Ramrod
......
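For context, a minimal caller-side sketch of the widened interface (tunn_info and the MF mode here are illustrative stand-ins for values the init flow already holds):

	struct qed_tunn_start_params tunn_info;
	int rc;

	memset(&tunn_info, 0, sizeof(tunn_info));

	/* Ask for NPAR tx-switching; per the implementation below, the
	 * ramrod only carries it when the function is in MF-SI mode.
	 */
	rc = qed_sp_pf_start(p_hwfn, &tunn_info, QED_MF_NPAR, true);
	if (rc)
		DP_NOTICE(p_hwfn, "PF start ramrod failed, rc = %d\n", rc);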
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include "qed_int.h" #include "qed_int.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h"
int qed_sp_init_request(struct qed_hwfn *p_hwfn, int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent, struct qed_spq_entry **pp_ent,
...@@ -298,7 +299,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, ...@@ -298,7 +299,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_tunn_start_params *p_tunn, struct qed_tunn_start_params *p_tunn,
enum qed_mf_mode mode) enum qed_mf_mode mode, bool allow_npar_tx_switch)
{ {
struct pf_start_ramrod_data *p_ramrod = NULL; struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn); u16 sb = qed_int_get_sp_sb_id(p_hwfn);
...@@ -357,6 +358,16 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, ...@@ -357,6 +358,16 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
&p_ramrod->tunnel_config); &p_ramrod->tunnel_config);
p_hwfn->hw_info.personality = PERSONALITY_ETH; p_hwfn->hw_info.personality = PERSONALITY_ETH;
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
p_ramrod->num_vfs = (u8) p_iov->total_vfs;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ, DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index, sb, sb_index,
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include "qed_mcp.h" #include "qed_mcp.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h"
/*************************************************************************** /***************************************************************************
* Structures & Definitions * Structures & Definitions
...@@ -242,10 +243,17 @@ static int ...@@ -242,10 +243,17 @@ static int
qed_async_event_completion(struct qed_hwfn *p_hwfn, qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe) struct event_ring_entry *p_eqe)
{ {
DP_NOTICE(p_hwfn, switch (p_eqe->protocol_id) {
"Unknown Async completion for protocol: %d\n", case PROTOCOLID_COMMON:
p_eqe->protocol_id); return qed_sriov_eqe_event(p_hwfn,
return -EINVAL; p_eqe->opcode,
p_eqe->echo, &p_eqe->data);
default:
DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
return -EINVAL;
}
} }
/*************************************************************************** /***************************************************************************
...@@ -379,6 +387,9 @@ static int qed_cqe_completion( ...@@ -379,6 +387,9 @@ static int qed_cqe_completion(
struct eth_slow_path_rx_cqe *cqe, struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol) enum protocol_type protocol)
{ {
if (IS_VF(p_hwfn->cdev))
return 0;
/* @@@tmp - it's possible we'll eventually want to handle some /* @@@tmp - it's possible we'll eventually want to handle some
* actual commands that can arrive here, but for now this is only * actual commands that can arrive here, but for now this is only
* used to complete the ramrod using the echo value on the cqe * used to complete the ramrod using the echo value on the cqe
......
This diff is collapsed.
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"
#define QED_VF_ARRAY_LENGTH (3)
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
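/* Usage sketch (illustrative; qed_vf_pf_example_request is a hypothetical
 * VF-side helper): flows can fork on these predicates, e.g.
 *
 *	if (IS_VF(p_hwfn->cdev))
 *		return qed_vf_pf_example_request(p_hwfn);
 *
 * IS_PF_SRIOV() additionally requires that the PCIe SR-IOV capability was
 * discovered, and IS_PF_SRIOV_ALLOC() that the per-PF IOV data was allocated.
 */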
#define QED_MAX_VF_CHAINS_PER_PF 16
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
enum qed_iov_vport_update_flag {
QED_IOV_VP_UPDATE_ACTIVATE,
QED_IOV_VP_UPDATE_VLAN_STRIP,
QED_IOV_VP_UPDATE_TX_SWITCH,
QED_IOV_VP_UPDATE_MCAST,
QED_IOV_VP_UPDATE_ACCEPT_PARAM,
QED_IOV_VP_UPDATE_RSS,
QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
QED_IOV_VP_UPDATE_SGE_TPA,
QED_IOV_VP_UPDATE_MAX,
};
struct qed_public_vf_info {
/* These copies will later be reflected in the bulletin board,
* but this copy should be newer.
*/
u8 forced_mac[ETH_ALEN];
u16 forced_vlan;
u8 mac[ETH_ALEN];
/* IFLA_VF_LINK_STATE_<X> */
int link_state;
/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
int tx_rate;
};
/* This struct is part of qed_dev and contains data relevant to all hwfns;
* Initialized only if the SR-IOV capability is exposed in PCIe config space.
*/
struct qed_hw_sriov_info {
int pos; /* capability position */
int nres; /* number of resources */
u32 cap; /* SR-IOV Capabilities */
u16 ctrl; /* SR-IOV Control */
u16 total_vfs; /* total VFs associated with the PF */
u16 num_vfs; /* number of vfs that have been started */
u16 initial_vfs; /* initial VFs associated with the PF */
u16 nr_virtfn; /* number of VFs available */
u16 offset; /* first VF Routing ID offset */
u16 stride; /* following VF stride */
u16 vf_device_id; /* VF device id */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
u32 first_vf_in_pf;
};
/* This mailbox is maintained per VF in its PF, and contains all information
* required for sending / receiving a message.
*/
struct qed_iov_vf_mbx {
union vfpf_tlvs *req_virt;
dma_addr_t req_phys;
union pfvf_tlvs *reply_virt;
dma_addr_t reply_phys;
/* Address in VF where a pending message is located */
dma_addr_t pending_req;
u8 *offset;
/* saved VF request header */
struct vfpf_first_tlv first_tlv;
};
struct qed_vf_q_info {
u16 fw_rx_qid;
u16 fw_tx_qid;
u8 fw_cid;
u8 rxq_active;
u8 txq_active;
};
enum vf_state {
VF_FREE = 0, /* VF is free and ready to be acquired; holds no resources */
VF_ACQUIRED, /* VF, acquired, but not initialized */
VF_ENABLED, /* VF, Enabled */
VF_RESET, /* VF, FLR'd, pending cleanup */
VF_STOPPED /* VF, Stopped */
};
struct qed_vf_vlan_shadow {
bool used;
u16 vid;
};
struct qed_vf_shadow_config {
/* Shadow copy of all guest vlans */
struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
u8 inner_vlan_removal;
};
/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
struct qed_iov_vf_mbx vf_mbx;
enum vf_state state;
bool b_init;
u8 to_disable;
struct qed_bulletin bulletin;
dma_addr_t vf_bulletin;
u32 concrete_fid;
u16 opaque_fid;
u16 mtu;
u8 vport_id;
u8 relative_vf_id;
u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
(p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
(p_vf)->abs_vf_id)
u8 vport_instance;
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
u8 num_active_rxqs;
struct qed_public_vf_info p_vf_info;
bool spoof_chk;
bool req_spoofchk_val;
/* Stores the configuration requested by VF */
struct qed_vf_shadow_config shadow_config;
/* A bitfield using bulletin's valid-map bits, used to indicate
* which of the bulletin board features have been configured.
*/
u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
(1 << VLAN_ADDR_FORCED))
};
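/* Usage sketch (illustrative): the mask above pairs with BIT() tests on
 * configured_features, e.g. to check whether a forced MAC was applied:
 *
 *	if (p_vf->configured_features & BIT(MAC_ADDR_FORCED))
 *		return;
 */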
/* This structure is part of qed_hwfn and used only for PFs that have the
* SR-IOV capability enabled.
*/
struct qed_pf_iov {
struct qed_vf_info vfs_array[MAX_NUM_VFS];
u64 pending_events[QED_VF_ARRAY_LENGTH];
u64 pending_flr[QED_VF_ARRAY_LENGTH];
/* The message buffer is allocated contiguously and split among the VFs */
void *mbx_msg_virt_addr;
dma_addr_t mbx_msg_phys_addr;
u32 mbx_msg_size;
void *mbx_reply_virt_addr;
dma_addr_t mbx_reply_phys_addr;
u32 mbx_reply_size;
void *p_bulletins;
dma_addr_t bulletins_phys;
u32 bulletins_size;
};
enum qed_iov_wq_flag {
QED_IOV_WQ_MSG_FLAG,
QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
QED_IOV_WQ_STOP_WQ_FLAG,
QED_IOV_WQ_FLR_FLAG,
};
#ifdef CONFIG_QED_SRIOV
/**
* @brief - Given a VF index, return the index of the next active VF
* (the given index included).
*
* @param p_hwfn
* @param rel_vf_id
*
* @return MAX_NUM_VFS in case there are no further active VFs; otherwise, the index.
*/
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief Read SR-IOV related information and allocate resources;
* reads from configuration space, shmem, etc.
*
* @param p_hwfn
*
* @return int
*/
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
/**
* @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
*
* @param p_hwfn
* @param offset
* @param type
* @param length
*
* @return pointer to the newly placed tlv
*/
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
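/* Usage sketch (illustrative; the CHANNEL_TLV_LIST_END and
 * channel_list_end_tlv names are assumed from the VF<->PF channel headers,
 * which are not shown in this excerpt):
 *
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */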
/**
* @brief list the types and lengths of the tlvs on the buffer
*
* @param p_hwfn
* @param tlvs_list
*/
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
/**
* @brief qed_iov_alloc - allocate sriov related resources
*
* @param p_hwfn
*
* @return int
*/
int qed_iov_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_iov_setup - setup sriov related resources
*
* @param p_hwfn
* @param p_ptt
*/
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief qed_iov_free - free sriov related resources
*
* @param p_hwfn
*/
void qed_iov_free(struct qed_hwfn *p_hwfn);
/**
* @brief free sriov related memory that was allocated during hw_prepare
*
* @param cdev
*/
void qed_iov_free_hw_info(struct qed_dev *cdev);
/**
* @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
*
* @param p_hwfn
* @param opcode
* @param echo
* @param data
*
* @return int
*/
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode, __le16 echo, union event_ring_data *data);
/**
* @brief Mark structs of vfs that have been FLR-ed.
*
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
* @return 1 if at least one of the PF's VFs got FLRed; 0 otherwise.
*/
int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
*
* @param p_hwfn
* @param p_tlvs_list - Pointer to tlvs list
* @param req_type - Type of TLV
*
* @return pointer to tlv type if found, otherwise returns NULL.
*/
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type);
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
{
return MAX_NUM_VFS;
}
static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
return 0;
}
static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
return 0;
}
static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
}
static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}
static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}
static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode,
__le16 echo, union event_ring_data *data)
{
return -EINVAL;
}
static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
u32 *disabled_vfs)
{
return 0;
}
static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}
static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
return 0;
}
static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
enum qed_iov_wq_flag flag)
{
}
static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}
static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
return 0;
}
static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
#endif
#define qed_for_each_vf(_p_hwfn, _i) \
for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
_i < MAX_NUM_VFS; \
_i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
#endif
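A usage sketch for the iterator above (assuming a PF context in which pf_iov_info was allocated via qed_iov_alloc()):

	struct qed_vf_info *p_vf;
	int i;

	qed_for_each_vf(p_hwfn, i) {
		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		/* act on each active VF, e.g. refresh its bulletin copy */
	}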
This diff is collapsed.
This diff is collapsed.
...@@ -112,6 +112,10 @@ struct qede_dev { ...@@ -112,6 +112,10 @@ struct qede_dev {
u32 dp_module; u32 dp_module;
u8 dp_level; u8 dp_level;
u32 flags;
#define QEDE_FLAG_IS_VF BIT(0)
#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
const struct qed_eth_ops *ops; const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info; struct qed_dev_eth_info dev_info;
......
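A usage sketch for the new flag (is_vf here is an illustrative stand-in for the probe-time decision based on the VF PCI device ID):

	if (is_vf)
		edev->flags |= QEDE_FLAG_IS_VF;

	/* ... PF-only flows can then bail out early: */
	if (IS_VF(edev))
		return 0;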
This diff is collapsed.
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/if_link.h> #include <linux/if_link.h>
#include <linux/qed/eth_common.h> #include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h> #include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>
struct qed_dev_eth_info { struct qed_dev_eth_info {
struct qed_dev_info common; struct qed_dev_info common;
...@@ -34,6 +35,8 @@ struct qed_update_vport_params { ...@@ -34,6 +35,8 @@ struct qed_update_vport_params {
u8 vport_id; u8 vport_id;
u8 update_vport_active_flg; u8 update_vport_active_flg;
u8 vport_active_flg; u8 vport_active_flg;
u8 update_tx_switching_flg;
u8 tx_switching_flg;
u8 update_accept_any_vlan_flg; u8 update_accept_any_vlan_flg;
u8 accept_any_vlan; u8 accept_any_vlan;
u8 update_rss_flg; u8 update_rss_flg;
...@@ -121,10 +124,14 @@ struct qed_tunn_params { ...@@ -121,10 +124,14 @@ struct qed_tunn_params {
struct qed_eth_cb_ops { struct qed_eth_cb_ops {
struct qed_common_cb_ops common; struct qed_common_cb_ops common;
void (*force_mac) (void *dev, u8 *mac);
}; };
struct qed_eth_ops { struct qed_eth_ops {
const struct qed_common_ops *common; const struct qed_common_ops *common;
#ifdef CONFIG_QED_SRIOV
const struct qed_iov_hv_ops *iov;
#endif
int (*fill_dev_info)(struct qed_dev *cdev, int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_eth_info *info); struct qed_dev_eth_info *info);
...@@ -133,6 +140,8 @@ struct qed_eth_ops { ...@@ -133,6 +140,8 @@ struct qed_eth_ops {
struct qed_eth_cb_ops *ops, struct qed_eth_cb_ops *ops,
void *cookie); void *cookie);
bool(*check_mac) (struct qed_dev *cdev, u8 *mac);
int (*vport_start)(struct qed_dev *cdev, int (*vport_start)(struct qed_dev *cdev,
struct qed_start_vport_params *params); struct qed_start_vport_params *params);
......
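To illustrate the new hook, a sketch of a driver-side handler and its registration (qede_force_mac and qede_cb_ops are hypothetical names; the handler simply mirrors the PF-forced MAC into the netdev):

	/* ether_addr_copy() is from <linux/etherdevice.h> */
	static void qede_force_mac(void *dev, u8 *mac)
	{
		struct qede_dev *edev = dev;

		ether_addr_copy(edev->ndev->dev_addr, mac);
	}

	static struct qed_eth_cb_ops qede_cb_ops = {
		.force_mac = qede_force_mac,
	};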
This diff is collapsed.
This diff is collapsed.