Commit dacd88d6 authored by Yuval Mintz, committed by David S. Miller

qed: IOV l2 functionality

This adds the changes required to allow VF L2 configuration flows to work.

While the fastpath of the VF and the PF are meant to be exactly the same,
the configuration of the VF is done by the PF.
This diverts all VF-related configuration flows that originate from a VF so
that they pass through the VF->PF channel, and adds the logic needed on the
PF side to support them.
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0b55e27d
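To illustrate the flow diversion in isolation, here is a minimal, self-contained C sketch. It is mock code, not taken from the driver: struct mock_dev and every mock_* function are hypothetical stand-ins for qed_dev/qed_hwfn, the IS_VF() macro and the VF->PF channel helpers visible in the hunks below. A shared configuration entry point branches on whether the device is a VF and, if so, forwards the request over the channel instead of programming the hardware directly.

#include <stdbool.h>
#include <stdio.h>

/* Mock device: stands in for qed_dev/qed_hwfn and the IS_VF() macro used
 * in the real driver. Everything in this sketch is illustrative only.
 */
struct mock_dev {
	bool is_vf;
};

/* Pretend VF->PF channel: the VF packs a request and asks the PF to act. */
static int mock_vf_pf_vport_start(struct mock_dev *dev)
{
	printf("VF: sending vport-start request over the VF->PF channel\n");
	return 0;
}

/* PF-only path: direct ramrod/hardware configuration. */
static int mock_pf_vport_start(struct mock_dev *dev)
{
	printf("PF: configuring the vport directly\n");
	return 0;
}

/* Shared entry point used by the common code path. */
static int mock_vport_start(struct mock_dev *dev)
{
	if (dev->is_vf)
		return mock_vf_pf_vport_start(dev);
	return mock_pf_vport_start(dev);
}

int main(void)
{
	struct mock_dev pf = { .is_vf = false };
	struct mock_dev vf = { .is_vf = true };

	mock_vport_start(&pf);
	mock_vport_start(&vf);
	return 0;
}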
drivers/net/ethernet/qlogic/qed/qed.h

@@ -532,6 +532,7 @@ struct qed_dev {
 };
 #define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
+#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
 #define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
 #define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
drivers/net/ethernet/qlogic/qed/qed_dev.c

@@ -938,7 +938,12 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
 	for_each_hwfn(cdev, j) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+		if (IS_VF(cdev)) {
+			qed_vf_pf_int_cleanup(p_hwfn);
+			continue;
+		}
+
 		DP_VERBOSE(p_hwfn,
 			   NETIF_MSG_IFDOWN,
@@ -962,6 +967,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
 void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
 {
+	if (IS_VF(p_hwfn->cdev))
+		return;
+
 	/* Re-open incoming traffic */
 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
drivers/net/ethernet/qlogic/qed/qed_l2.h (new file)

/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_L2_H
#define _QED_L2_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_eth_if.h>
#include "qed.h"
#include "qed_hw.h"
#include "qed_sp.h"
enum qed_filter_opcode {
QED_FILTER_ADD,
QED_FILTER_REMOVE,
QED_FILTER_MOVE,
QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
QED_FILTER_FLUSH, /* Removes all filters */
};
enum qed_filter_ucast_type {
QED_FILTER_MAC,
QED_FILTER_VLAN,
QED_FILTER_MAC_VLAN,
QED_FILTER_INNER_MAC,
QED_FILTER_INNER_VLAN,
QED_FILTER_INNER_PAIR,
QED_FILTER_INNER_MAC_VNI_PAIR,
QED_FILTER_MAC_VNI_PAIR,
QED_FILTER_VNI,
};
struct qed_filter_ucast {
enum qed_filter_opcode opcode;
enum qed_filter_ucast_type type;
u8 is_rx_filter;
u8 is_tx_filter;
u8 vport_to_add_to;
u8 vport_to_remove_from;
unsigned char mac[ETH_ALEN];
u8 assert_on_error;
u16 vlan;
u32 vni;
};
struct qed_filter_mcast {
/* MOVE is not supported for multicast */
enum qed_filter_opcode opcode;
u8 vport_to_add_to;
u8 vport_to_remove_from;
u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS 64
unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};
int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 rx_queue_id,
bool eq_completion_only, bool cqe_completion);
int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
enum qed_tpa_mode {
QED_TPA_MODE_NONE,
QED_TPA_MODE_UNUSED,
QED_TPA_MODE_GRO,
QED_TPA_MODE_MAX
};
struct qed_sp_vport_start_params {
enum qed_tpa_mode tpa_mode;
bool remove_inner_vlan;
bool drop_ttl0;
u8 max_buffers_per_cqe;
u32 concrete_fid;
u16 opaque_fid;
u8 vport_id;
u16 mtu;
};
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
struct qed_rss_params {
u8 update_rss_config;
u8 rss_enable;
u8 rss_eng_id;
u8 update_rss_capabilities;
u8 update_rss_ind_table;
u8 update_rss_key;
u8 rss_caps;
u8 rss_table_size_log;
u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
u32 rss_key[QED_RSS_KEY_SIZE];
};
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
u8 update_tx_mode_config;
u8 rx_accept_filter;
u8 tx_accept_filter;
#define QED_ACCEPT_NONE 0x01
#define QED_ACCEPT_UCAST_MATCHED 0x02
#define QED_ACCEPT_UCAST_UNMATCHED 0x04
#define QED_ACCEPT_MCAST_MATCHED 0x08
#define QED_ACCEPT_MCAST_UNMATCHED 0x10
#define QED_ACCEPT_BCAST 0x20
};
struct qed_sp_vport_update_params {
u16 opaque_fid;
u8 vport_id;
u8 update_vport_active_rx_flg;
u8 vport_active_rx_flg;
u8 update_vport_active_tx_flg;
u8 vport_active_tx_flg;
u8 update_approx_mcast_flg;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
unsigned long bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
};
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
/**
* @brief qed_sp_vport_stop -
*
* This ramrod closes a VPort after all its RX and TX queues are terminated.
* An Assert is generated if any queues are left open.
*
* @param p_hwfn
* @param opaque_fid
* @param vport_id VPort ID
*
* @return int
*/
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_filter_ucast *p_filter_cmd,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
struct qed_queue_start_common_params *params,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
struct qed_queue_start_common_params *p_params,
u8 stats_id,
dma_addr_t pbl_addr,
u16 pbl_size,
union qed_qm_pq_params *p_pq_params);
u8 qed_mcast_bin_from_mac(u8 *mac);
#endif /* _QED_L2_H */
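To make the ordering requirement in the qed_sp_vport_stop() comment above concrete, here is a minimal teardown sketch. It is illustrative only and not part of the patch: the helper name example_vport_teardown is hypothetical, it assumes a single RX/TX queue pair with queue id 0, and the 'false' completion flags are placeholder values. The point is simply that every queue belonging to the vport must be stopped before the vport-stop ramrod is issued.

#include "qed_l2.h"

/* Illustrative ordering only: stop all RX and TX queues first, then close
 * the vport, since qed_sp_vport_stop() asserts if any queue is still open.
 * Queue id 0 and the completion flags are placeholder values.
 */
static int example_vport_teardown(struct qed_hwfn *p_hwfn,
				  u16 opaque_fid, u8 vport_id)
{
	int rc;

	rc = qed_sp_eth_rx_queue_stop(p_hwfn, 0, false, false);
	if (rc)
		return rc;

	rc = qed_sp_eth_tx_queue_stop(p_hwfn, 0);
	if (rc)
		return rc;

	return qed_sp_vport_stop(p_hwfn, opaque_fid, vport_id);
}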
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h

@@ -427,6 +427,8 @@
 	0x1 << 0)
 #define IGU_REG_MAPPING_MEMORY \
 	0x184000UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
+	0x180408UL
 #define MISCS_REG_GENERIC_POR_0 \
 	0x0096d4UL
 #define MCP_REG_NVM_CFG4 \
drivers/net/ethernet/qlogic/qed/qed_sriov.h

@@ -24,6 +24,14 @@
 #define QED_MAX_VF_CHAINS_PER_PF 16
 #define QED_ETH_VF_NUM_VLAN_FILTERS 2
+enum qed_iov_vport_update_flag {
+	QED_IOV_VP_UPDATE_ACTIVATE,
+	QED_IOV_VP_UPDATE_MCAST,
+	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
+	QED_IOV_VP_UPDATE_RSS,
+	QED_IOV_VP_UPDATE_MAX,
+};
+
 struct qed_public_vf_info {
	/* These copies will later be reflected in the bulletin board,
	 * but this copy should be newer.
@@ -81,6 +89,7 @@ struct qed_vf_q_info {
 enum vf_state {
	VF_FREE = 0,	/* VF ready to be acquired holds no resc */
	VF_ACQUIRED,	/* VF, acquired, but not initalized */
+	VF_ENABLED,	/* VF, Enabled */
	VF_RESET,	/* VF, FLR'd, pending cleanup */
	VF_STOPPED	/* VF, Stopped */
 };
@@ -97,6 +106,7 @@ struct qed_vf_info {
	u32 concrete_fid;
	u16 opaque_fid;
+	u16 mtu;
	u8 vport_id;
	u8 relative_vf_id;
@@ -105,6 +115,7 @@ struct qed_vf_info {
			     (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
			     (p_vf)->abs_vf_id)
+	u8 vport_instance;
	u8 num_rxqs;
	u8 num_txqs;
@@ -114,6 +125,7 @@ struct qed_vf_info {
	u8 num_vlan_filters;
	struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
+	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
 };
@@ -238,6 +250,18 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
  */
 int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+			       void *p_tlvs_list, u16 req_type);
+
 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
 int qed_iov_wq_start(struct qed_dev *cdev);
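The qed_iov_search_list_tlvs() comment above describes walking a TLV (type/length/value) chain inside a VF->PF channel buffer. The following self-contained mock shows the basic walk; it does not use the driver's real TLV layout, and every type constant, struct and function name below is invented for illustration: advance by each TLV's length until the requested type or the list-end marker is found.

#include <stdint.h>
#include <stdio.h>

/* Mock TLV layout for illustration only: a type/length header followed by
 * payload, with a dedicated "list end" type terminating the buffer.
 */
#define MOCK_TLV_END   0
#define MOCK_TLV_MAC   1
#define MOCK_TLV_VLAN  2

struct mock_tlv {
	uint16_t type;
	uint16_t length;	/* total length including this header */
};

/* Walk the TLV chain and return the first TLV of the requested type. */
static void *mock_search_tlv(void *buf, uint16_t req_type)
{
	struct mock_tlv *tlv = buf;

	while (tlv->type != MOCK_TLV_END) {
		if (tlv->type == req_type)
			return tlv;
		if (!tlv->length)
			return NULL;	/* malformed list */
		tlv = (struct mock_tlv *)((uint8_t *)tlv + tlv->length);
	}
	return NULL;
}

int main(void)
{
	uint8_t buf[64] = { 0 };
	struct mock_tlv *tlv = (struct mock_tlv *)buf;

	/* Build a tiny list: one VLAN TLV, then the list-end marker. */
	tlv->type = MOCK_TLV_VLAN;
	tlv->length = sizeof(*tlv) + 4;
	tlv = (struct mock_tlv *)(buf + tlv->length);
	tlv->type = MOCK_TLV_END;
	tlv->length = sizeof(*tlv);

	printf("VLAN TLV %s\n",
	       mock_search_tlv(buf, MOCK_TLV_VLAN) ? "found" : "not found");
	printf("MAC TLV %s\n",
	       mock_search_tlv(buf, MOCK_TLV_MAC) ? "found" : "not found");
	return 0;
}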