Commit 70e79832 authored by David S. Miller

Merge branch 'hns3-add-code-optimization-for-VF-reset-and-some-new-reset-feature'

Huazhong Tan says:

====================
hns3: add code optimization for VF reset and some new reset feature

Currently the hardware supports the following resets:
1. VF reset: triggered by sending a cmd to the IMP (Integrated
   Management Processor). Resets only the specified VF function and
   does not affect other PFs or VFs.
2. PF reset: triggered by sending a cmd to the IMP. Resets only the
   specified PF and its VFs.
3. PF FLR: triggered by the PCIe subsystem. Resets only the specified
   PF and its VFs.
4. VF FLR: triggered by the PCIe subsystem. Resets only the specified
   VF function and does not affect other PFs or VFs.
5. Core reset: triggered by writing to a register. Resets most hardware
   units, such as the SSU, which affects all PFs and VFs.
6. Global reset: triggered by writing to a register. Resets all
   hardware units, which affects all PFs and VFs.
7. IMP reset: triggered by the IMU (Intelligent Management Unit) when
   the IMP is no longer feeding the IMU's watchdog. The IMU reloads the
   IMP firmware, and the IMP performs a global reset after the firmware
   is reloaded, which affects all PFs and VFs.

The current driver supports only the PF/VF reset and an incomplete core
and global reset (lacking the VF reset handling), so this patchset adds
complete reset support to the hns3 driver; a sketch of the resulting
reset-level mapping follows the commit header below.

This patchset also contains some optimizations related to reset.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 40c4b1e9 6ff3cf07
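
For orientation, a brief editorial sketch of how the reset sources
listed in the cover letter map onto the driver's reset levels (the enum
constants are the real ones from hnae3.h in the hunks below; the
ordering reflects how hclge_get_reset_level() picks the highest pending
level first):

/* Sketch only, not part of the patch:
 *
 *   IMP reset     -> HNAE3_IMP_RESET     (firmware reload + global reset)
 *   global reset  -> HNAE3_GLOBAL_RESET  (all HW units, all PFs and VFs)
 *   core reset    -> HNAE3_CORE_RESET    (most HW units, e.g. the SSU)
 *   PF reset/FLR  -> HNAE3_FUNC_RESET / HNAE3_FLR_RESET (one PF + its VFs)
 *   VF reset/FLR  -> HNAE3_VF_* levels / HNAE3_FLR_RESET on the VF side
 */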
......@@ -85,6 +85,12 @@ struct hclge_mbx_pf_to_vf_cmd {
u16 msg[8];
};
struct hclge_vf_rst_cmd {
u8 dest_vfid;
u8 vf_rst;
u8 rsv[22];
};
/* used by VF to store the received Async responses from PF */
struct hclgevf_mbx_arq_ring {
#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
......
......@@ -124,7 +124,10 @@ enum hnae3_reset_notify_type {
enum hnae3_reset_type {
HNAE3_VF_RESET,
HNAE3_VF_FUNC_RESET,
HNAE3_VF_PF_FUNC_RESET,
HNAE3_VF_FULL_RESET,
HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
......@@ -132,6 +135,11 @@ enum hnae3_reset_type {
HNAE3_NONE_RESET,
};
enum hnae3_flr_state {
HNAE3_FLR_DOWN,
HNAE3_FLR_DONE,
};
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
......@@ -297,7 +305,8 @@ struct hnae3_ae_dev {
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
void (*flr_done)(struct hnae3_ae_dev *ae_dev);
int (*init_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
void (*uninit_client_instance)(struct hnae3_client *client,
......
......@@ -415,9 +415,6 @@ static void hns3_nic_net_down(struct net_device *netdev)
const struct hnae3_ae_ops *ops;
int i;
if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
......@@ -439,6 +436,11 @@ static void hns3_nic_net_down(struct net_device *netdev)
static int hns3_nic_net_stop(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return 0;
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
......@@ -1849,9 +1851,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
static void hns3_reset_prepare(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "hns3 flr prepare\n");
if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
ae_dev->ops->flr_prepare(ae_dev);
}
static void hns3_reset_done(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "hns3 flr done\n");
if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
ae_dev->ops->flr_done(ae_dev);
}
static const struct pci_error_handlers hns3_err_handler = {
.error_detected = hns3_error_detected,
.slot_reset = hns3_slot_reset,
.reset_prepare = hns3_reset_prepare,
.reset_done = hns3_reset_done,
};
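
These two callbacks are invoked by the PCI core around a function-level
reset; a rough sketch of the call order (simplified from the standard
pci_reset_function() path; error handling and locking elided):

/* Editorial sketch of how the PCI core drives the hooks above:
 *
 *   pci_reset_function(pdev)
 *     -> hns3_reset_prepare(pdev)    // quiesce via ops->flr_prepare
 *     -> <function level reset is issued to the device>
 *     -> hns3_reset_done(pdev)       // restore via ops->flr_done
 */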
static struct pci_driver hns3_driver = {
......@@ -2699,6 +2721,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
struct hns3_nic_priv *priv = netdev_priv(napi->dev);
struct hns3_enet_ring *ring;
int rx_pkt_total = 0;
......@@ -2707,6 +2730,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
int rx_budget;
if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
napi_complete(napi);
return 0;
}
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
......@@ -2731,9 +2759,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
napi_complete(napi);
if (likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) &&
napi_complete(napi)) {
hns3_update_new_int_gl(tqp_vector);
hns3_mask_vector_irq(tqp_vector, 1);
}
return rx_pkt_total;
}
......@@ -3818,20 +3848,30 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
ret = hns3_nic_alloc_vector_data(priv);
if (ret)
return ret;
hns3_restore_coal(priv);
ret = hns3_nic_init_vector_data(priv);
if (ret)
return ret;
goto err_dealloc_vector;
ret = hns3_init_all_ring(priv);
if (ret) {
hns3_nic_uninit_vector_data(priv);
priv->ring_data = NULL;
}
if (ret)
goto err_uninit_vector;
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
priv->ring_data = NULL;
err_dealloc_vector:
hns3_nic_dealloc_vector_data(priv);
return ret;
}
......@@ -3856,6 +3896,10 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_store_coal(priv);
ret = hns3_nic_dealloc_vector_data(priv);
if (ret)
netdev_err(netdev, "dealloc vector error\n");
ret = hns3_uninit_all_ring(priv);
if (ret)
netdev_err(netdev, "uninit ring error\n");
......
......@@ -593,7 +593,11 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
{
return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
ae_dev->reset_type == HNAE3_FLR_RESET ||
ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
}
#define hns3_read_dev(a, reg) \
......
......@@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hdev->hw.cmq.crq.next_to_use = 0;
hclge_cmd_init_regs(&hdev->hw);
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
/* Check if there is new reset pending, because the higher level
* reset may happen when lower level reset is being processed.
*/
if (hclge_is_reset_pending(hdev)) {
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
return -EBUSY;
}
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
......
......@@ -2144,6 +2144,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
/* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
......@@ -2160,13 +2168,6 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
......@@ -2352,11 +2353,15 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
#define HCLGE_RESET_WAIT_CNT 5
#define HCLGE_RESET_WAIT_CNT 200
u32 val, reg, reg_bit;
u32 cnt = 0;
switch (hdev->reset_type) {
case HNAE3_IMP_RESET:
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_IMP_RESET_BIT;
break;
case HNAE3_GLOBAL_RESET:
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_GLOBAL_RESET_BIT;
......@@ -2369,6 +2374,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
case HNAE3_FLR_RESET:
break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
......@@ -2376,6 +2383,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
if (hdev->reset_type == HNAE3_FLR_RESET) {
while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
cnt++ < HCLGE_RESET_WAIT_CNT)
msleep(HCLGE_RESET_WATI_MS);
if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
dev_err(&hdev->pdev->dev,
"flr wait timeout: %d\n", cnt);
return -EBUSY;
}
return 0;
}
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
......@@ -2392,6 +2413,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return 0;
}
static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
struct hclge_vf_rst_cmd *req;
struct hclge_desc desc;
req = (struct hclge_vf_rst_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
req->dest_vfid = func_id;
if (reset)
req->vf_rst = 0x1;
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
int i;
for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
int ret;
/* Send cmd to set/clear VF's FUNC_RST_ING */
ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
if (ret) {
dev_err(&hdev->pdev->dev,
"set vf(%d) rst failded %d!\n",
vport->vport_id, ret);
return ret;
}
if (!reset)
continue;
/* Inform VF to process the reset.
* hclge_inform_reset_assert_to_vf may fail if VF
* driver is not loaded.
*/
ret = hclge_inform_reset_assert_to_vf(vport);
if (ret)
dev_warn(&hdev->pdev->dev,
"inform reset to vf(%d) failded %d!\n",
vport->vport_id, ret);
}
return 0;
}
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
......@@ -2434,6 +2504,12 @@ static void hclge_do_reset(struct hclge_dev *hdev)
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
case HNAE3_FLR_RESET:
dev_info(&pdev->dev, "FLR requested\n");
/* schedule again to check later */
set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
......@@ -2465,6 +2541,9 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
rst_level = HNAE3_FUNC_RESET;
clear_bit(HNAE3_FUNC_RESET, addr);
} else if (test_bit(HNAE3_FLR_RESET, addr)) {
rst_level = HNAE3_FLR_RESET;
clear_bit(HNAE3_FLR_RESET, addr);
}
return rst_level;
......@@ -2495,12 +2574,34 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
hclge_enable_vector(&hdev->misc_vector, true);
}
static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
int ret = 0;
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
/* fall through */
case HNAE3_FLR_RESET:
ret = hclge_set_all_vf_rst(hdev, true);
break;
default:
break;
}
return ret;
}
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
u32 reg_val;
int ret = 0;
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
/* There is no mechanism for PF to know if VF has stopped IO
* for now, just wait 100 ms for VF to stop IO
*/
msleep(100);
ret = hclge_func_reset_cmd(hdev, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
......@@ -2515,6 +2616,19 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
*/
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
break;
case HNAE3_FLR_RESET:
/* There is no mechanism for PF to know if VF has stopped IO
* for now, just wait 100 ms for VF to stop IO
*/
msleep(100);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
break;
case HNAE3_IMP_RESET:
reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
break;
default:
break;
}
......@@ -2562,6 +2676,23 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
return false;
}
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
int ret = 0;
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
/* fall through */
case HNAE3_FLR_RESET:
ret = hclge_set_all_vf_rst(hdev, false);
break;
default:
break;
}
return ret;
}
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
......@@ -2579,6 +2710,10 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset;
ret = hclge_reset_prepare_down(hdev);
if (ret)
goto err_reset;
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
......@@ -2614,6 +2749,10 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_clear_reset_cause(hdev);
ret = hclge_reset_prepare_up(hdev);
if (ret)
goto err_reset_lock;
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
if (ret)
goto err_reset_lock;
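
Putting the new hooks together, the PF reset path now brackets the
pre-existing flow; a condensed sketch (editorial; rtnl locking, error
paths, and the unchanged middle steps are elided or assumed from the
existing driver code):

static void hclge_reset_sketch(struct hclge_dev *hdev)
{
	hclge_reset_prepare_down(hdev);		/* assert VF FUNC_RST_ING   */
	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	hclge_reset_prepare_wait(hdev);		/* trigger HW reset         */
	hclge_reset_wait(hdev);			/* poll HW or FLR_DONE flag */
	hclge_clear_reset_cause(hdev);
	hclge_reset_prepare_up(hdev);		/* deassert VF FUNC_RST_ING */
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}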
......@@ -6815,6 +6954,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
cancel_work_sync(&hdev->mbx_service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS 100
#define HCLGE_FLR_WAIT_CNT 50
struct hclge_dev *hdev = ae_dev->priv;
int cnt = 0;
clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
hclge_reset_event(hdev->pdev, NULL);
while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
cnt++ < HCLGE_FLR_WAIT_CNT)
msleep(HCLGE_FLR_WAIT_MS);
if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
dev_err(&hdev->pdev->dev,
"flr wait down timeout: %d\n", cnt);
}
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
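
The FLR handshake thus turns on two flag bits polled from different
contexts: hclge_flr_prepare() waits for the reset task to set
HNAE3_FLR_DOWN before the PCI core issues the FLR, and
hclge_reset_wait() later waits for hclge_flr_done() to set
HNAE3_FLR_DONE. Both sides follow the same wait-for-flag pattern; a
generic sketch (hypothetical helper, not in the driver):

static int hclge_wait_flag_sketch(unsigned long *state, int bit,
				  unsigned int wait_ms, unsigned int max_cnt)
{
	unsigned int cnt = 0;

	/* Poll until the other context sets the bit, or we give up. */
	while (!test_bit(bit, state) && cnt++ < max_cnt)
		msleep(wait_ms);

	return test_bit(bit, state) ? 0 : -ETIMEDOUT;
}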
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
......@@ -7473,6 +7640,8 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
.flr_prepare = hclge_flr_prepare,
.flr_done = hclge_flr_done,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
.map_ring_to_vector = hclge_map_ring_to_vector,
......
......@@ -97,6 +97,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
/* Reset related Registers */
#define HCLGE_PF_OTHER_INT_REG 0x20600
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
......@@ -116,6 +117,8 @@ enum HLCGE_PORT_TYPE {
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
......@@ -594,6 +597,7 @@ struct hclge_dev {
struct hclge_misc_vector misc_vector;
struct hclge_hw_stats hw_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
enum hnae3_reset_type reset_type;
......@@ -775,6 +779,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
return tqp->index;
}
static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
{
return !!hdev->reset_pending;
}
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
......@@ -784,6 +794,7 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
......
......@@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
enum hnae3_reset_type reset_type;
u8 msg_data[2];
u8 dest_vfid;
dest_vfid = (u8)vport->vport_id;
if (hdev->reset_type == HNAE3_FUNC_RESET)
reset_type = HNAE3_VF_PF_FUNC_RESET;
else if (hdev->reset_type == HNAE3_FLR_RESET)
reset_type = HNAE3_VF_FULL_RESET;
else
return -EINVAL;
memcpy(&msg_data[0], &reset_type, sizeof(u16));
/* send this requested info to VF */
return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
......@@ -363,24 +374,10 @@ static void hclge_reset_vf(struct hclge_vport *vport,
int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
mbx_req->mbx_src_vfid);
/* Acknowledge VF that PF is now about to assert the reset for the VF.
* On receiving this message VF will get into pending state and will
* start polling for the hardware reset completion status.
*/
ret = hclge_inform_reset_assert_to_vf(vport);
if (ret) {
dev_err(&hdev->pdev->dev,
"PF fail(%d) to inform VF(%d)of reset, reset failed!\n",
ret, vport->vport_id);
return;
}
vport->vport_id);
dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n",
mbx_req->mbx_src_vfid);
/* reset this virtual function */
hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
ret = hclge_func_reset_cmd(hdev, vport->vport_id);
hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
......
......@@ -189,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
if (num > hclgevf_ring_space(&hw->cmq.csq)) {
if (num > hclgevf_ring_space(&hw->cmq.csq) ||
test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
......@@ -338,6 +339,16 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
/* Check if there is new reset pending, because the higher level
* reset may happen when lower level reset is being processed.
*/
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
return -EBUSY;
}
/* get firmware version */
ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
......
......@@ -2,6 +2,7 @@
// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
......@@ -10,8 +11,7 @@
#define HCLGEVF_NAME "hclgevf"
static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
static const struct pci_device_id ae_algovf_pci_tbl[] = {
......@@ -209,12 +209,6 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
struct hclgevf_tqp *tqp;
int i;
/* if this is on going reset then we need to re-allocate the TPQs
* since we cannot assume we would get same number of TPQs back from PF
*/
if (hclgevf_dev_ongoing_reset(hdev))
devm_kfree(&hdev->pdev->dev, hdev->htqp);
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hclgevf_tqp), GFP_KERNEL);
if (!hdev->htqp)
......@@ -258,12 +252,6 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
new_tqps = kinfo->rss_size * kinfo->num_tc;
kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
/* if this is on going reset then we need to re-allocate the hnae queues
* as well since number of TPQs from PF might have changed.
*/
if (hclgevf_dev_ongoing_reset(hdev))
devm_kfree(&hdev->pdev->dev, kinfo->tqp);
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
......@@ -868,6 +856,9 @@ static int hclgevf_unmap_ring_from_vector(
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret, vector_id;
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
return 0;
vector_id = hclgevf_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&handle->pdev->dev,
......@@ -1095,33 +1086,74 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev,
{
struct hnae3_client *client = hdev->nic_client;
struct hnae3_handle *handle = &hdev->nic;
int ret;
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
return client->ops->reset_notify(handle, type);
ret = client->ops->reset_notify(handle, type);
if (ret)
dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
type, ret);
return ret;
}
static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
struct hclgevf_dev *hdev = ae_dev->priv;
set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
unsigned long delay_us,
unsigned long wait_cnt)
{
unsigned long cnt = 0;
while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
cnt++ < wait_cnt)
usleep_range(delay_us, delay_us * 2);
if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
dev_err(&hdev->pdev->dev,
"flr wait timeout\n");
return -ETIMEDOUT;
}
return 0;
}
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS 500
#define HCLGEVF_RESET_WAIT_CNT 20
u32 val, cnt = 0;
#define HCLGEVF_RESET_WAIT_US 20000
#define HCLGEVF_RESET_WAIT_CNT 2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US \
(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
u32 val;
int ret;
/* wait to check the hardware reset completion status */
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
(cnt < HCLGEVF_RESET_WAIT_CNT)) {
msleep(HCLGEVF_RESET_WAIT_MS);
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
cnt++;
}
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
if (hdev->reset_type == HNAE3_FLR_RESET)
return hclgevf_flr_poll_timeout(hdev,
HCLGEVF_RESET_WAIT_US,
HCLGEVF_RESET_WAIT_CNT);
ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
!(val & HCLGEVF_RST_ING_BITS),
HCLGEVF_RESET_WAIT_US,
HCLGEVF_RESET_WAIT_TIMEOUT_US);
/* hardware completion status should be available by this time */
if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
dev_warn(&hdev->pdev->dev,
if (ret) {
dev_err(&hdev->pdev->dev,
"could'nt get reset done status from h/w, timeout!\n");
return -EBUSY;
return ret;
}
/* we will wait a bit more to let reset of the stack to complete. This
......@@ -1138,10 +1170,12 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
int ret;
/* uninitialize the nic client */
hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
return ret;
/* re-initialize the hclge device */
ret = hclgevf_init_hdev(hdev);
ret = hclgevf_reset_hdev(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
"hclge device re-init failed, VF is disabled!\n");
......@@ -1149,23 +1183,60 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
}
/* bring up the nic client again */
hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
if (ret)
return ret;
return 0;
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
int ret = 0;
switch (hdev->reset_type) {
case HNAE3_VF_FUNC_RESET:
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
0, true, NULL, sizeof(u8));
break;
case HNAE3_FLR_RESET:
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
break;
default:
break;
}
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
hdev->reset_type, ret);
return ret;
}
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
hdev->reset_count++;
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
goto err_reset_lock;
rtnl_unlock();
ret = hclgevf_reset_prepare_wait(hdev);
if (ret)
goto err_reset;
/* check if VF could successfully fetch the hardware reset completion
* status from the hardware
*/
......@@ -1175,42 +1246,37 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
dev_err(&hdev->pdev->dev,
"VF failed(=%d) to fetch H/W reset completion status\n",
ret);
dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
rtnl_unlock();
return ret;
goto err_reset;
}
rtnl_lock();
/* now, re-initialize the nic client and ae device*/
ret = hclgevf_reset_stack(hdev);
if (ret)
if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
goto err_reset_lock;
}
/* bring up the nic to enable TX/RX again */
hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
if (ret)
goto err_reset_lock;
rtnl_unlock();
return ret;
}
static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
int status;
u8 respmsg;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
0, false, &respmsg, sizeof(u8));
if (status)
dev_err(&hdev->pdev->dev,
"VF reset request to PF failed(=%d)\n", status);
err_reset_lock:
rtnl_unlock();
err_reset:
/* When VF reset failed, only the higher level reset asserted by PF
* can restore it, so re-initialize the command queue to receive
* this higher reset event.
*/
hclgevf_cmd_init(hdev);
dev_err(&hdev->pdev->dev, "failed to reset VF\n");
return status;
return ret;
}
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
......@@ -1218,9 +1284,26 @@ static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
/* return the highest priority reset level amongst all */
if (test_bit(HNAE3_VF_RESET, addr)) {
rst_level = HNAE3_VF_RESET;
clear_bit(HNAE3_VF_RESET, addr);
clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
clear_bit(HNAE3_VF_FUNC_RESET, addr);
} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
rst_level = HNAE3_VF_FULL_RESET;
clear_bit(HNAE3_VF_FULL_RESET, addr);
clear_bit(HNAE3_VF_FUNC_RESET, addr);
} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
rst_level = HNAE3_VF_PF_FUNC_RESET;
clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
clear_bit(HNAE3_VF_FUNC_RESET, addr);
} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
rst_level = HNAE3_VF_FUNC_RESET;
clear_bit(HNAE3_VF_FUNC_RESET, addr);
} else if (test_bit(HNAE3_FLR_RESET, addr)) {
rst_level = HNAE3_FLR_RESET;
clear_bit(HNAE3_FLR_RESET, addr);
}
return rst_level;
......@@ -1229,16 +1312,17 @@ static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclgevf_dev *hdev = ae_dev->priv;
dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
if (!hdev->default_reset_request)
if (hdev->default_reset_request)
hdev->reset_level =
hclgevf_get_reset_level(hdev,
&hdev->default_reset_request);
else
hdev->reset_level = HNAE3_VF_RESET;
hdev->reset_level = HNAE3_VF_FUNC_RESET;
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
......@@ -1255,6 +1339,27 @@ static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
set_bit(rst_type, &hdev->default_reset_request);
}
static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS 100
#define HCLGEVF_FLR_WAIT_CNT 50
struct hclgevf_dev *hdev = ae_dev->priv;
int cnt = 0;
clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
hclgevf_reset_event(hdev->pdev, NULL);
while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
cnt++ < HCLGEVF_FLR_WAIT_CNT)
msleep(HCLGEVF_FLR_WAIT_MS);
if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
dev_err(&hdev->pdev->dev,
"flr wait down timeout: %d\n", cnt);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
......@@ -1341,9 +1446,15 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
hdev->reset_attempts = 0;
hdev->last_reset_time = jiffies;
while ((hdev->reset_type =
hclgevf_get_reset_level(hdev, &hdev->reset_pending))
!= HNAE3_NONE_RESET) {
ret = hclgevf_reset(hdev);
if (ret)
dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
dev_err(&hdev->pdev->dev,
"VF stack reset failed %d.\n", ret);
}
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
......@@ -1372,19 +1483,17 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
if (hdev->reset_attempts > 3) {
/* prepare for full reset of stack + pcie interface */
hdev->reset_level = HNAE3_VF_FULL_RESET;
set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
/* request PF for resetting this VF via mailbox */
ret = hclgevf_do_reset(hdev);
if (ret)
dev_warn(&hdev->pdev->dev,
"VF rst fail, stack will call\n");
set_bit(hdev->reset_level, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
hclgevf_reset_task_schedule(hdev);
}
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
......@@ -1427,24 +1536,37 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}
static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
u32 *clearval)
{
u32 cmdq_src_reg;
u32 cmdq_src_reg, rst_ing_reg;
/* fetch the events from their corresponding regs */
cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
HCLGEVF_VECTOR0_CMDQ_SRC_REG);
if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
*clearval = cmdq_src_reg;
return HCLGEVF_VECTOR0_EVENT_RST;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
*clearval = cmdq_src_reg;
return true;
return HCLGEVF_VECTOR0_EVENT_MBX;
}
dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
return false;
return HCLGEVF_VECTOR0_EVENT_OTHER;
}
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
......@@ -1454,19 +1576,28 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
enum hclgevf_evt_cause event_cause;
struct hclgevf_dev *hdev = data;
u32 clearval;
hclgevf_enable_vector(&hdev->misc_vector, false);
if (!hclgevf_check_event_cause(hdev, &clearval))
goto skip_sched;
event_cause = hclgevf_check_evt_cause(hdev, &clearval);
switch (event_cause) {
case HCLGEVF_VECTOR0_EVENT_RST:
hclgevf_reset_task_schedule(hdev);
break;
case HCLGEVF_VECTOR0_EVENT_MBX:
hclgevf_mbx_handler(hdev);
break;
default:
break;
}
if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
hclgevf_clear_event_cause(hdev, clearval);
skip_sched:
hclgevf_enable_vector(&hdev->misc_vector, true);
}
return IRQ_HANDLED;
}
......@@ -1615,10 +1746,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
/* if this is on going reset then skip this initialization */
if (hclgevf_dev_ongoing_reset(hdev))
return;
/* setup tasks for the MBX */
INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
......@@ -1660,10 +1787,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
int vectors;
int i;
/* if this is on going reset then skip this initialization */
if (hclgevf_dev_ongoing_reset(hdev))
return 0;
if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
vectors = pci_alloc_irq_vectors(pdev,
hdev->roce_base_msix_offset + 1,
......@@ -1702,6 +1825,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(int), GFP_KERNEL);
if (!hdev->vector_irq) {
devm_kfree(&pdev->dev, hdev->vector_status);
pci_free_irq_vectors(pdev);
return -ENOMEM;
}
......@@ -1713,6 +1837,8 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
devm_kfree(&pdev->dev, hdev->vector_status);
devm_kfree(&pdev->dev, hdev->vector_irq);
pci_free_irq_vectors(pdev);
}
......@@ -1720,10 +1846,6 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
int ret = 0;
/* if this is on going reset then skip this initialization */
if (hclgevf_dev_ongoing_reset(hdev))
return 0;
hclgevf_get_misc_vector(hdev);
ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
......@@ -1853,14 +1975,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
struct hclgevf_hw *hw;
int ret;
/* check if we need to skip initialization of pci. This will happen if
* device is undergoing VF reset. Otherwise, we would need to
* re-initialize pci interface again i.e. when device is not going
* through *any* reset or actually undergoing full reset.
*/
if (hclgevf_dev_ongoing_reset(hdev))
return 0;
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
......@@ -1949,17 +2063,82 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
return 0;
}
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int ret = 0;
if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
hclgevf_misc_irq_uninit(hdev);
hclgevf_uninit_msi(hdev);
clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
}
if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
pci_set_master(pdev);
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev,
"failed(%d) to init MSI/MSI-X\n", ret);
return ret;
}
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
hclgevf_uninit_msi(hdev);
dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
ret);
return ret;
}
set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
}
return ret;
}
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int ret;
/* check if device is on-going full reset(i.e. pcie as well) */
if (hclgevf_dev_ongoing_full_reset(hdev)) {
dev_warn(&pdev->dev, "device is going full reset\n");
hclgevf_uninit_hdev(hdev);
ret = hclgevf_pci_reset(hdev);
if (ret) {
dev_err(&pdev->dev, "pci reset failed %d\n", ret);
return ret;
}
ret = hclgevf_cmd_init(hdev);
if (ret) {
dev_err(&pdev->dev, "cmd failed %d\n", ret);
return ret;
}
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize RSS\n", ret);
return ret;
}
ret = hclgevf_init_vlan_config(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
return ret;
}
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
}
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int ret;
ret = hclgevf_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI initialization failed\n");
......@@ -1991,7 +2170,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
}
hclgevf_state_init(hdev);
hdev->reset_level = HNAE3_VF_RESET;
hdev->reset_level = HNAE3_VF_FUNC_RESET;
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
......@@ -2000,6 +2179,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_misc_irq_init;
}
set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
ret = hclgevf_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
......@@ -2047,16 +2228,21 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
hclgevf_pci_uninit(hdev);
clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
}
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
hclgevf_state_uninit(hdev);
if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
hclgevf_misc_irq_uninit(hdev);
hclgevf_cmd_uninit(hdev);
hclgevf_uninit_msi(hdev);
hclgevf_pci_uninit(hdev);
}
hclgevf_cmd_uninit(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
......@@ -2163,7 +2349,7 @@ static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}
static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
......@@ -2183,6 +2369,8 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
.flr_prepare = hclgevf_flr_prepare,
.flr_done = hclgevf_flr_done,
.init_client_instance = hclgevf_init_client_instance,
.uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
......
......@@ -31,11 +31,19 @@
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
#define HCLGEVF_VECTOR0_RST_INT_B 2
#define HCLGEVF_TQP_RESET_TRY_TIMES 10
/* Reset related Registers */
#define HCLGEVF_FUN_RST_ING 0x20C00
#define HCLGEVF_FUN_RST_ING_B 0
#define HCLGEVF_RST_ING 0x20C00
#define HCLGEVF_FUN_RST_ING_BIT BIT(0)
#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5)
#define HCLGEVF_CORE_RST_ING_BIT BIT(6)
#define HCLGEVF_IMP_RST_ING_BIT BIT(7)
#define HCLGEVF_RST_ING_BITS \
(HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
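
Grouping the four source bits into HCLGEVF_RST_ING_BITS lets the VF
watch every reset source with one register poll; a minimal usage sketch
(mirroring the hclgevf_reset_wait() change above; assumes
<linux/iopoll.h> and the wait constants defined in that function):

u32 val;
int ret;

/* Sketch: spin until no reset source is asserted in HCLGEVF_RST_ING. */
ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
			 !(val & HCLGEVF_RST_ING_BITS),
			 HCLGEVF_RESET_WAIT_US,
			 HCLGEVF_RESET_WAIT_TIMEOUT_US);
if (ret)
	dev_err(&hdev->pdev->dev, "reset still asserted after timeout\n");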
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
......@@ -54,17 +62,25 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
HCLGEVF_VECTOR0_EVENT_MBX,
HCLGEVF_VECTOR0_EVENT_OTHER,
};
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
HCLGEVF_STATE_DOWN,
HCLGEVF_STATE_DISABLED,
HCLGEVF_STATE_IRQ_INITED,
/* task states */
HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
HCLGEVF_STATE_CMD_DISABLE,
};
#define HCLGEVF_MPF_ENBALE 1
......@@ -145,9 +161,12 @@ struct hclgevf_dev {
struct hclgevf_misc_vector misc_vector;
struct hclgevf_rss_cfg rss_cfg;
unsigned long state;
unsigned long flr_state;
unsigned long default_reset_request;
unsigned long last_reset_time;
enum hnae3_reset_type reset_level;
unsigned long reset_pending;
enum hnae3_reset_type reset_type;
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
......@@ -196,18 +215,9 @@ struct hclgevf_dev {
u32 flag;
};
static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
{
return (hdev &&
(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
(hdev->reset_level == HNAE3_VF_RESET));
}
static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
{
return (hdev &&
(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
(hdev->reset_level == HNAE3_VF_FULL_RESET));
return !!hdev->reset_pending;
}
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
......
......@@ -40,6 +40,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return -EIO;
udelay(HCLGEVF_SLEEP_USCOEND);
i++;
}
......@@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
crq = &hdev->hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
dev_info(&hdev->pdev->dev, "vf crq need init\n");
return;
}
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
......@@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
enum hnae3_reset_type reset_type;
u16 link_status;
u16 *msg_q;
u8 duplex;
......@@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
/* process all the async queue messages */
while (tail != hdev->arq.head) {
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
dev_info(&hdev->pdev->dev,
"vf crq need init in async\n");
return;
}
msg_q = hdev->arq.msg_q[hdev->arq.head];
switch (msg_q[0]) {
......@@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
hdev->reset_level = HNAE3_VF_RESET;
reset_type = le16_to_cpu(msg_q[1]);
set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
......