Commit 7ed19eb9 authored by David S. Miller's avatar David S. Miller

Merge branch 'Misc-Bug-Fixes-and-clean-ups-for-HNS3-Driver'

Salil Mehta says:

====================
Misc. Bug Fixes and clean-ups for HNS3 Driver

This patch-set mainly introduces various bug fixes, cleanups and one
very small enhancement to existing HNS3 driver code.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0b7d9978 6a814413
...@@ -50,13 +50,22 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, ...@@ -50,13 +50,22 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* now, (un-)instantiate client by calling lower layer */ /* now, (un-)instantiate client by calling lower layer */
if (is_reg) { if (is_reg) {
ret = ae_dev->ops->init_client_instance(client, ae_dev); ret = ae_dev->ops->init_client_instance(client, ae_dev);
if (ret) if (ret) {
dev_err(&ae_dev->pdev->dev, dev_err(&ae_dev->pdev->dev,
"fail to instantiate client\n"); "fail to instantiate client\n");
return ret; return ret;
}
hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 1);
return 0;
}
if (hnae_get_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B)) {
ae_dev->ops->uninit_client_instance(client, ae_dev);
hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 0);
} }
ae_dev->ops->uninit_client_instance(client, ae_dev);
return 0; return 0;
} }
...@@ -89,7 +98,7 @@ int hnae3_register_client(struct hnae3_client *client) ...@@ -89,7 +98,7 @@ int hnae3_register_client(struct hnae3_client *client)
exit: exit:
mutex_unlock(&hnae3_common_lock); mutex_unlock(&hnae3_common_lock);
return ret; return 0;
} }
EXPORT_SYMBOL(hnae3_register_client); EXPORT_SYMBOL(hnae3_register_client);
...@@ -112,7 +121,7 @@ EXPORT_SYMBOL(hnae3_unregister_client); ...@@ -112,7 +121,7 @@ EXPORT_SYMBOL(hnae3_unregister_client);
* @ae_algo: AE algorithm * @ae_algo: AE algorithm
* NOTE: the duplicated name will not be checked * NOTE: the duplicated name will not be checked
*/ */
int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
{ {
const struct pci_device_id *id; const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev; struct hnae3_ae_dev *ae_dev;
...@@ -151,8 +160,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) ...@@ -151,8 +160,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
} }
mutex_unlock(&hnae3_common_lock); mutex_unlock(&hnae3_common_lock);
return ret;
} }
EXPORT_SYMBOL(hnae3_register_ae_algo); EXPORT_SYMBOL(hnae3_register_ae_algo);
...@@ -168,6 +175,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) ...@@ -168,6 +175,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
mutex_lock(&hnae3_common_lock); mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */ /* Check if there are matched ae_dev */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!id) if (!id)
continue; continue;
...@@ -191,22 +201,14 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo); ...@@ -191,22 +201,14 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo);
* @ae_dev: the AE device * @ae_dev: the AE device
* NOTE: the duplicated name will not be checked * NOTE: the duplicated name will not be checked
*/ */
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
{ {
const struct pci_device_id *id; const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo; struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client; struct hnae3_client *client;
int ret = 0, lock_acquired; int ret = 0;
/* we can get deadlocked if SRIOV is being enabled in context to probe mutex_lock(&hnae3_common_lock);
* and probe gets called again in same context. This can happen when
* pci_enable_sriov() is called to create VFs from PF probes context.
* Therefore, for simplicity uniformly defering further probing in all
* cases where we detect contention.
*/
lock_acquired = mutex_trylock(&hnae3_common_lock);
if (!lock_acquired)
return -EPROBE_DEFER;
list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
...@@ -220,7 +222,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -220,7 +222,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!ae_dev->ops) { if (!ae_dev->ops) {
dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
ret = -EOPNOTSUPP;
goto out_err; goto out_err;
} }
...@@ -247,8 +248,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -247,8 +248,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
out_err: out_err:
mutex_unlock(&hnae3_common_lock); mutex_unlock(&hnae3_common_lock);
return ret;
} }
EXPORT_SYMBOL(hnae3_register_ae_dev); EXPORT_SYMBOL(hnae3_register_ae_dev);
...@@ -264,6 +263,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -264,6 +263,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_lock(&hnae3_common_lock); mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */ /* Check if there are matched ae_algo */
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!id) if (!id)
continue; continue;
......
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
#define HNAE3_DEV_INITED_B 0x0 #define HNAE3_DEV_INITED_B 0x0
#define HNAE3_DEV_SUPPORT_ROCE_B 0x1 #define HNAE3_DEV_SUPPORT_ROCE_B 0x1
#define HNAE3_DEV_SUPPORT_DCB_B 0x2 #define HNAE3_DEV_SUPPORT_DCB_B 0x2
#define HNAE3_CLIENT_INITED_B 0x3
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B)) BIT(HNAE3_DEV_SUPPORT_ROCE_B))
...@@ -514,11 +515,11 @@ struct hnae3_handle { ...@@ -514,11 +515,11 @@ struct hnae3_handle {
#define hnae_get_bit(origin, shift) \ #define hnae_get_bit(origin, shift) \
hnae_get_field((origin), (0x1 << (shift)), (shift)) hnae_get_field((origin), (0x1 << (shift)), (shift))
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_client(struct hnae3_client *client); void hnae3_unregister_client(struct hnae3_client *client);
int hnae3_register_client(struct hnae3_client *client); int hnae3_register_client(struct hnae3_client *client);
......
...@@ -1487,6 +1487,45 @@ static const struct net_device_ops hns3_nic_netdev_ops = { ...@@ -1487,6 +1487,45 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
}; };
/* hns3_is_phys_func - tell PF devices apart from VF devices
 * @pdev: the PCI device being probed
 *
 * Returns true for the known physical-function device IDs, false for the
 * known virtual-function IDs. Unrecognized IDs are warned about and treated
 * as non-PF so SR-IOV handling is skipped for them.
 */
static bool hns3_is_phys_func(struct pci_dev *pdev)
{
	u32 dev_id = pdev->device;

	switch (dev_id) {
	case HNAE3_DEV_ID_GE:
	case HNAE3_DEV_ID_25GE:
	case HNAE3_DEV_ID_25GE_RDMA:
	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_50GE_RDMA:
	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
		return true;
	case HNAE3_DEV_ID_100G_VF:
	case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
		return false;
	default:
		/* %u matches the unsigned dev_id; printk messages need '\n' */
		dev_warn(&pdev->dev, "un-recognized pci device-id %u\n",
			 dev_id);
	}

	return false;
}
/* hns3_disable_sriov - tear down SR-IOV for a PF device on driver removal
 * @pdev: the PF PCI device whose VFs should be released
 */
static void hns3_disable_sriov(struct pci_dev *pdev)
{
	/* Shutting down SR-IOV while VFs are still assigned to guests
	 * would cause issues, so in that case leave the hardware
	 * available but disabled and only warn.
	 */
	if (!pci_vfs_assigned(pdev)) {
		pci_disable_sriov(pdev);
		return;
	}

	dev_warn(&pdev->dev,
		 "disabling driver while VFs are assigned\n");
}
/* hns3_probe - Device initialization routine /* hns3_probe - Device initialization routine
* @pdev: PCI device information struct * @pdev: PCI device information struct
* @ent: entry in hns3_pci_tbl * @ent: entry in hns3_pci_tbl
...@@ -1514,7 +1553,9 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1514,7 +1553,9 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->dev_type = HNAE3_DEV_KNIC; ae_dev->dev_type = HNAE3_DEV_KNIC;
pci_set_drvdata(pdev, ae_dev); pci_set_drvdata(pdev, ae_dev);
return hnae3_register_ae_dev(ae_dev); hnae3_register_ae_dev(ae_dev);
return 0;
} }
/* hns3_remove - Device removal routine /* hns3_remove - Device removal routine
...@@ -1524,14 +1565,49 @@ static void hns3_remove(struct pci_dev *pdev) ...@@ -1524,14 +1565,49 @@ static void hns3_remove(struct pci_dev *pdev)
{ {
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
hns3_disable_sriov(pdev);
hnae3_unregister_ae_dev(ae_dev); hnae3_unregister_ae_dev(ae_dev);
} }
/**
 * hns3_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 *
 * Return: number of VFs enabled on success, 0 when VFs were disabled (or
 * could not be freed because they are assigned), negative errno on failure.
 **/
int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
		dev_warn(&pdev->dev, "Can not config SRIOV\n");
		return -EINVAL;
	}

	if (num_vfs) {
		ret = pci_enable_sriov(pdev, num_vfs);
		if (ret) {
			dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
			/* propagate the error so sysfs reports the failure */
			return ret;
		}
		/* the sriov_configure contract expects the VF count back */
		return num_vfs;
	}

	if (!pci_vfs_assigned(pdev)) {
		pci_disable_sriov(pdev);
		return 0;
	}

	dev_warn(&pdev->dev,
		 "Unable to free VFs because some are assigned to VMs.\n");
	return 0;
}
static struct pci_driver hns3_driver = { static struct pci_driver hns3_driver = {
.name = hns3_driver_name, .name = hns3_driver_name,
.id_table = hns3_pci_tbl, .id_table = hns3_pci_tbl,
.probe = hns3_probe, .probe = hns3_probe,
.remove = hns3_remove, .remove = hns3_remove,
.sriov_configure = hns3_pci_sriov_configure,
}; };
/* set default feature to hns3 */ /* set default feature to hns3 */
......
...@@ -1473,21 +1473,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) ...@@ -1473,21 +1473,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
hdev->vport = vport; hdev->vport = vport;
hdev->num_alloc_vport = num_vport; hdev->num_alloc_vport = num_vport;
#ifdef CONFIG_PCI_IOV if (IS_ENABLED(CONFIG_PCI_IOV))
/* Enable SRIOV */ hdev->num_alloc_vfs = hdev->num_req_vfs;
if (hdev->num_req_vfs) {
dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
hdev->num_req_vfs);
ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
if (ret) {
hdev->num_alloc_vfs = 0;
dev_err(&pdev->dev, "SRIOV enable failed %d\n",
ret);
return ret;
}
}
hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif
for (i = 0; i < num_vport; i++) { for (i = 0; i < num_vport; i++) {
vport->back = hdev; vport->back = hdev;
...@@ -2946,21 +2933,6 @@ static void hclge_service_task(struct work_struct *work) ...@@ -2946,21 +2933,6 @@ static void hclge_service_task(struct work_struct *work)
hclge_service_complete(hdev); hclge_service_complete(hdev);
} }
static void hclge_disable_sriov(struct hclge_dev *hdev)
{
/* If our VFs are assigned we cannot shut down SR-IOV
* without causing issues, so just leave the hardware
* available but disabled
*/
if (pci_vfs_assigned(hdev->pdev)) {
dev_warn(&hdev->pdev->dev,
"disabling driver while VFs are assigned\n");
return;
}
pci_disable_sriov(hdev->pdev);
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{ {
/* VF handle has no client */ /* VF handle has no client */
...@@ -3784,6 +3756,7 @@ static int hclge_ae_start(struct hnae3_handle *handle) ...@@ -3784,6 +3756,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
hclge_cfg_mac_mode(hdev, true); hclge_cfg_mac_mode(hdev, true);
clear_bit(HCLGE_STATE_DOWN, &hdev->state); clear_bit(HCLGE_STATE_DOWN, &hdev->state);
mod_timer(&hdev->service_timer, jiffies + HZ); mod_timer(&hdev->service_timer, jiffies + HZ);
hdev->hw.mac.link = 0;
/* reset tqp stats */ /* reset tqp stats */
hclge_reset_tqp_stats(handle); hclge_reset_tqp_stats(handle);
...@@ -3820,7 +3793,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle) ...@@ -3820,7 +3793,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* reset tqp stats */ /* reset tqp stats */
hclge_reset_tqp_stats(handle); hclge_reset_tqp_stats(handle);
hclge_update_link_status(hdev);
} }
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
...@@ -5407,7 +5379,7 @@ static int hclge_pci_init(struct hclge_dev *hdev) ...@@ -5407,7 +5379,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = pci_enable_device(pdev); ret = pci_enable_device(pdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n"); dev_err(&pdev->dev, "failed to enable PCI device\n");
goto err_no_drvdata; return ret;
} }
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
...@@ -5445,8 +5417,6 @@ static int hclge_pci_init(struct hclge_dev *hdev) ...@@ -5445,8 +5417,6 @@ static int hclge_pci_init(struct hclge_dev *hdev)
pci_release_regions(pdev); pci_release_regions(pdev);
err_disable_device: err_disable_device:
pci_disable_device(pdev); pci_disable_device(pdev);
err_no_drvdata:
pci_set_drvdata(pdev, NULL);
return ret; return ret;
} }
...@@ -5455,6 +5425,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) ...@@ -5455,6 +5425,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{ {
struct pci_dev *pdev = hdev->pdev; struct pci_dev *pdev = hdev->pdev;
pcim_iounmap(pdev, hdev->hw.io_base);
pci_free_irq_vectors(pdev); pci_free_irq_vectors(pdev);
pci_clear_master(pdev); pci_clear_master(pdev);
pci_release_mem_regions(pdev); pci_release_mem_regions(pdev);
...@@ -5540,7 +5511,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -5540,7 +5511,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_map_tqp(hdev); ret = hclge_map_tqp(hdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
goto err_sriov_disable; goto err_msi_irq_uninit;
} }
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
...@@ -5548,7 +5519,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -5548,7 +5519,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"mdio config fail ret=%d\n", ret); "mdio config fail ret=%d\n", ret);
goto err_sriov_disable; goto err_msi_irq_uninit;
} }
} }
...@@ -5612,9 +5583,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -5612,9 +5583,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
err_mdiobus_unreg: err_mdiobus_unreg:
if (hdev->hw.mac.phydev) if (hdev->hw.mac.phydev)
mdiobus_unregister(hdev->hw.mac.mdio_bus); mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_sriov_disable:
if (IS_ENABLED(CONFIG_PCI_IOV))
hclge_disable_sriov(hdev);
err_msi_irq_uninit: err_msi_irq_uninit:
hclge_misc_irq_uninit(hdev); hclge_misc_irq_uninit(hdev);
err_msi_uninit: err_msi_uninit:
...@@ -5622,10 +5590,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -5622,10 +5590,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
err_cmd_uninit: err_cmd_uninit:
hclge_destroy_cmd_queue(&hdev->hw); hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit: err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.io_base);
pci_clear_master(pdev); pci_clear_master(pdev);
pci_release_regions(pdev); pci_release_regions(pdev);
pci_disable_device(pdev); pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
out: out:
return ret; return ret;
} }
...@@ -5717,9 +5685,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -5717,9 +5685,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state); set_bit(HCLGE_STATE_DOWN, &hdev->state);
if (IS_ENABLED(CONFIG_PCI_IOV))
hclge_disable_sriov(hdev);
if (hdev->service_timer.function) if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer); del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func) if (hdev->service_task.func)
...@@ -6287,7 +6252,9 @@ static int hclge_init(void) ...@@ -6287,7 +6252,9 @@ static int hclge_init(void)
{ {
pr_info("%s is initializing\n", HCLGE_NAME); pr_info("%s is initializing\n", HCLGE_NAME);
return hnae3_register_ae_algo(&ae_algo); hnae3_register_ae_algo(&ae_algo);
return 0;
} }
static void hclge_exit(void) static void hclge_exit(void)
......
...@@ -500,7 +500,8 @@ static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode) ...@@ -500,7 +500,8 @@ static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
return hclge_cmd_send(&hdev->hw, &desc, 1); return hclge_cmd_send(&hdev->hw, &desc, 1);
} }
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc) static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
u32 bit_map)
{ {
struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
struct hclge_desc desc; struct hclge_desc desc;
...@@ -511,9 +512,8 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc) ...@@ -511,9 +512,8 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
bp_to_qs_map_cmd->tc_id = tc; bp_to_qs_map_cmd->tc_id = tc;
bp_to_qs_map_cmd->qs_group_id = grp_id;
/* Qset and tc is one by one mapping */ bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);
return hclge_cmd_send(&hdev->hw, &desc, 1); return hclge_cmd_send(&hdev->hw, &desc, 1);
} }
...@@ -1167,6 +1167,41 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) ...@@ -1167,6 +1167,41 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
hdev->tm_info.hw_pfc_map); hdev->tm_info.hw_pfc_map);
} }
/* Each Tc has a 1024 queue sets to backpress, it divides to
 * 32 group, each group contains 32 queue sets, which can be
 * represented by u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	u32 i, k, qs_bitmap;
	int ret;

	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
		qs_bitmap = 0;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			/* Index the vport array from its base each pass.
			 * The previous code advanced a single running
			 * pointer across outer iterations and walked past
			 * the end of hdev->vport after the first group.
			 */
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
					     HCLGE_BP_GRP_ID_S);
			sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						 HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{ {
bool tx_en, rx_en; bool tx_en, rx_en;
...@@ -1218,7 +1253,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) ...@@ -1218,7 +1253,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret); dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
for (i = 0; i < hdev->tm_info.num_tc; i++) { for (i = 0; i < hdev->tm_info.num_tc; i++) {
ret = hclge_tm_qs_bp_cfg(hdev, i); ret = hclge_bp_setup_hw(hdev, i);
if (ret) if (ret)
return ret; return ret;
} }
......
...@@ -89,6 +89,11 @@ struct hclge_pg_shapping_cmd { ...@@ -89,6 +89,11 @@ struct hclge_pg_shapping_cmd {
__le32 pg_shapping_para; __le32 pg_shapping_para;
}; };
#define HCLGE_BP_GRP_NUM 32
#define HCLGE_BP_SUB_GRP_ID_S 0
#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
#define HCLGE_BP_GRP_ID_S 5
#define HCLGE_BP_GRP_ID_M GENMASK(9, 5)
struct hclge_bp_to_qs_map_cmd { struct hclge_bp_to_qs_map_cmd {
u8 tc_id; u8 tc_id;
u8 rsvd[2]; u8 rsvd[2];
......
...@@ -1563,7 +1563,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) ...@@ -1563,7 +1563,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
ret = pci_enable_device(pdev); ret = pci_enable_device(pdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n"); dev_err(&pdev->dev, "failed to enable PCI device\n");
goto err_no_drvdata; return ret;
} }
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
...@@ -1595,8 +1595,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) ...@@ -1595,8 +1595,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
pci_release_regions(pdev); pci_release_regions(pdev);
err_disable_device: err_disable_device:
pci_disable_device(pdev); pci_disable_device(pdev);
err_no_drvdata:
pci_set_drvdata(pdev, NULL);
return ret; return ret;
} }
...@@ -1608,7 +1607,6 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) ...@@ -1608,7 +1607,6 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
pci_clear_master(pdev); pci_clear_master(pdev);
pci_release_regions(pdev); pci_release_regions(pdev);
pci_disable_device(pdev); pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
} }
static int hclgevf_init_hdev(struct hclgevf_dev *hdev) static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
...@@ -1854,7 +1852,9 @@ static int hclgevf_init(void) ...@@ -1854,7 +1852,9 @@ static int hclgevf_init(void)
{ {
pr_info("%s is initializing\n", HCLGEVF_NAME); pr_info("%s is initializing\n", HCLGEVF_NAME);
return hnae3_register_ae_algo(&ae_algovf); hnae3_register_ae_algo(&ae_algovf);
return 0;
} }
static void hclgevf_exit(void) static void hclgevf_exit(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment