Commit 52c951f1 authored by David S. Miller

Merge branch 'net-hns3-Add-vf-mtu-support'

Salil Mehta says:

====================
net: hns3: Add vf mtu support

This patchset adds VF MTU support to the HNS3 driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d7b4a2f2 cdca4c48
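Before the diff itself: the heart of the series is the conversion between an MTU and a MAC frame size, plus the rule that a VF's frame size must fit within the PF's. A VF asks the PF to apply its MTU over the new HCLGE_MBX_SET_MTU mailbox message, and the PF validates the resulting frame size in hclge_set_vport_mtu() (see the last hclge_main.c hunk). The standalone C sketch below only mirrors that size accounting for illustration; the constants copy the defines from the diff, while the helper name mtu_to_frame_size() and its pf_frame_size/is_vf parameters are invented for the example and are not part of the driver.

/* Standalone sketch (not driver code): convert an MTU to the MAC frame
 * size programmed on the HNS3 hardware and apply the same validity
 * checks as hclge_set_vport_mtu() in the diff below.
 */
#include <stdio.h>

#define ETH_HLEN		14	/* Ethernet header */
#define ETH_FCS_LEN		4	/* frame check sequence */
#define VLAN_HLEN		4	/* one VLAN tag */
#define ETH_DATA_LEN		1500	/* default MTU */

#define HCLGE_MAC_MIN_FRAME	64
#define HCLGE_MAC_MAX_FRAME	9728
#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)

/* Hypothetical helper: returns the frame size to program, or -1 if the
 * requested MTU must be rejected.  pf_frame_size and is_vf stand in for
 * hdev->mps and vport->vport_id in the real code.
 */
static int mtu_to_frame_size(int new_mtu, int pf_frame_size, int is_vf)
{
	int max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -1;

	if (max_frm_size < HCLGE_MAC_DEFAULT_FRAME)
		max_frm_size = HCLGE_MAC_DEFAULT_FRAME;

	/* a VF may not exceed the frame size currently set on the PF */
	if (is_vf && max_frm_size > pf_frame_size)
		return -1;

	return max_frm_size;
}

int main(void)
{
	printf("PF, MTU 1500 -> frame size %d\n",
	       mtu_to_frame_size(1500, HCLGE_MAC_DEFAULT_FRAME, 0));
	printf("VF, MTU 9000 while PF is at the default -> %d\n",
	       mtu_to_frame_size(9000, HCLGE_MAC_DEFAULT_FRAME, 1));
	return 0;
}

Compiled on its own, the sketch reports a 1526-byte frame for a 1500-byte MTU (the default frame size) and rejects a 9000-byte MTU on a VF while the PF is still at the default, which is the same outcome the mailbox path below produces.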
...
@@ -36,6 +36,9 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_BIND_FUNC_QUEUE,	/* (VF -> PF) bind function and queue */
 	HCLGE_MBX_GET_LINK_STATUS,	/* (VF -> PF) get link status */
 	HCLGE_MBX_QUEUE_RESET,		/* (VF -> PF) reset queue */
+	HCLGE_MBX_KEEP_ALIVE,		/* (VF -> PF) send keep alive cmd */
+	HCLGE_MBX_SET_ALIVE,		/* (VF -> PF) set alive state */
+	HCLGE_MBX_SET_MTU,		/* (VF -> PF) set mtu */
 };

 /* below are per-VF mac-vlan subcodes */
...
@@ -210,6 +210,10 @@ struct hnae3_ae_dev {
  *   Enable the hardware
  * stop()
  *   Disable the hardware
+ * start_client()
+ *   Inform the hclge that client has been started
+ * stop_client()
+ *   Inform the hclge that client has been stopped
  * get_status()
  *   Get the carrier state of the back channel of the handle, 1 for ok, 0 for
  *   non-ok
@@ -319,6 +323,8 @@ struct hnae3_ae_ops {
 			   struct hnae3_ae_dev *ae_dev);
 	int (*start)(struct hnae3_handle *handle);
 	void (*stop)(struct hnae3_handle *handle);
+	int (*client_start)(struct hnae3_handle *handle);
+	void (*client_stop)(struct hnae3_handle *handle);
 	int (*get_status)(struct hnae3_handle *handle);
 	void (*get_ksettings_an_result)(struct hnae3_handle *handle,
 					u8 *auto_neg, u32 *speed, u8 *duplex);
...
@@ -1573,18 +1573,11 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct hnae3_handle *h = hns3_get_handle(netdev);
-	bool if_running = netif_running(netdev);
 	int ret;

 	if (!h->ae_algo->ops->set_mtu)
 		return -EOPNOTSUPP;

-	/* if this was called with netdev up then bring netdevice down */
-	if (if_running) {
-		(void)hns3_nic_net_stop(netdev);
-		msleep(100);
-	}
-
 	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
 	if (ret)
 		netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1592,10 +1585,6 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
 	else
 		netdev->mtu = new_mtu;

-	/* if the netdev was running earlier, bring it up again */
-	if (if_running && hns3_nic_net_open(netdev))
-		ret = -EINVAL;
-
 	return ret;
 }
@@ -3540,6 +3529,22 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
 }

+static int hns3_client_start(struct hnae3_handle *handle)
+{
+	if (!handle->ae_algo->ops->client_start)
+		return 0;
+
+	return handle->ae_algo->ops->client_start(handle);
+}
+
+static void hns3_client_stop(struct hnae3_handle *handle)
+{
+	if (!handle->ae_algo->ops->client_stop)
+		return;
+
+	handle->ae_algo->ops->client_stop(handle);
+}
+
 static int hns3_client_init(struct hnae3_handle *handle)
 {
 	struct pci_dev *pdev = handle->pdev;
@@ -3607,10 +3612,16 @@ static int hns3_client_init(struct hnae3_handle *handle)
 		goto out_reg_netdev_fail;
 	}

+	ret = hns3_client_start(handle);
+	if (ret) {
+		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+		goto out_reg_netdev_fail;
+	}
+
 	hns3_dcbnl_setup(handle);

-	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
-	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+	/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
+	netdev->max_mtu = HNS3_MAX_MTU;

 	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
@@ -3635,6 +3646,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	int ret;

+	hns3_client_stop(handle);
+
 	hns3_remove_hw_addr(netdev);

 	if (netdev->reg_state != NETREG_UNINITIALIZED)
...
@@ -76,7 +76,10 @@ enum hns3_nic_state {
 #define HNS3_RING_MAX_PENDING		32768
 #define HNS3_RING_MIN_PENDING		8
 #define HNS3_RING_BD_MULTIPLE		8
-#define HNS3_MAX_MTU			9728
+/* max frame size of mac */
+#define HNS3_MAC_MAX_FRAME		9728
+#define HNS3_MAX_MTU \
+	(HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))

 #define HNS3_BD_SIZE_512_TYPE		0
 #define HNS3_BD_SIZE_1024_TYPE		1
...
@@ -26,7 +26,7 @@
 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

-static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
@@ -1166,6 +1166,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
 	for (i = 0; i < num_vport; i++) {
 		vport->back = hdev;
 		vport->vport_id = i;
+		vport->mps = HCLGE_MAC_DEFAULT_FRAME;

 		if (i == 0)
 			ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -1969,10 +1970,7 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)

 static int hclge_mac_init(struct hclge_dev *hdev)
 {
-	struct hnae3_handle *handle = &hdev->vport[0].nic;
-	struct net_device *netdev = handle->kinfo.netdev;
 	struct hclge_mac *mac = &hdev->hw.mac;
-	int mtu;
 	int ret;

 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
@@ -1986,15 +1984,16 @@ static int hclge_mac_init(struct hclge_dev *hdev)

 	mac->link = 0;

-	if (netdev)
-		mtu = netdev->mtu;
-	else
-		mtu = ETH_DATA_LEN;
+	ret = hclge_set_mac_mtu(hdev, hdev->mps);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
+		return ret;
+	}

-	ret = hclge_set_mtu(handle, mtu);
+	ret = hclge_buffer_alloc(hdev);
 	if (ret)
 		dev_err(&hdev->pdev->dev,
-			"set mtu failed ret=%d\n", ret);
+			"allocate buffer fail, ret=%d\n", ret);

 	return ret;
 }
@@ -2913,6 +2912,23 @@ static void hclge_mailbox_service_task(struct work_struct *work)
 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
 }

+static void hclge_update_vport_alive(struct hclge_dev *hdev)
+{
+	int i;
+
+	/* start from vport 1 for PF is always alive */
+	for (i = 1; i < hdev->num_alloc_vport; i++) {
+		struct hclge_vport *vport = &hdev->vport[i];
+
+		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+		/* If vf is not alive, set to default value */
+		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+	}
+}
+
 static void hclge_service_task(struct work_struct *work)
 {
 	struct hclge_dev *hdev =
@@ -2925,6 +2941,7 @@ static void hclge_service_task(struct work_struct *work)

 	hclge_update_speed_duplex(hdev);
 	hclge_update_link_status(hdev);
+	hclge_update_vport_alive(hdev);
 	hclge_service_complete(hdev);
 }
@@ -5210,6 +5227,32 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 	hclge_update_link_status(hdev);
 }

+int hclge_vport_start(struct hclge_vport *vport)
+{
+	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+	vport->last_active_jiffies = jiffies;
+	return 0;
+}
+
+void hclge_vport_stop(struct hclge_vport *vport)
+{
+	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+}
+
+static int hclge_client_start(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	return hclge_vport_start(vport);
+}
+
+static void hclge_client_stop(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	hclge_vport_stop(vport);
+}
+
 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
 					  u16 cmdq_resp, u8 resp_code,
 					  enum hclge_mac_vlan_tbl_opcode op)
@@ -6357,54 +6400,76 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 	return hclge_set_vlan_rx_offload_cfg(vport);
 }

-static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
 {
 	struct hclge_config_max_frm_size_cmd *req;
 	struct hclge_desc desc;
-	int max_frm_size;
-	int ret;
-
-	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
-	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
-	    max_frm_size > HCLGE_MAC_MAX_FRAME)
-		return -EINVAL;
-
-	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);

 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
-	req->max_frm_size = cpu_to_le16(max_frm_size);
+	req->max_frm_size = cpu_to_le16(new_mps);
 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret)
-		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
-	else
-		hdev->mps = max_frm_size;
-
-	return ret;
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }

 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
 	struct hclge_dev *hdev = vport->back;
-	int ret;
+	int i, max_frm_size, ret = 0;
+
+	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+	    max_frm_size > HCLGE_MAC_MAX_FRAME)
+		return -EINVAL;
+
+	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+	mutex_lock(&hdev->vport_lock);
+
+	/* VF's mps must fit within hdev->mps */
+	if (vport->vport_id && max_frm_size > hdev->mps) {
+		mutex_unlock(&hdev->vport_lock);
+		return -EINVAL;
+	} else if (vport->vport_id) {
+		vport->mps = max_frm_size;
+		mutex_unlock(&hdev->vport_lock);
+		return 0;
+	}

-	ret = hclge_set_mac_mtu(hdev, new_mtu);
+	/* PF's mps must be greater then VF's mps */
+	for (i = 1; i < hdev->num_alloc_vport; i++)
+		if (max_frm_size < hdev->vport[i].mps) {
+			mutex_unlock(&hdev->vport_lock);
+			return -EINVAL;
+		}
+
+	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+	ret = hclge_set_mac_mtu(hdev, max_frm_size);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Change mtu fail, ret =%d\n", ret);
-		return ret;
+		goto out;
 	}

+	hdev->mps = max_frm_size;
+	vport->mps = max_frm_size;
+
 	ret = hclge_buffer_alloc(hdev);
 	if (ret)
 		dev_err(&hdev->pdev->dev,
 			"Allocate buffer fail, ret =%d\n", ret);

+out:
+	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+	mutex_unlock(&hdev->vport_lock);
 	return ret;
 }
@@ -7021,6 +7086,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->reset_type = HNAE3_NONE_RESET;
 	hdev->reset_level = HNAE3_FUNC_RESET;
 	ae_dev->priv = hdev;
+	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+
+	mutex_init(&hdev->vport_lock);

 	ret = hclge_pci_init(hdev);
 	if (ret) {
@@ -7197,6 +7265,17 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
 }

+static void hclge_reset_vport_state(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = hdev->vport;
+	int i;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		hclge_vport_start(vport);
+		vport++;
+	}
+}
+
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct hclge_dev *hdev = ae_dev->priv;
@@ -7282,6 +7361,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 	if (hclge_enable_tm_hw_error(hdev, true))
 		dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");

+	hclge_reset_vport_state(hdev);
+
 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
 		 HCLGE_DRIVER_NAME);
@@ -7308,6 +7389,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_destroy_cmd_queue(&hdev->hw);
 	hclge_misc_irq_uninit(hdev);
 	hclge_pci_uninit(hdev);
+	mutex_destroy(&hdev->vport_lock);
 	ae_dev->priv = NULL;
 }
@@ -7690,6 +7772,8 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_loopback = hclge_set_loopback,
 	.start = hclge_ae_start,
 	.stop = hclge_ae_stop,
+	.client_start = hclge_client_start,
+	.client_stop = hclge_client_stop,
 	.get_status = hclge_get_status,
 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
...
@@ -120,7 +120,7 @@ enum HLCGE_PORT_TYPE {
 #define HCLGE_VECTOR0_IMP_RESET_INT_B	1

 #define HCLGE_MAC_DEFAULT_FRAME \
-	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+	(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
 #define HCLGE_MAC_MIN_FRAME		64
 #define HCLGE_MAC_MAX_FRAME		9728
@@ -678,6 +678,8 @@ struct hclge_dev {
 	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
 	u32 mps; /* Max packet size */
+	/* vport_lock protect resource shared by vports */
+	struct mutex vport_lock;

 	struct hclge_vlan_type_cfg vlan_type_cfg;
@@ -728,6 +730,11 @@ struct hclge_rss_tuple_cfg {
 	u8 ipv6_fragment_en;
 };

+enum HCLGE_VPORT_STATE {
+	HCLGE_VPORT_STATE_ALIVE,
+	HCLGE_VPORT_STATE_MAX
+};
+
 struct hclge_vport {
 	u16 alloc_tqps;	/* Allocated Tx/Rx queues */
@@ -753,6 +760,10 @@ struct hclge_vport {
 	struct hclge_dev *back;  /* Back reference to associated dev */
 	struct hnae3_handle nic;
 	struct hnae3_handle roce;
+
+	unsigned long state;
+	unsigned long last_active_jiffies;
+	u32 mps; /* Max packet size */
 };

 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -800,4 +811,7 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
 int hclge_cfg_flowctrl(struct hclge_dev *hdev);
 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
+int hclge_vport_start(struct hclge_vport *vport);
+void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
 #endif
...
@@ -301,6 +301,21 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
 	return status;
 }

+static int hclge_set_vf_alive(struct hclge_vport *vport,
+			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+			      bool gen_resp)
+{
+	bool alive = !!mbx_req->msg[2];
+	int ret = 0;
+
+	if (alive)
+		ret = hclge_vport_start(vport);
+	else
+		hclge_vport_stop(vport);
+
+	return ret;
+}
+
 static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
 			       struct hclge_mbx_vf_to_pf_cmd *mbx_req,
 			       bool gen_resp)
@@ -380,6 +395,24 @@ static void hclge_reset_vf(struct hclge_vport *vport,
 	hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
 }

+static void hclge_vf_keep_alive(struct hclge_vport *vport,
+				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	vport->last_active_jiffies = jiffies;
+}
+
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	int ret;
+	u32 mtu;
+
+	memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
+	ret = hclge_set_vport_mtu(vport, mtu);
+
+	return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
+
 static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
 {
 	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
@@ -457,6 +490,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 					"PF failed(%d) to config VF's VLAN\n",
 					ret);
 			break;
+		case HCLGE_MBX_SET_ALIVE:
+			ret = hclge_set_vf_alive(vport, req, false);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF failed(%d) to set VF's ALIVE\n",
+					ret);
+			break;
 		case HCLGE_MBX_GET_QINFO:
 			ret = hclge_get_vf_queue_info(vport, req, true);
 			if (ret)
@@ -484,6 +524,15 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 		case HCLGE_MBX_RESET:
 			hclge_reset_vf(vport, req);
 			break;
+		case HCLGE_MBX_KEEP_ALIVE:
+			hclge_vf_keep_alive(vport, req);
+			break;
+		case HCLGE_MBX_SET_MTU:
+			ret = hclge_set_vf_mtu(vport, req);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"VF fail(%d) to set mtu\n", ret);
+			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"un-supported mailbox message, code = %d\n",
...
@@ -1081,6 +1081,14 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 				    2, true, NULL, 0);
 }

+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
+				    sizeof(new_mtu), true, NULL, 0);
+}
+
 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
 				 enum hnae3_reset_notify_type type)
 {
@@ -1515,6 +1523,28 @@ static void hclgevf_mailbox_service_task(struct work_struct *work)
 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
 }

+static void hclgevf_keep_alive_timer(struct timer_list *t)
+{
+	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
+
+	schedule_work(&hdev->keep_alive_task);
+	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+}
+
+static void hclgevf_keep_alive_task(struct work_struct *work)
+{
+	struct hclgevf_dev *hdev;
+	u8 respmsg;
+	int ret;
+
+	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
+	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
+				   0, false, &respmsg, sizeof(u8));
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"VF sends keep alive cmd failed(=%d)\n", ret);
+}
+
 static void hclgevf_service_task(struct work_struct *work)
 {
 	struct hclgevf_dev *hdev;
@@ -1767,6 +1797,38 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
 	hclgevf_update_link_status(hdev, 0);
 }

+static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 msg_data;
+
+	msg_data = alive ? 1 : 0;
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
+				    0, &msg_data, 1, false, NULL, 0);
+}
+
+static int hclgevf_client_start(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+	return hclgevf_set_alive(handle, true);
+}
+
+static void hclgevf_client_stop(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int ret;
+
+	ret = hclgevf_set_alive(handle, false);
+	if (ret)
+		dev_warn(&hdev->pdev->dev,
+			 "%s failed %d\n", __func__, ret);
+
+	del_timer_sync(&hdev->keep_alive_timer);
+	cancel_work_sync(&hdev->keep_alive_task);
+}
+
 static void hclgevf_state_init(struct hclgevf_dev *hdev)
 {
 	/* setup tasks for the MBX */
@@ -2279,6 +2341,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct pci_dev *pdev = ae_dev->pdev;
+	struct hclgevf_dev *hdev;
 	int ret;

 	ret = hclgevf_alloc_hdev(ae_dev);
@@ -2288,10 +2351,16 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	}

 	ret = hclgevf_init_hdev(ae_dev->priv);
-	if (ret)
+	if (ret) {
 		dev_err(&pdev->dev, "hclge device initialization failed\n");
+		return ret;
+	}

-	return ret;
+	hdev = ae_dev->priv;
+	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
+	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
+
+	return 0;
 }

 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
@@ -2413,6 +2482,8 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.uninit_client_instance = hclgevf_uninit_client_instance,
 	.start = hclgevf_ae_start,
 	.stop = hclgevf_ae_stop,
+	.client_start = hclgevf_client_start,
+	.client_stop = hclgevf_client_stop,
 	.map_ring_to_vector = hclgevf_map_ring_to_vector,
 	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
 	.get_vector = hclgevf_get_vector,
@@ -2450,6 +2521,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.ae_dev_resetting = hclgevf_ae_dev_resetting,
 	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
 	.set_gro_en = hclgevf_gro_en,
+	.set_mtu = hclgevf_set_mtu,
 };

 static struct hnae3_ae_algo ae_algovf = {
...
@@ -201,7 +201,9 @@ struct hclgevf_dev {
 	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */

 	struct timer_list service_timer;
+	struct timer_list keep_alive_timer;
 	struct work_struct service_task;
+	struct work_struct keep_alive_task;
 	struct work_struct rst_service_task;
 	struct work_struct mbx_service_task;
...