Commit 0fe45467 authored by Pavan Kumar Linga, committed by Tony Nguyen

idpf: add create vport and netdev configuration

Add the required support to create a vport by spawning
the init task. Once the vport is created, initialize and
allocate the resources needed for it. Configure and register
a netdev for each vport with all the features supported
by the device, based on the capabilities received from the
device Control Plane. Respawn the init task until all the
default vports are created.
Co-developed-by: Alan Brady <alan.brady@intel.com>
Signed-off-by: Alan Brady <alan.brady@intel.com>
Co-developed-by: Joshua Hay <joshua.a.hay@intel.com>
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Co-developed-by: Madhu Chittim <madhu.chittim@intel.com>
Signed-off-by: Madhu Chittim <madhu.chittim@intel.com>
Co-developed-by: Phani Burra <phani.r.burra@intel.com>
Signed-off-by: Phani Burra <phani.r.burra@intel.com>
Co-developed-by: Shailendra Bhatnagar <shailendra.bhatnagar@intel.com>
Signed-off-by: Shailendra Bhatnagar <shailendra.bhatnagar@intel.com>
Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Pavan Kumar Linga <pavan.kumar.linga@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 4930fbf4
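The init-task flow described in the commit message lives in idpf_lib.c, whose diff is collapsed in this view. As a minimal sketch of the pattern only — example_create_vport() and the num_dflt_vports field are hypothetical stand-ins for code not shown here, while init_task, init_wq and num_alloc_vports appear in the visible diff:

	#include <linux/workqueue.h>

	static int example_create_vport(struct idpf_adapter *adapter);	/* hypothetical helper */

	/* Sketch: create one vport per invocation and requeue the task
	 * until all default vports exist.
	 */
	static void example_init_task(struct work_struct *work)
	{
		struct idpf_adapter *adapter;

		adapter = container_of(work, struct idpf_adapter,
				       init_task.work);

		if (example_create_vport(adapter))
			return;

		/* Respawn until every default vport has been created. */
		if (adapter->num_alloc_vports < adapter->num_dflt_vports)
			queue_delayed_work(adapter->init_wq,
					   &adapter->init_task,
					   msecs_to_jiffies(2));
	}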
@@ -11,5 +11,6 @@ idpf-y := \
 	idpf_dev.o \
 	idpf_lib.o \
 	idpf_main.o \
+	idpf_txrx.o \
 	idpf_virtchnl.o \
 	idpf_vf_dev.o
[two collapsed file diffs not shown]
@@ -16,6 +16,7 @@ MODULE_LICENSE("GPL");
 static void idpf_remove(struct pci_dev *pdev)
 {
 	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+	int i;
 
 	set_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
@@ -30,11 +31,41 @@ static void idpf_remove(struct pci_dev *pdev)
 	adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
 	idpf_deinit_dflt_mbx(adapter);
 
+	if (!adapter->netdevs)
+		goto destroy_wqs;
+
+	/* There are some cases where it's possible to still have netdevs
+	 * registered with the stack at this point, e.g. if the driver detected
+	 * a HW reset and rmmod is called before it fully recovers. Unregister
+	 * any stale netdevs here.
+	 */
+	for (i = 0; i < adapter->max_vports; i++) {
+		if (!adapter->netdevs[i])
+			continue;
+		if (adapter->netdevs[i]->reg_state != NETREG_UNINITIALIZED)
+			unregister_netdev(adapter->netdevs[i]);
+		free_netdev(adapter->netdevs[i]);
+		adapter->netdevs[i] = NULL;
+	}
+
+destroy_wqs:
+	destroy_workqueue(adapter->init_wq);
 	destroy_workqueue(adapter->serv_wq);
 	destroy_workqueue(adapter->mbx_wq);
 	destroy_workqueue(adapter->vc_event_wq);
+
+	for (i = 0; i < adapter->max_vports; i++) {
+		kfree(adapter->vport_config[i]);
+		adapter->vport_config[i] = NULL;
+	}
+	kfree(adapter->vport_config);
+	adapter->vport_config = NULL;
+	kfree(adapter->netdevs);
+	adapter->netdevs = NULL;
+
 	mutex_destroy(&adapter->vport_ctrl_lock);
 	mutex_destroy(&adapter->vector_lock);
+	mutex_destroy(&adapter->queue_lock);
 	mutex_destroy(&adapter->vc_buf_lock);
 
 	pci_set_drvdata(pdev, NULL);
@@ -93,6 +124,9 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!adapter)
 		return -ENOMEM;
 
+	adapter->req_tx_splitq = true;
+	adapter->req_rx_splitq = true;
+
 	switch (ent->device) {
 	case IDPF_DEV_ID_PF:
 		idpf_dev_ops_init(adapter);
@@ -130,13 +164,22 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_master(pdev);
 	pci_set_drvdata(pdev, adapter);
 
+	adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
+					   dev_driver_string(dev),
+					   dev_name(dev));
+	if (!adapter->init_wq) {
+		dev_err(dev, "Failed to allocate init workqueue\n");
+		err = -ENOMEM;
+		goto err_free;
+	}
+
 	adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
 					   dev_driver_string(dev),
 					   dev_name(dev));
 	if (!adapter->serv_wq) {
 		dev_err(dev, "Failed to allocate service workqueue\n");
 		err = -ENOMEM;
-		goto err_free;
+		goto err_serv_wq_alloc;
 	}
 
 	adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
@@ -169,10 +212,12 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	mutex_init(&adapter->vport_ctrl_lock);
 	mutex_init(&adapter->vector_lock);
+	mutex_init(&adapter->queue_lock);
 	mutex_init(&adapter->vc_buf_lock);
 
 	init_waitqueue_head(&adapter->vchnl_wq);
 
+	INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
 	INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
 	INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
 	INIT_DELAYED_WORK(&adapter->vc_event_task, idpf_vc_event_task);
@@ -190,6 +235,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	destroy_workqueue(adapter->mbx_wq);
 err_mbx_wq_alloc:
 	destroy_workqueue(adapter->serv_wq);
+err_serv_wq_alloc:
+	destroy_workqueue(adapter->init_wq);
 err_free:
 	kfree(adapter);
 	return err;
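The probe changes above follow the kernel's stacked-goto unwind idiom: init_wq is now the first allocation, so a later failure jumps to a label (err_serv_wq_alloc) that destroys it before falling through to err_free. A self-contained sketch of the idiom, with illustrative names rather than driver code:

	#include <linux/workqueue.h>

	/* Sketch: each failure label unwinds the allocations made before it,
	 * in reverse order.
	 */
	static int example_setup(void)
	{
		struct workqueue_struct *init_wq, *serv_wq;

		init_wq = alloc_workqueue("example-init", 0, 0);
		if (!init_wq)
			return -ENOMEM;

		serv_wq = alloc_workqueue("example-service", 0, 0);
		if (!serv_wq)
			goto err_serv_wq_alloc;	/* must also free init_wq */

		/* ... further setup ... */
		return 0;

	err_serv_wq_alloc:
		destroy_workqueue(init_wq);
		return -ENOMEM;
	}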
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
/**
* idpf_vport_init_num_qs - Initialize number of queues
* @vport: vport to initialize queues
* @vport_msg: data to be filled into vport
*/
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg)
{
struct idpf_vport_user_config_data *config_data;
u16 idx = vport->idx;
config_data = &vport->adapter->vport_config[idx]->user_config;
vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
	/* number of txqs and rxqs in config data will be zeros only in the
	 * driver load path and we don't update them thereafter
	 */
if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
}
if (idpf_is_queue_model_split(vport->txq_model))
vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
if (idpf_is_queue_model_split(vport->rxq_model))
vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
/* Adjust number of buffer queues per Rx queue group. */
if (!idpf_is_queue_model_split(vport->rxq_model)) {
vport->num_bufqs_per_qgrp = 0;
vport->bufq_size[0] = IDPF_RX_BUF_2048;
return;
}
vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
/* Bufq[0] default buffer size is 4K
* Bufq[1] default buffer size is 2K
*/
vport->bufq_size[0] = IDPF_RX_BUF_4096;
vport->bufq_size[1] = IDPF_RX_BUF_2048;
}
/**
 * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
 * @vport: vport to calculate descriptor counts for
 */
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
{
struct idpf_vport_user_config_data *config_data;
int num_bufqs = vport->num_bufqs_per_qgrp;
u32 num_req_txq_desc, num_req_rxq_desc;
u16 idx = vport->idx;
int i;
config_data = &vport->adapter->vport_config[idx]->user_config;
num_req_txq_desc = config_data->num_req_txq_desc;
num_req_rxq_desc = config_data->num_req_rxq_desc;
vport->complq_desc_count = 0;
if (num_req_txq_desc) {
vport->txq_desc_count = num_req_txq_desc;
if (idpf_is_queue_model_split(vport->txq_model)) {
vport->complq_desc_count = num_req_txq_desc;
if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
vport->complq_desc_count =
IDPF_MIN_TXQ_COMPLQ_DESC;
}
} else {
vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
if (idpf_is_queue_model_split(vport->txq_model))
vport->complq_desc_count =
IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
}
if (num_req_rxq_desc)
vport->rxq_desc_count = num_req_rxq_desc;
else
vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
for (i = 0; i < num_bufqs; i++) {
if (!vport->bufq_desc_count[i])
vport->bufq_desc_count[i] =
IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
num_bufqs);
}
}
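Concretely, when no user-requested descriptor counts are present, a split queue model vport gets the defaults defined in idpf_txrx.h: 512 Tx descriptors, 512 completion descriptors and 512 Rx descriptors, while each of the two buffer queues gets IDPF_RX_BUFQ_DESC_COUNT(512, 2) = 256 descriptors.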
/**
* idpf_vport_calc_total_qs - Calculate total number of queues
* @adapter: private data struct
* @vport_idx: vport idx to retrieve vport pointer
* @vport_msg: message to fill with data
* @max_q: vport max queue info
*
* Return 0 on success, error value on failure.
*/
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
struct virtchnl2_create_vport *vport_msg,
struct idpf_vport_max_q *max_q)
{
int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
struct idpf_vport_config *vport_config;
u16 num_txq_grps, num_rxq_grps;
u32 num_qs;
vport_config = adapter->vport_config[vport_idx];
if (vport_config) {
num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
} else {
int num_cpus;
/* Restrict num of queues to cpus online as a default
* configuration to give best performance. User can always
* override to a max number of queues via ethtool.
*/
num_cpus = num_online_cpus();
dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
}
if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
IDPF_COMPLQ_PER_GROUP);
vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
} else {
num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
dflt_singleq_txqs);
vport_msg->num_tx_q = cpu_to_le16(num_qs);
vport_msg->num_tx_complq = 0;
}
if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
IDPF_MAX_BUFQS_PER_RXQ_GRP);
vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
} else {
num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
dflt_singleq_rxqs);
vport_msg->num_rx_q = cpu_to_le16(num_qs);
vport_msg->num_rx_bufq = 0;
}
return 0;
}
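As a worked example, assuming the device exposes at least as many queues as there are CPUs: on a host with 8 online CPUs and no saved user configuration, the split queue model requests 8 Tx queue groups (8 Tx queues plus 8 completion queues) and 8 Rx queue groups (8 Rx queues plus 16 buffer queues, at IDPF_MAX_BUFQS_PER_RXQ_GRP = 2 per group). The single queue model instead uses one group of 8 Tx queues and one group of 8 Rx queues, with no completion or buffer queues.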
/**
* idpf_vport_calc_num_q_groups - Calculate number of queue groups
* @vport: vport to calculate q groups for
*/
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
{
if (idpf_is_queue_model_split(vport->txq_model))
vport->num_txq_grp = vport->num_txq;
else
vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
if (idpf_is_queue_model_split(vport->rxq_model))
vport->num_rxq_grp = vport->num_rxq;
else
vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
}
@@ -4,10 +4,53 @@
 #ifndef _IDPF_TXRX_H_
 #define _IDPF_TXRX_H_
 
+#define IDPF_MAX_Q				16
+#define IDPF_MIN_Q				2
+
+#define IDPF_MIN_TXQ_COMPLQ_DESC		256
+
+#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS		1
+#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS		1
+#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP		4
+#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP		4
+
+#define IDPF_COMPLQ_PER_GROUP			1
+#define IDPF_MAX_BUFQS_PER_RXQ_GRP		2
+
+#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP		1
+#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP		1
+
 /* Default vector sharing */
 #define IDPF_MBX_Q_VEC		1
 #define IDPF_MIN_Q_VEC		1
 
+#define IDPF_DFLT_TX_Q_DESC_COUNT		512
+#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512
+#define IDPF_DFLT_RX_Q_DESC_COUNT		512
+
+/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
+ * given RX completion queue has descriptors. This includes _ALL_ buffer
+ * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
+ * you have a total of 1024 buffers so your RX queue _must_ have at least that
+ * many descriptors. This macro divides a given number of RX descriptors by
+ * number of buffer queues to calculate how many descriptors each buffer queue
+ * can have without overrunning the RX queue.
+ *
+ * If you give hardware more buffers than completion descriptors what will
+ * happen is that if hardware gets a chance to post more than ring wrap of
+ * descriptors before SW gets an interrupt and overwrites SW head, the gen bit
+ * in the descriptor will be wrong. Any overwritten descriptors' buffers will
+ * be gone forever and SW has no reasonable way to tell that this has happened.
+ * From SW perspective, when we finally get an interrupt, it looks like we're
+ * still waiting for descriptor to be done, stalling forever.
+ */
+#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))
+
+#define IDPF_RX_BUF_2048			2048
+#define IDPF_RX_BUF_4096			4096
+
+#define IDPF_PACKET_HDR_PAD \
+	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
+
 /**
  * struct idpf_intr_reg
  * @dyn_ctl: Dynamic control interrupt register
@@ -35,4 +78,13 @@ struct idpf_q_vector {
 	struct idpf_intr_reg intr_reg;
 	char *name;
 };
+
+void idpf_vport_init_num_qs(struct idpf_vport *vport,
+			    struct virtchnl2_create_vport *vport_msg);
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
+int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
+			     struct virtchnl2_create_vport *vport_msg,
+			     struct idpf_vport_max_q *max_q);
+void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
+
 #endif /* !_IDPF_TXRX_H_ */
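Checking the defaults against the invariant spelled out in the comment above: IDPF_RX_BUFQ_DESC_COUNT(512, 2) gives each of the two buffer queues 256 descriptors, so at most 2 * 256 = 512 buffers can be posted at once, exactly matching the 512 descriptors of the Rx queue they feed.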