Commit 9a4d7e86 authored by Sudarsana Reddy Kalluru, committed by David S. Miller

qede: Add support for Tx/Rx-only queues.

Add provision for configuring the fastpath queues with Tx (or Rx) only
functionality.
Signed-off-by: Sudarsana Reddy Kalluru <sudarsana.kalluru@qlogic.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f8edcd12
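
The patch splits the fastpath array into Rx-only entries at the front, combined entries in the middle, and Tx-only entries at the end, and replaces the old RSS-count macros accordingly. Before the diff, here is a minimal standalone sketch (not part of this commit) of how the new QEDE_RSS_COUNT/QEDE_TSS_COUNT/QEDE_TX_IDX/QEDE_TC_IDX expressions resolve once such a split exists. The queue counts are hypothetical and stand in for what the driver derives from the ethtool channel configuration (rx/tx/combined counts):

/* Standalone sketch, not driver code: hypothetical 2 Rx-only + 4 combined
 * + 2 Tx-only fastpaths with a single traffic class.
 */
#include <stdio.h>

#define NUM_QUEUES 8	/* total fastpaths: rx-only + combined + tx-only */
#define FP_NUM_RX  2	/* Rx-only fastpaths, placed first in fp_array */
#define FP_NUM_TX  2	/* Tx-only fastpaths, placed last in fp_array */
#define NUM_TC     1	/* traffic classes */

#define RSS_COUNT	(NUM_QUEUES - FP_NUM_TX)		/* fastpaths with an Rx queue */
#define TSS_COUNT	((NUM_QUEUES - FP_NUM_RX) * NUM_TC)	/* Tx queues exposed to the stack */
#define TX_IDX(txqidx)	(FP_NUM_RX + (txqidx) % TSS_COUNT)	/* fp_array slot for this Tx queue */
#define TC_IDX(txqidx)	((txqidx) / TSS_COUNT)			/* traffic class of this Tx queue */

int main(void)
{
	int txqidx;

	/* txq 0..5 land on fp_array[2]..fp_array[7]: the 4 combined and
	 * 2 Tx-only fastpaths that follow the 2 Rx-only ones.
	 */
	for (txqidx = 0; txqidx < TSS_COUNT; txqidx++)
		printf("txq %d -> fp_array[%d], tc %d\n",
		       txqidx, TX_IDX(txqidx), TC_IDX(txqidx));
	return 0;
}

As the diff below shows, qede_alloc_fp_array() assigns fp->type in exactly this order (Rx-only first, then combined, then Tx-only) so that the resulting rxq and txq ids stay sequential within the fastpath array.
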
...@@ -126,16 +126,22 @@ struct qede_dev { ...@@ -126,16 +126,22 @@ struct qede_dev {
(edev)->dev_info.num_tc) (edev)->dev_info.num_tc)
struct qede_fastpath *fp_array; struct qede_fastpath *fp_array;
u16 req_rss; u8 req_num_tx;
u16 num_rss; u8 fp_num_tx;
u8 req_num_rx;
u8 fp_num_rx;
u16 req_queues;
u16 num_queues;
u8 num_tc; u8 num_tc;
#define QEDE_RSS_CNT(edev) ((edev)->num_rss) #define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \ #define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \
(edev)->num_tc) (edev)->num_tc)
#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss) #define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + (txqidx) % \
#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss) QEDE_TSS_COUNT(edev))
#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev))
#define QEDE_TX_QUEUE(edev, txqidx) \ #define QEDE_TX_QUEUE(edev, txqidx) \
(&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \ (&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\
(edev), (txqidx))]) (edev), (txqidx))])
struct qed_int_info int_info; struct qed_int_info int_info;
...@@ -284,7 +290,11 @@ struct qede_tx_queue { ...@@ -284,7 +290,11 @@ struct qede_tx_queue {
struct qede_fastpath { struct qede_fastpath {
struct qede_dev *edev; struct qede_dev *edev;
u8 rss_id; #define QEDE_FASTPATH_TX BIT(0)
#define QEDE_FASTPATH_RX BIT(1)
#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
u8 type;
u8 id;
struct napi_struct napi; struct napi_struct napi;
struct qed_sb_info *sb_info; struct qed_sb_info *sb_info;
struct qede_rx_queue *rxq; struct qede_rx_queue *rxq;
...@@ -344,6 +354,6 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, ...@@ -344,6 +354,6 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
#define QEDE_MIN_PKT_LEN 64 #define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256 #define QEDE_RX_HDR_SIZE 256
#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++) #define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#endif /* _QEDE_H_ */ #endif /* _QEDE_H_ */
...@@ -172,7 +172,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) ...@@ -172,7 +172,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{ {
int i, j, k; int i, j, k;
for (i = 0, k = 0; i < edev->num_rss; i++) { for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
int tc; int tc;
for (j = 0; j < QEDE_NUM_RQSTATS; j++) for (j = 0; j < QEDE_NUM_RQSTATS; j++)
...@@ -230,15 +230,21 @@ static void qede_get_ethtool_stats(struct net_device *dev, ...@@ -230,15 +230,21 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock); mutex_lock(&edev->qede_lock);
for (qid = 0; qid < edev->num_rss; qid++) { for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) {
int tc; int tc;
if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) {
for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid); buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
}
if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < edev->num_tc; tc++) { for (tc = 0; tc < edev->num_tc; tc++) {
for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++) for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
buf[cnt++] = QEDE_TQSTATS_DATA(edev, sidx, qid, buf[cnt++] = QEDE_TQSTATS_DATA(edev,
tc); sidx,
qid, tc);
}
} }
} }
...@@ -265,8 +271,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) ...@@ -265,8 +271,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
if (qede_stats_arr[i].pf_only) if (qede_stats_arr[i].pf_only)
num_stats--; num_stats--;
} }
return num_stats + edev->num_rss * return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS +
(QEDE_NUM_RQSTATS + QEDE_NUM_TQSTATS * edev->num_tc); QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc;
case ETH_SS_PRIV_FLAGS: case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN; return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST: case ETH_SS_TEST:
...@@ -576,7 +582,7 @@ static int qede_set_coalesce(struct net_device *dev, ...@@ -576,7 +582,7 @@ static int qede_set_coalesce(struct net_device *dev,
rxc = (u16)coal->rx_coalesce_usecs; rxc = (u16)coal->rx_coalesce_usecs;
txc = (u16)coal->tx_coalesce_usecs; txc = (u16)coal->tx_coalesce_usecs;
for_each_rss(i) { for_each_queue(i) {
sb_id = edev->fp_array[i].sb_info->igu_sb_id; sb_id = edev->fp_array[i].sb_info->igu_sb_id;
rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc, rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
(u8)i, sb_id); (u8)i, sb_id);
...@@ -728,45 +734,70 @@ static void qede_get_channels(struct net_device *dev, ...@@ -728,45 +734,70 @@ static void qede_get_channels(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev); struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev); channels->max_combined = QEDE_MAX_RSS_CNT(edev);
channels->combined_count = QEDE_RSS_CNT(edev); channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
edev->fp_num_rx;
channels->tx_count = edev->fp_num_tx;
channels->rx_count = edev->fp_num_rx;
} }
static int qede_set_channels(struct net_device *dev, static int qede_set_channels(struct net_device *dev,
struct ethtool_channels *channels) struct ethtool_channels *channels)
{ {
struct qede_dev *edev = netdev_priv(dev); struct qede_dev *edev = netdev_priv(dev);
u32 count;
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
channels->rx_count, channels->tx_count, channels->rx_count, channels->tx_count,
channels->other_count, channels->combined_count); channels->other_count, channels->combined_count);
/* We don't support separate rx / tx, nor `other' channels. */ count = channels->rx_count + channels->tx_count +
if (channels->rx_count || channels->tx_count || channels->combined_count;
channels->other_count || (channels->combined_count == 0) ||
(channels->combined_count > QEDE_MAX_RSS_CNT(edev))) { /* We don't support `other' channels */
if (channels->other_count) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"command parameters not supported\n"); "command parameters not supported\n");
return -EINVAL; return -EINVAL;
} }
if (!(channels->combined_count || (channels->rx_count &&
channels->tx_count))) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"need to request at least one transmit and one receive channel\n");
return -EINVAL;
}
if (count > QEDE_MAX_RSS_CNT(edev)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"requested channels = %d max supported channels = %d\n",
count, QEDE_MAX_RSS_CNT(edev));
return -EINVAL;
}
/* Check if there was a change in the active parameters */ /* Check if there was a change in the active parameters */
if (channels->combined_count == QEDE_RSS_CNT(edev)) { if ((count == QEDE_QUEUE_CNT(edev)) &&
(channels->tx_count == edev->fp_num_tx) &&
(channels->rx_count == edev->fp_num_rx)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"No change in active parameters\n"); "No change in active parameters\n");
return 0; return 0;
} }
/* We need the number of queues to be divisible between the hwfns */ /* We need the number of queues to be divisible between the hwfns */
if (channels->combined_count % edev->dev_info.common.num_hwfns) { if ((count % edev->dev_info.common.num_hwfns) ||
(channels->tx_count % edev->dev_info.common.num_hwfns) ||
(channels->rx_count % edev->dev_info.common.num_hwfns)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Number of channels must be divisable by %04x\n", "Number of channels must be divisible by %04x\n",
edev->dev_info.common.num_hwfns); edev->dev_info.common.num_hwfns);
return -EINVAL; return -EINVAL;
} }
/* Set number of queues and reload if necessary */ /* Set number of queues and reload if necessary */
edev->req_rss = channels->combined_count; edev->req_queues = count;
edev->req_num_tx = channels->tx_count;
edev->req_num_rx = channels->rx_count;
if (netif_running(dev)) if (netif_running(dev))
qede_reload(edev, NULL, NULL); qede_reload(edev, NULL, NULL);
...@@ -836,7 +867,7 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, ...@@ -836,7 +867,7 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
switch (info->cmd) { switch (info->cmd) {
case ETHTOOL_GRXRINGS: case ETHTOOL_GRXRINGS:
info->data = edev->num_rss; info->data = QEDE_RSS_COUNT(edev);
return 0; return 0;
case ETHTOOL_GRXFH: case ETHTOOL_GRXFH:
return qede_get_rss_flags(edev, info); return qede_get_rss_flags(edev, info);
...@@ -1039,7 +1070,7 @@ static void qede_netif_start(struct qede_dev *edev) ...@@ -1039,7 +1070,7 @@ static void qede_netif_start(struct qede_dev *edev)
if (!netif_running(edev->ndev)) if (!netif_running(edev->ndev))
return; return;
for_each_rss(i) { for_each_queue(i) {
/* Update and reenable interrupts */ /* Update and reenable interrupts */
qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1); qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
napi_enable(&edev->fp_array[i].napi); napi_enable(&edev->fp_array[i].napi);
...@@ -1051,7 +1082,7 @@ static void qede_netif_stop(struct qede_dev *edev) ...@@ -1051,7 +1082,7 @@ static void qede_netif_stop(struct qede_dev *edev)
{ {
int i; int i;
for_each_rss(i) { for_each_queue(i) {
napi_disable(&edev->fp_array[i].napi); napi_disable(&edev->fp_array[i].napi);
/* Disable interrupts */ /* Disable interrupts */
qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0); qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
...@@ -1061,11 +1092,23 @@ static void qede_netif_stop(struct qede_dev *edev) ...@@ -1061,11 +1092,23 @@ static void qede_netif_stop(struct qede_dev *edev)
static int qede_selftest_transmit_traffic(struct qede_dev *edev, static int qede_selftest_transmit_traffic(struct qede_dev *edev,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0]; struct qede_tx_queue *txq = NULL;
struct eth_tx_1st_bd *first_bd; struct eth_tx_1st_bd *first_bd;
dma_addr_t mapping; dma_addr_t mapping;
int i, idx, val; int i, idx, val;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
txq = edev->fp_array[i].txqs;
break;
}
}
if (!txq) {
DP_NOTICE(edev, "Tx path is not available\n");
return -1;
}
/* Fill the entry in the SW ring and the BDs in the FW ring */ /* Fill the entry in the SW ring and the BDs in the FW ring */
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
txq->sw_tx_ring[idx].skb = skb; txq->sw_tx_ring[idx].skb = skb;
...@@ -1129,14 +1172,26 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, ...@@ -1129,14 +1172,26 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
static int qede_selftest_receive_traffic(struct qede_dev *edev) static int qede_selftest_receive_traffic(struct qede_dev *edev)
{ {
struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len; u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
struct eth_fast_path_rx_reg_cqe *fp_cqe; struct eth_fast_path_rx_reg_cqe *fp_cqe;
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data; struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe; union eth_rx_cqe *cqe;
u8 *data_ptr; u8 *data_ptr;
int i; int i;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
rxq = edev->fp_array[i].rxq;
break;
}
}
if (!rxq) {
DP_NOTICE(edev, "Rx path is not available\n");
return -1;
}
/* The packet is expected to receive on rx-queue 0 even though RSS is /* The packet is expected to receive on rx-queue 0 even though RSS is
* enabled. This is because the queue 0 is configured as the default * enabled. This is because the queue 0 is configured as the default
* queue and that the loopback traffic is not IP. * queue and that the loopback traffic is not IP.
......
...@@ -519,7 +519,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, ...@@ -519,7 +519,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Get tx-queue context and netdev index */ /* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb); txq_index = skb_get_queue_mapping(skb);
WARN_ON(txq_index >= QEDE_TSS_CNT(edev)); WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
txq = QEDE_TX_QUEUE(edev, txq_index); txq = QEDE_TX_QUEUE(edev, txq_index);
netdev_txq = netdev_get_tx_queue(ndev, txq_index); netdev_txq = netdev_get_tx_queue(ndev, txq_index);
...@@ -1203,7 +1203,7 @@ static void qede_gro_receive(struct qede_dev *edev, ...@@ -1203,7 +1203,7 @@ static void qede_gro_receive(struct qede_dev *edev,
#endif #endif
send_skb: send_skb:
skb_record_rx_queue(skb, fp->rss_id); skb_record_rx_queue(skb, fp->rxq->rxq_id);
qede_skb_receive(edev, fp, skb, vlan_tag); qede_skb_receive(edev, fp, skb, vlan_tag);
} }
...@@ -1407,7 +1407,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) ...@@ -1407,7 +1407,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
edev->ops->eth_cqe_completion( edev->ops->eth_cqe_completion(
edev->cdev, fp->rss_id, edev->cdev, fp->id,
(struct eth_slow_path_rx_cqe *)cqe); (struct eth_slow_path_rx_cqe *)cqe);
goto next_cqe; goto next_cqe;
} }
...@@ -1578,7 +1578,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) ...@@ -1578,7 +1578,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
qede_set_skb_csum(skb, csum_flag); qede_set_skb_csum(skb, csum_flag);
skb_record_rx_queue(skb, fp->rss_id); skb_record_rx_queue(skb, fp->rxq->rxq_id);
qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
next_rx_only: next_rx_only:
...@@ -1611,10 +1611,12 @@ static int qede_poll(struct napi_struct *napi, int budget) ...@@ -1611,10 +1611,12 @@ static int qede_poll(struct napi_struct *napi, int budget)
u8 tc; u8 tc;
for (tc = 0; tc < edev->num_tc; tc++) for (tc = 0; tc < edev->num_tc; tc++)
if (qede_txq_has_work(&fp->txqs[tc])) if (likely(fp->type & QEDE_FASTPATH_TX) &&
qede_txq_has_work(&fp->txqs[tc]))
qede_tx_int(edev, &fp->txqs[tc]); qede_tx_int(edev, &fp->txqs[tc]);
rx_work_done = qede_has_rx_work(fp->rxq) ? rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0; qede_rx_int(fp, budget) : 0;
if (rx_work_done < budget) { if (rx_work_done < budget) {
qed_sb_update_sb_idx(fp->sb_info); qed_sb_update_sb_idx(fp->sb_info);
...@@ -1634,8 +1636,10 @@ static int qede_poll(struct napi_struct *napi, int budget) ...@@ -1634,8 +1636,10 @@ static int qede_poll(struct napi_struct *napi, int budget)
rmb(); rmb();
/* Fall out from the NAPI loop if needed */ /* Fall out from the NAPI loop if needed */
if (!(qede_has_rx_work(fp->rxq) || if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_tx_work(fp))) { qede_has_rx_work(fp->rxq)) ||
(likely(fp->type & QEDE_FASTPATH_TX) &&
qede_has_tx_work(fp)))) {
napi_complete(napi); napi_complete(napi);
/* Update and reenable interrupts */ /* Update and reenable interrupts */
...@@ -2349,7 +2353,7 @@ static void qede_free_fp_array(struct qede_dev *edev) ...@@ -2349,7 +2353,7 @@ static void qede_free_fp_array(struct qede_dev *edev)
struct qede_fastpath *fp; struct qede_fastpath *fp;
int i; int i;
for_each_rss(i) { for_each_queue(i) {
fp = &edev->fp_array[i]; fp = &edev->fp_array[i];
kfree(fp->sb_info); kfree(fp->sb_info);
...@@ -2358,22 +2362,33 @@ static void qede_free_fp_array(struct qede_dev *edev) ...@@ -2358,22 +2362,33 @@ static void qede_free_fp_array(struct qede_dev *edev)
} }
kfree(edev->fp_array); kfree(edev->fp_array);
} }
edev->num_rss = 0;
edev->num_queues = 0;
edev->fp_num_tx = 0;
edev->fp_num_rx = 0;
} }
static int qede_alloc_fp_array(struct qede_dev *edev) static int qede_alloc_fp_array(struct qede_dev *edev)
{ {
u8 fp_combined, fp_rx = edev->fp_num_rx;
struct qede_fastpath *fp; struct qede_fastpath *fp;
int i; int i;
edev->fp_array = kcalloc(QEDE_RSS_CNT(edev), edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
sizeof(*edev->fp_array), GFP_KERNEL); sizeof(*edev->fp_array), GFP_KERNEL);
if (!edev->fp_array) { if (!edev->fp_array) {
DP_NOTICE(edev, "fp array allocation failed\n"); DP_NOTICE(edev, "fp array allocation failed\n");
goto err; goto err;
} }
for_each_rss(i) { fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
/* Allocate the FP elements for Rx queues followed by combined and then
* the Tx. This ordering should be maintained so that the respective
* queues (Rx or Tx) will be together in the fastpath array and the
* associated ids will be sequential.
*/
for_each_queue(i) {
fp = &edev->fp_array[i]; fp = &edev->fp_array[i];
fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
...@@ -2382,19 +2397,36 @@ static int qede_alloc_fp_array(struct qede_dev *edev) ...@@ -2382,19 +2397,36 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
goto err; goto err;
} }
fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL); if (fp_rx) {
if (!fp->rxq) { fp->type = QEDE_FASTPATH_RX;
DP_NOTICE(edev, "RXQ struct allocation failed\n"); fp_rx--;
goto err; } else if (fp_combined) {
fp->type = QEDE_FASTPATH_COMBINED;
fp_combined--;
} else {
fp->type = QEDE_FASTPATH_TX;
} }
fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL); if (fp->type & QEDE_FASTPATH_TX) {
fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
GFP_KERNEL);
if (!fp->txqs) { if (!fp->txqs) {
DP_NOTICE(edev, "TXQ array allocation failed\n"); DP_NOTICE(edev,
"TXQ array allocation failed\n");
goto err; goto err;
} }
} }
if (fp->type & QEDE_FASTPATH_RX) {
fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
if (!fp->rxq) {
DP_NOTICE(edev,
"RXQ struct allocation failed\n");
goto err;
}
}
}
return 0; return 0;
err: err:
qede_free_fp_array(edev); qede_free_fp_array(edev);
...@@ -2605,8 +2637,8 @@ static int qede_set_num_queues(struct qede_dev *edev) ...@@ -2605,8 +2637,8 @@ static int qede_set_num_queues(struct qede_dev *edev)
u16 rss_num; u16 rss_num;
/* Setup queues according to possible resources*/ /* Setup queues according to possible resources*/
if (edev->req_rss) if (edev->req_queues)
rss_num = edev->req_rss; rss_num = edev->req_queues;
else else
rss_num = netif_get_num_default_rss_queues() * rss_num = netif_get_num_default_rss_queues() *
edev->dev_info.common.num_hwfns; edev->dev_info.common.num_hwfns;
...@@ -2616,11 +2648,15 @@ static int qede_set_num_queues(struct qede_dev *edev) ...@@ -2616,11 +2648,15 @@ static int qede_set_num_queues(struct qede_dev *edev)
rc = edev->ops->common->set_fp_int(edev->cdev, rss_num); rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
if (rc > 0) { if (rc > 0) {
/* Managed to request interrupts for our queues */ /* Managed to request interrupts for our queues */
edev->num_rss = rc; edev->num_queues = rc;
DP_INFO(edev, "Managed %d [of %d] RSS queues\n", DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
QEDE_RSS_CNT(edev), rss_num); QEDE_QUEUE_CNT(edev), rss_num);
rc = 0; rc = 0;
} }
edev->fp_num_tx = edev->req_num_tx;
edev->fp_num_rx = edev->req_num_rx;
return rc; return rc;
} }
...@@ -2912,32 +2948,38 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) ...@@ -2912,32 +2948,38 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
qede_free_mem_sb(edev, fp->sb_info); qede_free_mem_sb(edev, fp->sb_info);
if (fp->type & QEDE_FASTPATH_RX)
qede_free_mem_rxq(edev, fp->rxq); qede_free_mem_rxq(edev, fp->rxq);
if (fp->type & QEDE_FASTPATH_TX)
for (tc = 0; tc < edev->num_tc; tc++) for (tc = 0; tc < edev->num_tc; tc++)
qede_free_mem_txq(edev, &fp->txqs[tc]); qede_free_mem_txq(edev, &fp->txqs[tc]);
} }
/* This function allocates all memory needed for a single fp (i.e. an entity /* This function allocates all memory needed for a single fp (i.e. an entity
* which contains status block, one rx queue and multiple per-TC tx queues. * which contains status block, one rx queue and/or multiple per-TC tx queues.
*/ */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{ {
int rc, tc; int rc, tc;
rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id); rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
if (rc) if (rc)
goto err; goto err;
if (fp->type & QEDE_FASTPATH_RX) {
rc = qede_alloc_mem_rxq(edev, fp->rxq); rc = qede_alloc_mem_rxq(edev, fp->rxq);
if (rc) if (rc)
goto err; goto err;
}
if (fp->type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < edev->num_tc; tc++) { for (tc = 0; tc < edev->num_tc; tc++) {
rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
if (rc) if (rc)
goto err; goto err;
} }
}
return 0; return 0;
err: err:
...@@ -2948,7 +2990,7 @@ static void qede_free_mem_load(struct qede_dev *edev) ...@@ -2948,7 +2990,7 @@ static void qede_free_mem_load(struct qede_dev *edev)
{ {
int i; int i;
for_each_rss(i) { for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i]; struct qede_fastpath *fp = &edev->fp_array[i];
qede_free_mem_fp(edev, fp); qede_free_mem_fp(edev, fp);
...@@ -2958,16 +3000,16 @@ static void qede_free_mem_load(struct qede_dev *edev) ...@@ -2958,16 +3000,16 @@ static void qede_free_mem_load(struct qede_dev *edev)
/* This function allocates all qede memory at NIC load. */ /* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev) static int qede_alloc_mem_load(struct qede_dev *edev)
{ {
int rc = 0, rss_id; int rc = 0, queue_id;
for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) { for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
struct qede_fastpath *fp = &edev->fp_array[rss_id]; struct qede_fastpath *fp = &edev->fp_array[queue_id];
rc = qede_alloc_mem_fp(edev, fp); rc = qede_alloc_mem_fp(edev, fp);
if (rc) { if (rc) {
DP_ERR(edev, DP_ERR(edev,
"Failed to allocate memory for fastpath - rss id = %d\n", "Failed to allocate memory for fastpath - rss id = %d\n",
rss_id); queue_id);
qede_free_mem_load(edev); qede_free_mem_load(edev);
return rc; return rc;
} }
...@@ -2979,32 +3021,38 @@ static int qede_alloc_mem_load(struct qede_dev *edev) ...@@ -2979,32 +3021,38 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
/* This function inits fp content and resets the SB, RXQ and TXQ structures */ /* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev) static void qede_init_fp(struct qede_dev *edev)
{ {
int rss_id, txq_index, tc; int queue_id, rxq_index = 0, txq_index = 0, tc;
struct qede_fastpath *fp; struct qede_fastpath *fp;
for_each_rss(rss_id) { for_each_queue(queue_id) {
fp = &edev->fp_array[rss_id]; fp = &edev->fp_array[queue_id];
fp->edev = edev; fp->edev = edev;
fp->rss_id = rss_id; fp->id = queue_id;
memset((void *)&fp->napi, 0, sizeof(fp->napi)); memset((void *)&fp->napi, 0, sizeof(fp->napi));
memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info)); memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
if (fp->type & QEDE_FASTPATH_RX) {
memset((void *)fp->rxq, 0, sizeof(*fp->rxq)); memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
fp->rxq->rxq_id = rss_id; fp->rxq->rxq_id = rxq_index++;
}
memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs))); if (fp->type & QEDE_FASTPATH_TX) {
memset((void *)fp->txqs, 0,
(edev->num_tc * sizeof(*fp->txqs)));
for (tc = 0; tc < edev->num_tc; tc++) { for (tc = 0; tc < edev->num_tc; tc++) {
txq_index = tc * QEDE_RSS_CNT(edev) + rss_id; fp->txqs[tc].index = txq_index +
fp->txqs[tc].index = txq_index; tc * QEDE_TSS_COUNT(edev);
if (edev->dev_info.is_legacy) if (edev->dev_info.is_legacy)
fp->txqs[tc].is_legacy = true; fp->txqs[tc].is_legacy = true;
} }
txq_index++;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
edev->ndev->name, rss_id); edev->ndev->name, queue_id);
} }
edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO); edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
...@@ -3014,12 +3062,13 @@ static int qede_set_real_num_queues(struct qede_dev *edev) ...@@ -3014,12 +3062,13 @@ static int qede_set_real_num_queues(struct qede_dev *edev)
{ {
int rc = 0; int rc = 0;
rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev)); rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
if (rc) { if (rc) {
DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
return rc; return rc;
} }
rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
if (rc) { if (rc) {
DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
return rc; return rc;
...@@ -3032,7 +3081,7 @@ static void qede_napi_disable_remove(struct qede_dev *edev) ...@@ -3032,7 +3081,7 @@ static void qede_napi_disable_remove(struct qede_dev *edev)
{ {
int i; int i;
for_each_rss(i) { for_each_queue(i) {
napi_disable(&edev->fp_array[i].napi); napi_disable(&edev->fp_array[i].napi);
netif_napi_del(&edev->fp_array[i].napi); netif_napi_del(&edev->fp_array[i].napi);
...@@ -3044,7 +3093,7 @@ static void qede_napi_add_enable(struct qede_dev *edev) ...@@ -3044,7 +3093,7 @@ static void qede_napi_add_enable(struct qede_dev *edev)
int i; int i;
/* Add NAPI objects */ /* Add NAPI objects */
for_each_rss(i) { for_each_queue(i) {
netif_napi_add(edev->ndev, &edev->fp_array[i].napi, netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
qede_poll, NAPI_POLL_WEIGHT); qede_poll, NAPI_POLL_WEIGHT);
napi_enable(&edev->fp_array[i].napi); napi_enable(&edev->fp_array[i].napi);
...@@ -3073,14 +3122,14 @@ static int qede_req_msix_irqs(struct qede_dev *edev) ...@@ -3073,14 +3122,14 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
int i, rc; int i, rc;
/* Sanitize number of interrupts == number of prepared RSS queues */ /* Sanitize number of interrupts == number of prepared RSS queues */
if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) { if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
DP_ERR(edev, DP_ERR(edev,
"Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n", "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
QEDE_RSS_CNT(edev), edev->int_info.msix_cnt); QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
return -EINVAL; return -EINVAL;
} }
for (i = 0; i < QEDE_RSS_CNT(edev); i++) { for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
rc = request_irq(edev->int_info.msix[i].vector, rc = request_irq(edev->int_info.msix[i].vector,
qede_msix_fp_int, 0, edev->fp_array[i].name, qede_msix_fp_int, 0, edev->fp_array[i].name,
&edev->fp_array[i]); &edev->fp_array[i]);
...@@ -3125,11 +3174,11 @@ static int qede_setup_irqs(struct qede_dev *edev) ...@@ -3125,11 +3174,11 @@ static int qede_setup_irqs(struct qede_dev *edev)
/* qed should learn receive the RSS ids and callbacks */ /* qed should learn receive the RSS ids and callbacks */
ops = edev->ops->common; ops = edev->ops->common;
for (i = 0; i < QEDE_RSS_CNT(edev); i++) for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
ops->simd_handler_config(edev->cdev, ops->simd_handler_config(edev->cdev,
&edev->fp_array[i], i, &edev->fp_array[i], i,
qede_simd_fp_handler); qede_simd_fp_handler);
edev->int_info.used_cnt = QEDE_RSS_CNT(edev); edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
} }
return 0; return 0;
} }
...@@ -3187,9 +3236,10 @@ static int qede_stop_queues(struct qede_dev *edev) ...@@ -3187,9 +3236,10 @@ static int qede_stop_queues(struct qede_dev *edev)
} }
/* Flush Tx queues. If needed, request drain from MCP */ /* Flush Tx queues. If needed, request drain from MCP */
for_each_rss(i) { for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i]; struct qede_fastpath *fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < edev->num_tc; tc++) { for (tc = 0; tc < edev->num_tc; tc++) {
struct qede_tx_queue *txq = &fp->txqs[tc]; struct qede_tx_queue *txq = &fp->txqs[tc];
...@@ -3198,17 +3248,21 @@ static int qede_stop_queues(struct qede_dev *edev) ...@@ -3198,17 +3248,21 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc; return rc;
} }
} }
}
/* Stop all Queues in reverse order*/ /* Stop all Queues in reverse order */
for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) { for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
struct qed_stop_rxq_params rx_params; struct qed_stop_rxq_params rx_params;
/* Stop the Tx Queue(s)*/ /* Stop the Tx Queue(s) */
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < edev->num_tc; tc++) { for (tc = 0; tc < edev->num_tc; tc++) {
struct qed_stop_txq_params tx_params; struct qed_stop_txq_params tx_params;
u8 val;
tx_params.rss_id = i; tx_params.rss_id = i;
tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i; val = edev->fp_array[i].txqs[tc].index;
tx_params.tx_queue_id = val;
rc = edev->ops->q_tx_stop(cdev, &tx_params); rc = edev->ops->q_tx_stop(cdev, &tx_params);
if (rc) { if (rc) {
DP_ERR(edev, "Failed to stop TXQ #%d\n", DP_ERR(edev, "Failed to stop TXQ #%d\n",
...@@ -3216,11 +3270,13 @@ static int qede_stop_queues(struct qede_dev *edev) ...@@ -3216,11 +3270,13 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc; return rc;
} }
} }
}
/* Stop the Rx Queue*/ /* Stop the Rx Queue */
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
memset(&rx_params, 0, sizeof(rx_params)); memset(&rx_params, 0, sizeof(rx_params));
rx_params.rss_id = i; rx_params.rss_id = i;
rx_params.rx_queue_id = i; rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;
rc = edev->ops->q_rx_stop(cdev, &rx_params); rc = edev->ops->q_rx_stop(cdev, &rx_params);
if (rc) { if (rc) {
...@@ -3228,6 +3284,7 @@ static int qede_stop_queues(struct qede_dev *edev) ...@@ -3228,6 +3284,7 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc; return rc;
} }
} }
}
/* Stop the vport */ /* Stop the vport */
rc = edev->ops->vport_stop(cdev, 0); rc = edev->ops->vport_stop(cdev, 0);
...@@ -3248,7 +3305,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) ...@@ -3248,7 +3305,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
struct qed_start_vport_params start = {0}; struct qed_start_vport_params start = {0};
bool reset_rss_indir = false; bool reset_rss_indir = false;
if (!edev->num_rss) { if (!edev->num_queues) {
DP_ERR(edev, DP_ERR(edev,
"Cannot update V-VPORT as active as there are no Rx queues\n"); "Cannot update V-VPORT as active as there are no Rx queues\n");
return -EINVAL; return -EINVAL;
...@@ -3272,50 +3329,66 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) ...@@ -3272,50 +3329,66 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
"Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
for_each_rss(i) { for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i]; struct qede_fastpath *fp = &edev->fp_array[i];
dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table; dma_addr_t p_phys_table;
u32 page_cnt;
if (fp->type & QEDE_FASTPATH_RX) {
struct qede_rx_queue *rxq = fp->rxq;
__le16 *val;
memset(&q_params, 0, sizeof(q_params)); memset(&q_params, 0, sizeof(q_params));
q_params.rss_id = i; q_params.rss_id = i;
q_params.queue_id = i; q_params.queue_id = rxq->rxq_id;
q_params.vport_id = 0; q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id; q_params.sb = fp->sb_info->igu_sb_id;
q_params.sb_idx = RX_PI; q_params.sb_idx = RX_PI;
p_phys_table =
qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
rc = edev->ops->q_rx_start(cdev, &q_params, rc = edev->ops->q_rx_start(cdev, &q_params,
fp->rxq->rx_buf_size, rxq->rx_buf_size,
fp->rxq->rx_bd_ring.p_phys_addr, rxq->rx_bd_ring.p_phys_addr,
phys_table, p_phys_table,
fp->rxq->rx_comp_ring.page_cnt, page_cnt,
&fp->rxq->hw_rxq_prod_addr); &rxq->hw_rxq_prod_addr);
if (rc) { if (rc) {
DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
rc);
return rc; return rc;
} }
fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI]; val = &fp->sb_info->sb_virt->pi_array[RX_PI];
rxq->hw_cons_ptr = val;
qede_update_rx_prod(edev, rxq);
}
qede_update_rx_prod(edev, fp->rxq); if (!(fp->type & QEDE_FASTPATH_TX))
continue;
for (tc = 0; tc < edev->num_tc; tc++) { for (tc = 0; tc < edev->num_tc; tc++) {
struct qede_tx_queue *txq = &fp->txqs[tc]; struct qede_tx_queue *txq = &fp->txqs[tc];
int txq_index = tc * QEDE_RSS_CNT(edev) + i;
p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
memset(&q_params, 0, sizeof(q_params)); memset(&q_params, 0, sizeof(q_params));
q_params.rss_id = i; q_params.rss_id = i;
q_params.queue_id = txq_index; q_params.queue_id = txq->index;
q_params.vport_id = 0; q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id; q_params.sb = fp->sb_info->igu_sb_id;
q_params.sb_idx = TX_PI(tc); q_params.sb_idx = TX_PI(tc);
rc = edev->ops->q_tx_start(cdev, &q_params, rc = edev->ops->q_tx_start(cdev, &q_params,
txq->tx_pbl.pbl.p_phys_table, p_phys_table, page_cnt,
txq->tx_pbl.page_cnt,
&txq->doorbell_addr); &txq->doorbell_addr);
if (rc) { if (rc) {
DP_ERR(edev, "Start TXQ #%d failed %d\n", DP_ERR(edev, "Start TXQ #%d failed %d\n",
txq_index, rc); txq->index, rc);
return rc; return rc;
} }
...@@ -3346,13 +3419,13 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) ...@@ -3346,13 +3419,13 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
} }
/* Fill struct with RSS params */ /* Fill struct with RSS params */
if (QEDE_RSS_CNT(edev) > 1) { if (QEDE_RSS_COUNT(edev) > 1) {
vport_update_params.update_rss_flg = 1; vport_update_params.update_rss_flg = 1;
/* Need to validate current RSS config uses valid entries */ /* Need to validate current RSS config uses valid entries */
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
if (edev->rss_params.rss_ind_table[i] >= if (edev->rss_params.rss_ind_table[i] >=
edev->num_rss) { QEDE_RSS_COUNT(edev)) {
reset_rss_indir = true; reset_rss_indir = true;
break; break;
} }
...@@ -3365,7 +3438,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) ...@@ -3365,7 +3438,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
u16 indir_val; u16 indir_val;
val = QEDE_RSS_CNT(edev); val = QEDE_RSS_COUNT(edev);
indir_val = ethtool_rxfh_indir_default(i, val); indir_val = ethtool_rxfh_indir_default(i, val);
edev->rss_params.rss_ind_table[i] = indir_val; edev->rss_params.rss_ind_table[i] = indir_val;
} }
...@@ -3494,7 +3567,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) ...@@ -3494,7 +3567,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
if (rc) if (rc)
goto err1; goto err1;
DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
QEDE_RSS_CNT(edev), edev->num_tc); QEDE_QUEUE_CNT(edev), edev->num_tc);
rc = qede_set_real_num_queues(edev); rc = qede_set_real_num_queues(edev);
if (rc) if (rc)
...@@ -3547,7 +3620,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) ...@@ -3547,7 +3620,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
err1: err1:
edev->ops->common->set_fp_int(edev->cdev, 0); edev->ops->common->set_fp_int(edev->cdev, 0);
qede_free_fp_array(edev); qede_free_fp_array(edev);
edev->num_rss = 0; edev->num_queues = 0;
edev->fp_num_tx = 0;
edev->fp_num_rx = 0;
err0: err0:
return rc; return rc;
} }
......