Commit 9a4d7e86 authored by Sudarsana Reddy Kalluru, committed by David S. Miller

qede: Add support for Tx/Rx-only queues.

Add provision for configuring the fastpath queues with Tx-only (or Rx-only)
functionality.
Signed-off-by: Sudarsana Reddy Kalluru <sudarsana.kalluru@qlogic.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f8edcd12
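
For context, the patch splits the edev->num_queues fastpath entries into Rx-only, combined and Tx-only ranges, and the QEDE_RSS_COUNT()/QEDE_TSS_COUNT()/QEDE_TX_IDX() macros introduced in qede.h below derive the Rx-capable and Tx-capable counts from fp_num_rx/fp_num_tx. The following is a minimal standalone C sketch (not driver code) that mirrors those macros with made-up numbers, purely to illustrate the arithmetic; the entry ordering assumed here (Rx-only first, then combined, then Tx-only) is inferred from QEDE_TX_IDX(), and the struct/helper names are hypothetical.

	/* Minimal sketch of the queue accounting this patch introduces.
	 * Field names mirror the new qede_dev members; sample numbers and
	 * helper names are made up for illustration only.
	 */
	#include <stdio.h>

	struct layout {
		unsigned char fp_num_rx;	/* Rx-only fastpath entries */
		unsigned char fp_num_tx;	/* Tx-only fastpath entries */
		unsigned short num_queues;	/* total fastpath entries */
		unsigned char num_tc;		/* traffic classes per Tx-capable entry */
	};

	/* QEDE_RSS_COUNT(): every entry except the Tx-only ones can receive */
	static int rss_count(const struct layout *l)
	{
		return l->num_queues - l->fp_num_tx;
	}

	/* QEDE_TSS_COUNT(): every Tx-capable entry carries num_tc Tx queues */
	static int tss_count(const struct layout *l)
	{
		return (l->num_queues - l->fp_num_rx) * l->num_tc;
	}

	/* QEDE_TX_IDX(): Tx-capable entries start right after the Rx-only ones */
	static int tx_idx(const struct layout *l, int txqidx)
	{
		return l->fp_num_rx + txqidx % tss_count(l);
	}

	int main(void)
	{
		/* e.g. 2 Rx-only + 4 combined + 2 Tx-only entries, one traffic class */
		struct layout l = { .fp_num_rx = 2, .fp_num_tx = 2,
				    .num_queues = 8, .num_tc = 1 };

		printf("rx-capable=%d tx-queues=%d txq3->fp_array[%d]\n",
		       rss_count(&l), tss_count(&l), tx_idx(&l, 3));
		/* prints: rx-capable=6 tx-queues=6 txq3->fp_array[5] */
		return 0;
	}

From userspace, such a split would be requested through the standard ethtool channel parameters (separate rx/tx counts in addition to combined ones), which the reworked qede_set_channels() below now accepts.
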
@@ -126,16 +126,22 @@ struct qede_dev {
 					 (edev)->dev_info.num_tc)
 
 	struct qede_fastpath		*fp_array;
-	u16				req_rss;
-	u16				num_rss;
+	u8				req_num_tx;
+	u8				fp_num_tx;
+	u8				req_num_rx;
+	u8				fp_num_rx;
+	u16				req_queues;
+	u16				num_queues;
 	u8				num_tc;
-#define QEDE_RSS_CNT(edev)	((edev)->num_rss)
-#define QEDE_TSS_CNT(edev)	((edev)->num_rss * \
-				 (edev)->num_tc)
-#define QEDE_TSS_IDX(edev, txqidx)	((txqidx) % (edev)->num_rss)
-#define QEDE_TC_IDX(edev, txqidx)	((txqidx) / (edev)->num_rss)
+#define QEDE_QUEUE_CNT(edev)	((edev)->num_queues)
+#define QEDE_RSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_tx)
+#define QEDE_TSS_COUNT(edev)	(((edev)->num_queues - (edev)->fp_num_rx) * \
+				 (edev)->num_tc)
+#define QEDE_TX_IDX(edev, txqidx)	((edev)->fp_num_rx + (txqidx) % \
+					 QEDE_TSS_COUNT(edev))
+#define QEDE_TC_IDX(edev, txqidx)	((txqidx) / QEDE_TSS_COUNT(edev))
 #define QEDE_TX_QUEUE(edev, txqidx)	\
-	(&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+	(&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\
 							(edev), (txqidx))])
 
 	struct qed_int_info		int_info;
@@ -284,7 +290,11 @@ struct qede_tx_queue {
 struct qede_fastpath {
 	struct qede_dev	*edev;
-	u8			rss_id;
+#define QEDE_FASTPATH_TX	BIT(0)
+#define QEDE_FASTPATH_RX	BIT(1)
+#define QEDE_FASTPATH_COMBINED	(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
+	u8			type;
+	u8			id;
 	struct napi_struct	napi;
 	struct qed_sb_info	*sb_info;
 	struct qede_rx_queue	*rxq;
@@ -344,6 +354,6 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
 #define QEDE_MIN_PKT_LEN	64
 #define QEDE_RX_HDR_SIZE	256
-#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
 
 #endif /* _QEDE_H_ */
@@ -172,7 +172,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
 {
 	int i, j, k;
 
-	for (i = 0, k = 0; i < edev->num_rss; i++) {
+	for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
 		int tc;
 
 		for (j = 0; j < QEDE_NUM_RQSTATS; j++)
@@ -230,15 +230,21 @@ static void qede_get_ethtool_stats(struct net_device *dev,
 	mutex_lock(&edev->qede_lock);
 
-	for (qid = 0; qid < edev->num_rss; qid++) {
+	for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) {
 		int tc;
 
-		for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
-			buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
-		for (tc = 0; tc < edev->num_tc; tc++) {
-			for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
-				buf[cnt++] = QEDE_TQSTATS_DATA(edev, sidx, qid,
-							       tc);
+		if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+			for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
+				buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
+		}
+
+		if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+			for (tc = 0; tc < edev->num_tc; tc++) {
+				for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
+					buf[cnt++] = QEDE_TQSTATS_DATA(edev,
+								       sidx,
+								       qid, tc);
+			}
 		}
 	}
@@ -265,8 +271,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
 			if (qede_stats_arr[i].pf_only)
 				num_stats--;
 		}
-		return num_stats + edev->num_rss *
-			(QEDE_NUM_RQSTATS + QEDE_NUM_TQSTATS * edev->num_tc);
+		return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS +
+		       QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc;
 	case ETH_SS_PRIV_FLAGS:
 		return QEDE_PRI_FLAG_LEN;
 	case ETH_SS_TEST:
@@ -576,7 +582,7 @@ static int qede_set_coalesce(struct net_device *dev,
 	rxc = (u16)coal->rx_coalesce_usecs;
 	txc = (u16)coal->tx_coalesce_usecs;
-	for_each_rss(i) {
+	for_each_queue(i) {
 		sb_id = edev->fp_array[i].sb_info->igu_sb_id;
 		rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
 						     (u8)i, sb_id);
@@ -728,45 +734,70 @@ static void qede_get_channels(struct net_device *dev,
 	struct qede_dev *edev = netdev_priv(dev);
 
 	channels->max_combined = QEDE_MAX_RSS_CNT(edev);
-	channels->combined_count = QEDE_RSS_CNT(edev);
+	channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
+				   edev->fp_num_rx;
+	channels->tx_count = edev->fp_num_tx;
+	channels->rx_count = edev->fp_num_rx;
 }
 
 static int qede_set_channels(struct net_device *dev,
 			     struct ethtool_channels *channels)
 {
 	struct qede_dev *edev = netdev_priv(dev);
+	u32 count;
 
 	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
 		   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
 		   channels->rx_count, channels->tx_count,
 		   channels->other_count, channels->combined_count);
 
-	/* We don't support separate rx / tx, nor `other' channels. */
-	if (channels->rx_count || channels->tx_count ||
-	    channels->other_count || (channels->combined_count == 0) ||
-	    (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
+	count = channels->rx_count + channels->tx_count +
+		channels->combined_count;
+
+	/* We don't support `other' channels */
+	if (channels->other_count) {
 		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
 			   "command parameters not supported\n");
 		return -EINVAL;
 	}
 
+	if (!(channels->combined_count || (channels->rx_count &&
+					   channels->tx_count))) {
+		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+			   "need to request at least one transmit and one receive channel\n");
+		return -EINVAL;
+	}
+
+	if (count > QEDE_MAX_RSS_CNT(edev)) {
+		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+			   "requested channels = %d max supported channels = %d\n",
+			   count, QEDE_MAX_RSS_CNT(edev));
+		return -EINVAL;
+	}
+
 	/* Check if there was a change in the active parameters */
-	if (channels->combined_count == QEDE_RSS_CNT(edev)) {
+	if ((count == QEDE_QUEUE_CNT(edev)) &&
+	    (channels->tx_count == edev->fp_num_tx) &&
+	    (channels->rx_count == edev->fp_num_rx)) {
 		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
 			   "No change in active parameters\n");
 		return 0;
 	}
 
 	/* We need the number of queues to be divisible between the hwfns */
-	if (channels->combined_count % edev->dev_info.common.num_hwfns) {
+	if ((count % edev->dev_info.common.num_hwfns) ||
+	    (channels->tx_count % edev->dev_info.common.num_hwfns) ||
+	    (channels->rx_count % edev->dev_info.common.num_hwfns)) {
 		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
-			   "Number of channels must be divisable by %04x\n",
+			   "Number of channels must be divisible by %04x\n",
 			   edev->dev_info.common.num_hwfns);
 		return -EINVAL;
 	}
 
 	/* Set number of queues and reload if necessary */
-	edev->req_rss = channels->combined_count;
+	edev->req_queues = count;
+	edev->req_num_tx = channels->tx_count;
+	edev->req_num_rx = channels->rx_count;
 
 	if (netif_running(dev))
 		qede_reload(edev, NULL, NULL);
@@ -836,7 +867,7 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 	switch (info->cmd) {
 	case ETHTOOL_GRXRINGS:
-		info->data = edev->num_rss;
+		info->data = QEDE_RSS_COUNT(edev);
 		return 0;
 	case ETHTOOL_GRXFH:
 		return qede_get_rss_flags(edev, info);
@@ -1039,7 +1070,7 @@ static void qede_netif_start(struct qede_dev *edev)
 	if (!netif_running(edev->ndev))
 		return;
 
-	for_each_rss(i) {
+	for_each_queue(i) {
 		/* Update and reenable interrupts */
 		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
 		napi_enable(&edev->fp_array[i].napi);
@@ -1051,7 +1082,7 @@ static void qede_netif_stop(struct qede_dev *edev)
 {
 	int i;
 
-	for_each_rss(i) {
+	for_each_queue(i) {
 		napi_disable(&edev->fp_array[i].napi);
 
 		/* Disable interrupts */
 		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
@@ -1061,11 +1092,23 @@ static void qede_netif_stop(struct qede_dev *edev)
 static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 					  struct sk_buff *skb)
 {
-	struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+	struct qede_tx_queue *txq = NULL;
 	struct eth_tx_1st_bd *first_bd;
 	dma_addr_t mapping;
 	int i, idx, val;
 
+	for_each_queue(i) {
+		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+			txq = edev->fp_array[i].txqs;
+			break;
+		}
+	}
+
+	if (!txq) {
+		DP_NOTICE(edev, "Tx path is not available\n");
+		return -1;
+	}
+
 	/* Fill the entry in the SW ring and the BDs in the FW ring */
 	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
 	txq->sw_tx_ring[idx].skb = skb;
static int qede_selftest_receive_traffic(struct qede_dev *edev) static int qede_selftest_receive_traffic(struct qede_dev *edev)
{ {
struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len; u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
struct eth_fast_path_rx_reg_cqe *fp_cqe; struct eth_fast_path_rx_reg_cqe *fp_cqe;
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data; struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe; union eth_rx_cqe *cqe;
u8 *data_ptr; u8 *data_ptr;
int i; int i;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
rxq = edev->fp_array[i].rxq;
break;
}
}
if (!rxq) {
DP_NOTICE(edev, "Rx path is not available\n");
return -1;
}
/* The packet is expected to receive on rx-queue 0 even though RSS is /* The packet is expected to receive on rx-queue 0 even though RSS is
* enabled. This is because the queue 0 is configured as the default * enabled. This is because the queue 0 is configured as the default
* queue and that the loopback traffic is not IP. * queue and that the loopback traffic is not IP.