Commit fccf111e authored by Jakub Kicinski

Merge branch 'net-hns3-updates-for-next'

Huazhong Tan says:

====================
net: hns3: updates for -next

There are several updates relating to the interrupt coalesce for
the HNS3 ethernet driver.

#1 adds support for QL (quantity limiter, interrupt coalesce
   based on the frame quantity).
#2 queries the maximum value of GL from the firmware instead of defining
   a fixed value in code.
#3 adds support for 1us unit GL (gap limiter, interrupt coalesce
   based on the gap time).
#4 renames gl_adapt_enable to adapt_enable in struct hns3_enet_coalesce to fit
   its new usage.

change log:
V4 - remove #5~#10 from this series, which needs more discussion.
V3 - fix a typo error in #1 reported by Jakub Kicinski.
     rewrite #9 commit log.
     remove #11 from this series.
V2 - reorder #2 & #3 to fix compiler error.
     fix some checkpatch warnings in #10 & #11.

previous version:
V3: https://patchwork.ozlabs.org/project/netdev/cover/1605151998-12633-1-git-send-email-tanhuazhong@huawei.com/
V2: https://patchwork.ozlabs.org/project/netdev/cover/1604892159-19990-1-git-send-email-tanhuazhong@huawei.com/
V1: https://patchwork.ozlabs.org/project/netdev/cover/1604730681-32559-1-git-send-email-tanhuazhong@huawei.com/
====================

Link: https://lore.kernel.org/r/1605514854-11205-1-git-send-email-tanhuazhong@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 0676a4ea de25bcc4
...@@ -278,6 +278,7 @@ struct hnae3_dev_specs { ...@@ -278,6 +278,7 @@ struct hnae3_dev_specs {
u16 rss_ind_tbl_size; u16 rss_ind_tbl_size;
u16 rss_key_size; u16 rss_key_size;
u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */ u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */ u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
}; };
......
...@@ -349,6 +349,7 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h) ...@@ -349,6 +349,7 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h)
dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max); dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
} }
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer, static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
......
...@@ -211,8 +211,8 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, ...@@ -211,8 +211,8 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
* GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
*/ */
if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable && if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
!tqp_vector->rx_group.coal.gl_adapt_enable) !tqp_vector->rx_group.coal.adapt_enable)
/* According to the hardware, the range of rl_reg is /* According to the hardware, the range of rl_reg is
* 0-59 and the unit is 4. * 0-59 and the unit is 4.
*/ */
...@@ -224,48 +224,99 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, ...@@ -224,48 +224,99 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value) u32 gl_value)
{ {
u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value); u32 new_val;
writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); if (tqp_vector->rx_group.coal.unit_1us)
new_val = gl_value | HNS3_INT_GL_1US;
else
new_val = hns3_gl_usec_to_reg(gl_value);
writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
} }
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value) u32 gl_value)
{ {
u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value); u32 new_val;
if (tqp_vector->tx_group.coal.unit_1us)
new_val = gl_value | HNS3_INT_GL_1US;
else
new_val = hns3_gl_usec_to_reg(gl_value);
writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
u32 ql_value)
{
writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
}
writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
u32 ql_value)
{
writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
} }
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv) struct hns3_nic_priv *priv)
{ {
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
/* initialize the configuration for interrupt coalescing. /* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter) * 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter) * 2. RL (Interrupt Rate Limiter)
* 3. QL (Interrupt Quantity Limiter)
* *
* Default: enable interrupt coalescing self-adaptive and GL * Default: enable interrupt coalescing self-adaptive and GL
*/ */
tqp_vector->tx_group.coal.gl_adapt_enable = 1; tx_coal->adapt_enable = 1;
tqp_vector->rx_group.coal.gl_adapt_enable = 1; rx_coal->adapt_enable = 1;
tx_coal->int_gl = HNS3_INT_GL_50K;
rx_coal->int_gl = HNS3_INT_GL_50K;
tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; rx_coal->flow_level = HNS3_FLOW_LOW;
tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; tx_coal->flow_level = HNS3_FLOW_LOW;
/* device version above V3(include V3), GL can configure 1us
* unit, so uses 1us unit.
*/
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
tx_coal->unit_1us = 1;
rx_coal->unit_1us = 1;
}
tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; if (ae_dev->dev_specs.int_ql_max) {
tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; tx_coal->ql_enable = 1;
rx_coal->ql_enable = 1;
tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
}
} }
static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, static void
struct hns3_nic_priv *priv) hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv)
{ {
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
struct hnae3_handle *h = priv->ae_handle; struct hnae3_handle *h = priv->ae_handle;
hns3_set_vector_coalesce_tx_gl(tqp_vector, hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
tqp_vector->tx_group.coal.int_gl); hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
hns3_set_vector_coalesce_rx_gl(tqp_vector,
tqp_vector->rx_group.coal.int_gl);
hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
if (tx_coal->ql_enable)
hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);
if (rx_coal->ql_enable)
hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
} }
static int hns3_nic_set_real_num_queue(struct net_device *netdev) static int hns3_nic_set_real_num_queue(struct net_device *netdev)
...@@ -3333,14 +3384,14 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) ...@@ -3333,14 +3384,14 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
tqp_vector->last_jiffies + msecs_to_jiffies(1000))) tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
return; return;
if (rx_group->coal.gl_adapt_enable) { if (rx_group->coal.adapt_enable) {
rx_update = hns3_get_new_int_gl(rx_group); rx_update = hns3_get_new_int_gl(rx_group);
if (rx_update) if (rx_update)
hns3_set_vector_coalesce_rx_gl(tqp_vector, hns3_set_vector_coalesce_rx_gl(tqp_vector,
rx_group->coal.int_gl); rx_group->coal.int_gl);
} }
if (tx_group->coal.gl_adapt_enable) { if (tx_group->coal.adapt_enable) {
tx_update = hns3_get_new_int_gl(tx_group); tx_update = hns3_get_new_int_gl(tx_group);
if (tx_update) if (tx_update)
hns3_set_vector_coalesce_tx_gl(tqp_vector, hns3_set_vector_coalesce_tx_gl(tqp_vector,
...@@ -3536,7 +3587,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) ...@@ -3536,7 +3587,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
for (i = 0; i < priv->vector_num; i++) { for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i]; tqp_vector = &priv->tqp_vector[i];
hns3_vector_gl_rl_init_hw(tqp_vector, priv); hns3_vector_coalesce_init_hw(tqp_vector, priv);
tqp_vector->num_tqps = 0; tqp_vector->num_tqps = 0;
} }
...@@ -3632,7 +3683,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) ...@@ -3632,7 +3683,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
tqp_vector->idx = i; tqp_vector->idx = i;
tqp_vector->mask_addr = vector[i].io_addr; tqp_vector->mask_addr = vector[i].io_addr;
tqp_vector->vector_irq = vector[i].vector; tqp_vector->vector_irq = vector[i].vector;
hns3_vector_gl_rl_init(tqp_vector, priv); hns3_vector_coalesce_init(tqp_vector, priv);
} }
out: out:
......
...@@ -181,6 +181,8 @@ enum hns3_nic_state { ...@@ -181,6 +181,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_GL2_OFFSET 0x300 #define HNS3_VECTOR_GL2_OFFSET 0x300
#define HNS3_VECTOR_RL_OFFSET 0x900 #define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6 #define HNS3_VECTOR_RL_EN_B 6
#define HNS3_VECTOR_TX_QL_OFFSET 0xe00
#define HNS3_VECTOR_RX_QL_OFFSET 0xf00
#define HNS3_RING_EN_B 0 #define HNS3_RING_EN_B 0
...@@ -418,18 +420,25 @@ enum hns3_flow_level_range { ...@@ -418,18 +420,25 @@ enum hns3_flow_level_range {
HNS3_FLOW_ULTRA = 3, HNS3_FLOW_ULTRA = 3,
}; };
#define HNS3_INT_GL_MAX 0x1FE0
#define HNS3_INT_GL_50K 0x0014 #define HNS3_INT_GL_50K 0x0014
#define HNS3_INT_GL_20K 0x0032 #define HNS3_INT_GL_20K 0x0032
#define HNS3_INT_GL_18K 0x0036 #define HNS3_INT_GL_18K 0x0036
#define HNS3_INT_GL_8K 0x007C #define HNS3_INT_GL_8K 0x007C
#define HNS3_INT_GL_1US BIT(31)
#define HNS3_INT_RL_MAX 0x00EC #define HNS3_INT_RL_MAX 0x00EC
#define HNS3_INT_RL_ENABLE_MASK 0x40 #define HNS3_INT_RL_ENABLE_MASK 0x40
#define HNS3_INT_QL_DEFAULT_CFG 0x20
struct hns3_enet_coalesce { struct hns3_enet_coalesce {
u16 int_gl; u16 int_gl;
u8 gl_adapt_enable; u16 int_ql;
u16 int_ql_max;
u8 adapt_enable:1;
u8 ql_enable:1;
u8 unit_1us:1;
enum hns3_flow_level_range flow_level; enum hns3_flow_level_range flow_level;
}; };
...@@ -595,6 +604,10 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, ...@@ -595,6 +604,10 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value); u32 gl_value);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value); u32 rl_value);
void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
u32 ql_value);
void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
u32 ql_value);
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable); void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle); void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
......
...@@ -1105,9 +1105,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, ...@@ -1105,9 +1105,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
rx_vector = priv->ring[queue_num + queue].tqp_vector; rx_vector = priv->ring[queue_num + queue].tqp_vector;
cmd->use_adaptive_tx_coalesce = cmd->use_adaptive_tx_coalesce =
tx_vector->tx_group.coal.gl_adapt_enable; tx_vector->tx_group.coal.adapt_enable;
cmd->use_adaptive_rx_coalesce = cmd->use_adaptive_rx_coalesce =
rx_vector->rx_group.coal.gl_adapt_enable; rx_vector->rx_group.coal.adapt_enable;
cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl; cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl; cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
...@@ -1115,6 +1115,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, ...@@ -1115,6 +1115,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting; cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting; cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
return 0; return 0;
} }
...@@ -1127,22 +1130,30 @@ static int hns3_get_coalesce(struct net_device *netdev, ...@@ -1127,22 +1130,30 @@ static int hns3_get_coalesce(struct net_device *netdev,
static int hns3_check_gl_coalesce_para(struct net_device *netdev, static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd) struct ethtool_coalesce *cmd)
{ {
struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
u32 rx_gl, tx_gl; u32 rx_gl, tx_gl;
if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) { if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
netdev_err(netdev, netdev_err(netdev,
"Invalid rx-usecs value, rx-usecs range is 0-%d\n", "invalid rx-usecs value, rx-usecs range is 0-%u\n",
HNS3_INT_GL_MAX); ae_dev->dev_specs.max_int_gl);
return -EINVAL; return -EINVAL;
} }
if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) { if (cmd->tx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
netdev_err(netdev, netdev_err(netdev,
"Invalid tx-usecs value, tx-usecs range is 0-%d\n", "invalid tx-usecs value, tx-usecs range is 0-%u\n",
HNS3_INT_GL_MAX); ae_dev->dev_specs.max_int_gl);
return -EINVAL; return -EINVAL;
} }
/* device version above V3(include V3), GL uses 1us unit,
* so the round down is not needed.
*/
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
return 0;
rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs); rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
if (rx_gl != cmd->rx_coalesce_usecs) { if (rx_gl != cmd->rx_coalesce_usecs) {
netdev_info(netdev, netdev_info(netdev,
...@@ -1188,6 +1199,29 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev, ...@@ -1188,6 +1199,29 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev,
return 0; return 0;
} }
static int hns3_check_ql_coalesce_param(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
!ae_dev->dev_specs.int_ql_max) {
netdev_err(netdev, "coalesced frames is not supported\n");
return -EOPNOTSUPP;
}
if (cmd->tx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max ||
cmd->rx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max) {
netdev_err(netdev,
"invalid coalesced_frames value, range is 0-%u\n",
ae_dev->dev_specs.int_ql_max);
return -ERANGE;
}
return 0;
}
static int hns3_check_coalesce_para(struct net_device *netdev, static int hns3_check_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd) struct ethtool_coalesce *cmd)
{ {
...@@ -1207,6 +1241,10 @@ static int hns3_check_coalesce_para(struct net_device *netdev, ...@@ -1207,6 +1241,10 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
return ret; return ret;
} }
ret = hns3_check_ql_coalesce_param(netdev, cmd);
if (ret)
return ret;
if (cmd->use_adaptive_tx_coalesce == 1 || if (cmd->use_adaptive_tx_coalesce == 1 ||
cmd->use_adaptive_rx_coalesce == 1) { cmd->use_adaptive_rx_coalesce == 1) {
netdev_info(netdev, netdev_info(netdev,
...@@ -1230,14 +1268,17 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev, ...@@ -1230,14 +1268,17 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
tx_vector = priv->ring[queue].tqp_vector; tx_vector = priv->ring[queue].tqp_vector;
rx_vector = priv->ring[queue_num + queue].tqp_vector; rx_vector = priv->ring[queue_num + queue].tqp_vector;
tx_vector->tx_group.coal.gl_adapt_enable = tx_vector->tx_group.coal.adapt_enable =
cmd->use_adaptive_tx_coalesce; cmd->use_adaptive_tx_coalesce;
rx_vector->rx_group.coal.gl_adapt_enable = rx_vector->rx_group.coal.adapt_enable =
cmd->use_adaptive_rx_coalesce; cmd->use_adaptive_rx_coalesce;
tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs; tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs; rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;
tx_vector->tx_group.coal.int_ql = cmd->tx_max_coalesced_frames;
rx_vector->rx_group.coal.int_ql = cmd->rx_max_coalesced_frames;
hns3_set_vector_coalesce_tx_gl(tx_vector, hns3_set_vector_coalesce_tx_gl(tx_vector,
tx_vector->tx_group.coal.int_gl); tx_vector->tx_group.coal.int_gl);
hns3_set_vector_coalesce_rx_gl(rx_vector, hns3_set_vector_coalesce_rx_gl(rx_vector,
...@@ -1245,6 +1286,13 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev, ...@@ -1245,6 +1286,13 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting); hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting); hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
if (tx_vector->tx_group.coal.ql_enable)
hns3_set_vector_coalesce_tx_ql(tx_vector,
tx_vector->tx_group.coal.int_ql);
if (rx_vector->rx_group.coal.ql_enable)
hns3_set_vector_coalesce_rx_ql(rx_vector,
rx_vector->rx_group.coal.int_ql);
} }
static int hns3_set_coalesce(struct net_device *netdev, static int hns3_set_coalesce(struct net_device *netdev,
...@@ -1471,7 +1519,8 @@ static int hns3_get_module_eeprom(struct net_device *netdev, ...@@ -1471,7 +1519,8 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \ #define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
ETHTOOL_COALESCE_USE_ADAPTIVE | \ ETHTOOL_COALESCE_USE_ADAPTIVE | \
ETHTOOL_COALESCE_RX_USECS_HIGH | \ ETHTOOL_COALESCE_RX_USECS_HIGH | \
ETHTOOL_COALESCE_TX_USECS_HIGH) ETHTOOL_COALESCE_TX_USECS_HIGH | \
ETHTOOL_COALESCE_MAX_FRAMES)
static const struct ethtool_ops hns3vf_ethtool_ops = { static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE, .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
......
...@@ -1103,6 +1103,14 @@ struct hclge_dev_specs_0_cmd { ...@@ -1103,6 +1103,14 @@ struct hclge_dev_specs_0_cmd {
__le32 max_tm_rate; __le32 max_tm_rate;
}; };
#define HCLGE_DEF_MAX_INT_GL 0x1FE0U
struct hclge_dev_specs_1_cmd {
__le32 rsv0;
__le16 max_int_gl;
u8 rsv1[18];
};
int hclge_cmd_init(struct hclge_dev *hdev); int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{ {
......
...@@ -1366,6 +1366,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev) ...@@ -1366,6 +1366,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE; ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE; ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
} }
static void hclge_parse_dev_specs(struct hclge_dev *hdev, static void hclge_parse_dev_specs(struct hclge_dev *hdev,
...@@ -1373,14 +1374,18 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev, ...@@ -1373,14 +1374,18 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
{ {
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_dev_specs_0_cmd *req0; struct hclge_dev_specs_0_cmd *req0;
struct hclge_dev_specs_1_cmd *req1;
req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data; req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
ae_dev->dev_specs.rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size =
le16_to_cpu(req0->rss_ind_tbl_size); le16_to_cpu(req0->rss_ind_tbl_size);
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate); ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
} }
static void hclge_check_dev_specs(struct hclge_dev *hdev) static void hclge_check_dev_specs(struct hclge_dev *hdev)
...@@ -1395,6 +1400,8 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev) ...@@ -1395,6 +1400,8 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE; dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate) if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
} }
static int hclge_query_dev_specs(struct hclge_dev *hdev) static int hclge_query_dev_specs(struct hclge_dev *hdev)
......
...@@ -285,6 +285,14 @@ struct hclgevf_dev_specs_0_cmd { ...@@ -285,6 +285,14 @@ struct hclgevf_dev_specs_0_cmd {
u8 rsv1[5]; u8 rsv1[5];
}; };
#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U
struct hclgevf_dev_specs_1_cmd {
__le32 rsv0;
__le16 max_int_gl;
u8 rsv1[18];
};
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{ {
writel(value, base + reg); writel(value, base + reg);
......
...@@ -2991,6 +2991,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) ...@@ -2991,6 +2991,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
HCLGEVF_MAX_NON_TSO_BD_NUM; HCLGEVF_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
} }
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
...@@ -2998,13 +2999,17 @@ static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, ...@@ -2998,13 +2999,17 @@ static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
{ {
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclgevf_dev_specs_0_cmd *req0; struct hclgevf_dev_specs_0_cmd *req0;
struct hclgevf_dev_specs_1_cmd *req1;
req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
ae_dev->dev_specs.rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size =
le16_to_cpu(req0->rss_ind_tbl_size); le16_to_cpu(req0->rss_ind_tbl_size);
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
} }
static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
...@@ -3017,6 +3022,8 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) ...@@ -3017,6 +3022,8 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size) if (!dev_specs->rss_key_size)
dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
} }
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment