Commit a071c6ac authored by David S. Miller

Merge branch 'hns3-fixes'

Jijie Shao says:

====================
There are some bugfixes for the HNS3 ethernet driver
====================
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c7b75bea 882481b1
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <linux/pkt_sched.h>
#include <linux/types.h>
#include <linux/bitmap.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
@@ -101,6 +102,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_FEC_STATS_B,
HNAE3_DEV_SUPPORT_LANE_NUM_B,
HNAE3_DEV_SUPPORT_WOL_B,
HNAE3_DEV_SUPPORT_TM_FLUSH_B,
};
#define hnae3_ae_dev_fd_supported(ae_dev) \
@@ -172,6 +174,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_wol_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_WOL_B, (ae_dev)->caps)
#define hnae3_ae_dev_tm_flush_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_TM_FLUSH_B, (hdev)->ae_dev->caps)
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -407,7 +412,7 @@ struct hnae3_ae_dev {
unsigned long hw_err_reset_req;
struct hnae3_dev_specs dev_specs;
u32 dev_version;
unsigned long caps[BITS_TO_LONGS(HNAE3_DEV_CAPS_MAX_NUM)];
DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM);
void *priv;
};
......
@@ -156,6 +156,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
{HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
{HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -171,6 +172,20 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};
static void
hclge_comm_capability_to_bitmap(unsigned long *bitmap, __le32 *caps)
{
const unsigned int words = HCLGE_COMM_QUERY_CAP_LENGTH;
u32 val[HCLGE_COMM_QUERY_CAP_LENGTH];
unsigned int i;
for (i = 0; i < words; i++)
val[i] = __le32_to_cpu(caps[i]);
bitmap_from_arr32(bitmap, val,
HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
}
static void
hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
struct hclge_comm_query_version_cmd *cmd)
@@ -179,11 +194,12 @@ hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
ARRAY_SIZE(hclge_vf_cmd_caps);
u32 caps, i;
DECLARE_BITMAP(caps, HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
u32 i;
caps = __le32_to_cpu(cmd->caps[0]);
hclge_comm_capability_to_bitmap(caps, cmd->caps);
for (i = 0; i < size; i++)
if (hnae3_get_bit(caps, caps_map[i].imp_bit))
if (test_bit(caps_map[i].imp_bit, caps))
set_bit(caps_map[i].local_bit, ae_dev->caps);
}
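
With the DECLARE_BITMAP() change, the parser above converts every little-endian capability word from the firmware into one bitmap instead of reading only cmd->caps[0], so capability bits are no longer confined to the first 32-bit word. The following user-space sketch mirrors that flow under simplified assumptions; CAP_WORDS, caps_to_bitmap() and bitmap_test() are hypothetical stand-ins for HCLGE_COMM_QUERY_CAP_LENGTH, bitmap_from_arr32() and test_bit(), and the __le32_to_cpu() step is omitted.

/* Hypothetical user-space analogue of the caps parsing above; the kernel
 * helpers are replaced by plain C so the snippet compiles on its own.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CAP_WORDS	3	/* stand-in for HCLGE_COMM_QUERY_CAP_LENGTH */
#define CAP_BITS	(CAP_WORDS * 32)
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define CAP_LONGS	((CAP_BITS + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Pack an array of CPU-order 32-bit words into an unsigned long bitmap,
 * like bitmap_from_arr32() does in the kernel.
 */
static void caps_to_bitmap(unsigned long *bitmap, const uint32_t *words)
{
	unsigned int i;

	memset(bitmap, 0, CAP_LONGS * sizeof(unsigned long));
	for (i = 0; i < CAP_BITS; i++)
		if ((words[i / 32] >> (i % 32)) & 1)
			bitmap[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

/* Equivalent of test_bit() on the packed bitmap. */
static bool bitmap_test(const unsigned long *bitmap, unsigned int bit)
{
	return (bitmap[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	uint32_t fw_caps[CAP_WORDS] = { 1u << 31, 0, 0 };	/* bit 31 ~ HCLGE_COMM_CAP_TM_FLUSH_B */
	unsigned long bitmap[CAP_LONGS];

	caps_to_bitmap(bitmap, fw_caps);
	printf("tm flush supported: %d\n", bitmap_test(bitmap, 31));
	return 0;
}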
......
@@ -153,6 +153,7 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
HCLGE_OPC_TM_FLUSH = 0x0872,
/* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
@@ -349,6 +350,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_FEC_STATS_B = 25,
HCLGE_COMM_CAP_LANE_NUM_B = 27,
HCLGE_COMM_CAP_WOL_B = 28,
HCLGE_COMM_CAP_TM_FLUSH_B = 31,
};
enum HCLGE_COMM_API_CAP_BITS {
......
@@ -411,6 +411,9 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}, {
.name = "support wake on lan",
.cap_bit = HNAE3_DEV_SUPPORT_WOL_B,
}, {
.name = "support tm flush",
.cap_bit = HNAE3_DEV_SUPPORT_TM_FLUSH_B,
}
};
......
@@ -52,7 +52,10 @@ static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
for (i = 0; i < HNAE3_MAX_TC; i++) {
ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
if (i < hdev->tm_info.num_tc)
ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
else
ets->tc_tx_bw[i] = 0;
if (hdev->tm_info.tc_info[i].tc_sch_mode ==
HCLGE_SCH_MODE_SP)
@@ -123,7 +126,8 @@ static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
}
static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
struct ieee_ets *ets, bool *changed)
struct ieee_ets *ets, bool *changed,
u8 tc_num)
{
bool has_ets_tc = false;
u32 total_ets_bw = 0;
@@ -137,6 +141,13 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
*changed = true;
break;
case IEEE_8021QAZ_TSA_ETS:
if (i >= tc_num) {
dev_err(&hdev->pdev->dev,
"tc%u is disabled, cannot set ets bw\n",
i);
return -EINVAL;
}
/* The hardware switches to sp mode if the bandwidth is 0,
* so the ets bandwidth must be greater than 0.
*/
@@ -176,7 +187,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
if (ret)
return ret;
ret = hclge_ets_sch_mode_validate(hdev, ets, changed);
ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
if (ret)
return ret;
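
The validation above now rejects ETS bandwidth configured on a traffic class whose index is at or beyond the enabled TC count (tc_num), in addition to the existing non-zero bandwidth rule. A minimal sketch of that check follows; struct ets_cfg, validate_ets() and the TSA constants are simplified stand-ins for the ieee_ets fields and IEEE_8021QAZ_TSA_* values, and the real driver also tracks the *changed flag and the total bandwidth sum.

/* Hedged sketch of the per-TC ETS validation idea above. */
#include <stdint.h>
#include <stdio.h>

#define MAX_TC		8	/* stand-in for HNAE3_MAX_TC */
#define TSA_STRICT	0	/* ~ IEEE_8021QAZ_TSA_STRICT */
#define TSA_ETS		2	/* ~ IEEE_8021QAZ_TSA_ETS */

struct ets_cfg {
	uint8_t tc_tsa[MAX_TC];
	uint8_t tc_tx_bw[MAX_TC];
};

/* Reject ETS settings on a TC whose index is at or beyond the number of
 * enabled TCs, and require a non-zero bandwidth share otherwise.
 */
static int validate_ets(const struct ets_cfg *ets, uint8_t tc_num)
{
	unsigned int i;

	for (i = 0; i < MAX_TC; i++) {
		if (ets->tc_tsa[i] != TSA_ETS)
			continue;

		if (i >= tc_num) {
			fprintf(stderr, "tc%u is disabled, cannot set ets bw\n", i);
			return -1;
		}

		/* A zero weight would make the hardware fall back to strict
		 * priority, so it is rejected as well.
		 */
		if (!ets->tc_tx_bw[i]) {
			fprintf(stderr, "tc%u: ets bandwidth must be non-zero\n", i);
			return -1;
		}
	}

	return 0;
}

int main(void)
{
	struct ets_cfg ets = {
		.tc_tsa = { TSA_ETS, TSA_ETS, TSA_ETS },	/* tc2 asks for ETS... */
		.tc_tx_bw = { 40, 40, 20 },
	};

	/* ...but only two TCs are enabled, so this is rejected. */
	printf("validate_ets() = %d\n", validate_ets(&ets, 2));
	return 0;
}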
@@ -216,6 +227,10 @@ static int hclge_notify_down_uinit(struct hclge_dev *hdev)
if (ret)
return ret;
ret = hclge_tm_flush_cfg(hdev, true);
if (ret)
return ret;
return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}
@@ -227,6 +242,10 @@ static int hclge_notify_init_up(struct hclge_dev *hdev)
if (ret)
return ret;
ret = hclge_tm_flush_cfg(hdev, false);
if (ret)
return ret;
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
@@ -313,6 +332,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
int last_bad_ret = 0;
int ret;
if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -350,13 +370,28 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
if (ret)
return ret;
ret = hclge_buffer_alloc(hdev);
if (ret) {
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
ret = hclge_tm_flush_cfg(hdev, true);
if (ret)
return ret;
}
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
/* Regardless of whether the following operations succeed, the tm
* flush must be disabled and the client notified that the netdev
* is up again, so do not return immediately on failure.
*/
ret = hclge_buffer_alloc(hdev);
if (ret)
last_bad_ret = ret;
ret = hclge_tm_flush_cfg(hdev, false);
if (ret)
last_bad_ret = ret;
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
if (ret)
last_bad_ret = ret;
return last_bad_ret;
}
static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
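
The reworked setpfc error path records the most recent failure in last_bad_ret but still runs every remaining restore step (buffer allocation, clearing the tm flush, notifying the client up) instead of returning on the first error. The fragment below is a generic sketch of that pattern, with hypothetical step_a/step_b/step_c functions standing in for the driver calls.

/* Sketch of the "remember the last error, keep restoring" pattern. */
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }	/* pretend this step fails */
static int step_c(void) { return 0; }

static int restore_all(void)
{
	int last_bad_ret = 0;
	int ret;

	ret = step_a();
	if (ret)
		last_bad_ret = ret;

	ret = step_b();
	if (ret)
		last_bad_ret = ret;

	/* step_c still runs even though step_b failed */
	ret = step_c();
	if (ret)
		last_bad_ret = ret;

	return last_bad_ret;
}

int main(void)
{
	printf("restore_all() = %d\n", restore_all());
	return 0;
}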
......
@@ -693,8 +693,7 @@ static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
for (i = 0; i < HNAE3_MAX_TC; i++) {
sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
i, sch_mode_str,
hdev->tm_info.pg_info[0].tc_dwrr[i]);
i, sch_mode_str, ets_weight->tc_weight[i]);
}
return 0;
......
@@ -785,6 +785,7 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT 100
#define DEFAULT_BW_WEIGHT 1
u8 i;
@@ -806,7 +807,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
for (; k < HNAE3_MAX_TC; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;
}
}
@@ -1484,7 +1485,11 @@ int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
return ret;
/* Cfg schd mode for each level schd */
return hclge_tm_schd_mode_hw(hdev);
ret = hclge_tm_schd_mode_hw(hdev);
if (ret)
return ret;
return hclge_tm_flush_cfg(hdev, false);
}
static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
@@ -2113,3 +2118,28 @@ int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
return 0;
}
int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)
{
struct hclge_desc desc;
int ret;
if (!hnae3_ae_dev_tm_flush_supported(hdev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_FLUSH, false);
desc.data[0] = cpu_to_le32(enable ? HCLGE_TM_FLUSH_EN_MSK : 0);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to config tm flush, ret = %d\n", ret);
return ret;
}
if (enable)
msleep(HCLGE_TM_FLUSH_TIME_MS);
return ret;
}
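
The new helper above is a no-op on hardware that does not advertise HNAE3_DEV_SUPPORT_TM_FLUSH_B and, when enabling the flush, waits HCLGE_TM_FLUSH_TIME_MS so in-flight traffic can drain before the scheduler is reprogrammed. A hedged user-space sketch of that behaviour, and of how a caller wraps a reconfiguration with it, follows; tm_flush_cfg(), send_tm_flush_cmd() and TM_FLUSH_TIME_MS here are illustrative stand-ins, not the driver's real symbols.

/* Sketch of a capability-gated flush helper and its usage. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define TM_FLUSH_TIME_MS	10	/* mirrors HCLGE_TM_FLUSH_TIME_MS */

static bool tm_flush_supported = true;	/* would come from the caps bitmap */

static int send_tm_flush_cmd(bool enable)
{
	/* stands in for sending the HCLGE_OPC_TM_FLUSH firmware command */
	printf("TM flush %s\n", enable ? "on" : "off");
	return 0;
}

static int tm_flush_cfg(bool enable)
{
	int ret;

	if (!tm_flush_supported)
		return 0;	/* old hardware: silently skip */

	ret = send_tm_flush_cmd(enable);
	if (ret)
		return ret;

	if (enable)
		usleep(TM_FLUSH_TIME_MS * 1000);	/* ~ msleep() in the driver */

	return 0;
}

int main(void)
{
	tm_flush_cfg(true);	/* gate traffic and let it drain */
	printf("reprogram TM scheduler here\n");
	tm_flush_cfg(false);	/* release traffic again */
	return 0;
}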
@@ -33,6 +33,9 @@ enum hclge_opcode_type;
#define HCLGE_DSCP_MAP_TC_BD_NUM 2
#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)
#define HCLGE_TM_FLUSH_TIME_MS 10
#define HCLGE_TM_FLUSH_EN_MSK BIT(0)
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -272,4 +275,5 @@ int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
struct hclge_tm_shaper_para *para);
int hclge_up_to_tc_map(struct hclge_dev *hdev);
int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable);
#endif