Commit ff45b48d authored by David S. Miller

Merge branch 'hns3-cleanups'

Guangbin Huang says:

====================
hns3: some cleanups for -next

To improve code readability and simplicity, this series refactors some
functions in the HNS3 ethernet driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents aeeecb88 1d851c09
@@ -1355,44 +1355,9 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
HNS3_TUN_NVGRE);
}
static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3,
u32 *type_cs_vlan_tso)
static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
u8 il4_proto, u32 *type_cs_vlan_tso,
u32 *ol_type_vlan_len_msec)
{
unsigned char *l2_hdr = skb->data;
u32 l4_proto = ol4_proto;
union l4_hdr_info l4;
union l3_hdr_info l3;
u32 l2_len, l3_len;
l4.hdr = skb_transport_header(skb);
l3.hdr = skb_network_header(skb);
/* handle encapsulation skb */
if (skb->encapsulation) {
/* If this is a not UDP/GRE encapsulation skb */
if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
/* drop the skb tunnel packet if hardware don't support,
* because hardware can't calculate csum when TSO.
*/
if (skb_is_gso(skb))
return -EDOM;
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
return skb_checksum_help(skb);
}
hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
/* switch to inner header */
l2_hdr = skb_inner_mac_header(skb);
l3.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
l4_proto = il4_proto;
}
if (l3.v4->version == 4) {
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
HNS3_L3T_IPV4);
@@ -1406,15 +1371,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
HNS3_L3T_IPV6);
}
}
static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4,
u32 l4_proto, u32 *type_cs_vlan_tso)
{
/* compute inner(/normal) L2 header size, defined in 2 Bytes */
l2_len = l3.hdr - l2_hdr;
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
/* compute inner(/normal) L3 header size, defined in 4 Bytes */
l3_len = l4.hdr - l3.hdr;
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -1460,6 +1421,57 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
return 0;
}
static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
u8 il4_proto, u32 *type_cs_vlan_tso,
u32 *ol_type_vlan_len_msec)
{
unsigned char *l2_hdr = skb->data;
u32 l4_proto = ol4_proto;
union l4_hdr_info l4;
union l3_hdr_info l3;
u32 l2_len, l3_len;
l4.hdr = skb_transport_header(skb);
l3.hdr = skb_network_header(skb);
/* handle encapsulation skb */
if (skb->encapsulation) {
/* If this is a not UDP/GRE encapsulation skb */
if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
/* drop the skb tunnel packet if hardware don't support,
* because hardware can't calculate csum when TSO.
*/
if (skb_is_gso(skb))
return -EDOM;
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
return skb_checksum_help(skb);
}
hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
/* switch to inner header */
l2_hdr = skb_inner_mac_header(skb);
l3.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
l4_proto = il4_proto;
}
hns3_set_l3_type(skb, l3, type_cs_vlan_tso);
/* compute inner(/normal) L2 header size, defined in 2 Bytes */
l2_len = l3.hdr - l2_hdr;
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
/* compute inner(/normal) L3 header size, defined in 4 Bytes */
l3_len = l4.hdr - l3.hdr;
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso);
}
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
struct sk_buff *skb)
{
@@ -2383,90 +2395,89 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
return features;
}
static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
struct hns3_enet_ring *ring, bool is_tx)
{
unsigned int start;
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
if (is_tx) {
stats->tx_bytes += ring->stats.tx_bytes;
stats->tx_packets += ring->stats.tx_pkts;
stats->tx_dropped += ring->stats.sw_err_cnt;
stats->tx_dropped += ring->stats.tx_vlan_err;
stats->tx_dropped += ring->stats.tx_l4_proto_err;
stats->tx_dropped += ring->stats.tx_l2l3l4_err;
stats->tx_dropped += ring->stats.tx_tso_err;
stats->tx_dropped += ring->stats.over_max_recursion;
stats->tx_dropped += ring->stats.hw_limitation;
stats->tx_dropped += ring->stats.copy_bits_err;
stats->tx_dropped += ring->stats.skb2sgl_err;
stats->tx_dropped += ring->stats.map_sg_err;
stats->tx_errors += ring->stats.sw_err_cnt;
stats->tx_errors += ring->stats.tx_vlan_err;
stats->tx_errors += ring->stats.tx_l4_proto_err;
stats->tx_errors += ring->stats.tx_l2l3l4_err;
stats->tx_errors += ring->stats.tx_tso_err;
stats->tx_errors += ring->stats.over_max_recursion;
stats->tx_errors += ring->stats.hw_limitation;
stats->tx_errors += ring->stats.copy_bits_err;
stats->tx_errors += ring->stats.skb2sgl_err;
stats->tx_errors += ring->stats.map_sg_err;
} else {
stats->rx_bytes += ring->stats.rx_bytes;
stats->rx_packets += ring->stats.rx_pkts;
stats->rx_dropped += ring->stats.l2_err;
stats->rx_errors += ring->stats.l2_err;
stats->rx_errors += ring->stats.l3l4_csum_err;
stats->rx_crc_errors += ring->stats.l2_err;
stats->multicast += ring->stats.rx_multicast;
stats->rx_length_errors += ring->stats.err_pkt_len;
}
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
static void hns3_nic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
struct hnae3_handle *handle = priv->ae_handle;
struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
u64 rx_length_errors = 0;
u64 rx_crc_errors = 0;
u64 rx_multicast = 0;
unsigned int start;
u64 tx_errors = 0;
u64 rx_errors = 0;
unsigned int idx;
u64 tx_bytes = 0;
u64 rx_bytes = 0;
u64 tx_pkts = 0;
u64 rx_pkts = 0;
u64 tx_drop = 0;
u64 rx_drop = 0;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
handle->ae_algo->ops->update_stats(handle, &netdev->stats);
memset(&ring_total_stats, 0, sizeof(ring_total_stats));
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
ring = &priv->ring[idx];
hns3_fetch_stats(&ring_total_stats, ring, true);
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
tx_drop += ring->stats.sw_err_cnt;
tx_drop += ring->stats.tx_vlan_err;
tx_drop += ring->stats.tx_l4_proto_err;
tx_drop += ring->stats.tx_l2l3l4_err;
tx_drop += ring->stats.tx_tso_err;
tx_drop += ring->stats.over_max_recursion;
tx_drop += ring->stats.hw_limitation;
tx_drop += ring->stats.copy_bits_err;
tx_drop += ring->stats.skb2sgl_err;
tx_drop += ring->stats.map_sg_err;
tx_errors += ring->stats.sw_err_cnt;
tx_errors += ring->stats.tx_vlan_err;
tx_errors += ring->stats.tx_l4_proto_err;
tx_errors += ring->stats.tx_l2l3l4_err;
tx_errors += ring->stats.tx_tso_err;
tx_errors += ring->stats.over_max_recursion;
tx_errors += ring->stats.hw_limitation;
tx_errors += ring->stats.copy_bits_err;
tx_errors += ring->stats.skb2sgl_err;
tx_errors += ring->stats.map_sg_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
ring = &priv->ring[idx + queue_num];
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
rx_drop += ring->stats.l2_err;
rx_errors += ring->stats.l2_err;
rx_errors += ring->stats.l3l4_csum_err;
rx_crc_errors += ring->stats.l2_err;
rx_multicast += ring->stats.rx_multicast;
rx_length_errors += ring->stats.err_pkt_len;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
hns3_fetch_stats(&ring_total_stats, ring, false);
}
stats->tx_bytes = ring_total_stats.tx_bytes;
stats->tx_packets = ring_total_stats.tx_packets;
stats->rx_bytes = ring_total_stats.rx_bytes;
stats->rx_packets = ring_total_stats.rx_packets;
stats->rx_errors = ring_total_stats.rx_errors;
stats->multicast = ring_total_stats.multicast;
stats->rx_length_errors = ring_total_stats.rx_length_errors;
stats->rx_crc_errors = ring_total_stats.rx_crc_errors;
stats->tx_bytes = tx_bytes;
stats->tx_packets = tx_pkts;
stats->rx_bytes = rx_bytes;
stats->rx_packets = rx_pkts;
stats->rx_errors = rx_errors;
stats->multicast = rx_multicast;
stats->rx_length_errors = rx_length_errors;
stats->rx_crc_errors = rx_crc_errors;
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
stats->tx_errors = tx_errors;
stats->rx_dropped = rx_drop;
stats->tx_dropped = tx_drop;
stats->tx_errors = ring_total_stats.tx_errors;
stats->rx_dropped = ring_total_stats.rx_dropped;
stats->tx_dropped = ring_total_stats.tx_dropped;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -2659,18 +2670,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
static int hns3_get_timeout_queue(struct net_device *ndev)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring;
struct napi_struct *napi;
int timeout_queue = 0;
int hw_head, hw_tail;
int fbd_num, fbd_oft;
int ebd_num, ebd_oft;
int bd_num, bd_err;
int ring_en, tc;
int i;
/* Find the stopped queue the same way the stack does */
@@ -2690,7 +2691,6 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
dql->last_obj_cnt, dql->num_queued,
dql->adj_limit, dql->num_completed);
#endif
timeout_queue = i;
netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
q->state,
jiffies_to_msecs(jiffies - trans_start));
@@ -2698,17 +2698,15 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
}
}
return i;
}
if (i == ndev->num_tx_queues) {
netdev_info(ndev,
"no netdev TX timeout queue found, timeout count: %llu\n",
priv->tx_timeout_count);
return false;
}
priv->tx_timeout_count++;
tx_ring = &priv->ring[timeout_queue];
napi = &tx_ring->tqp_vector->napi;
static void hns3_dump_queue_stats(struct net_device *ndev,
struct hns3_enet_ring *tx_ring,
int timeout_queue)
{
struct napi_struct *napi = &tx_ring->tqp_vector->napi;
struct hns3_nic_priv *priv = netdev_priv(ndev);
netdev_info(ndev,
"tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
@@ -2724,6 +2722,48 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
"seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
}
static void hns3_dump_queue_reg(struct net_device *ndev,
struct hns3_enet_ring *tx_ring)
{
netdev_info(ndev,
"BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG),
hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG),
hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG),
hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG),
readl(tx_ring->tqp_vector->mask_addr));
netdev_info(ndev,
"RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG),
hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG),
hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG),
hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG),
hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG),
hns3_tqp_read_reg(tx_ring,
HNS3_RING_TX_RING_EBD_OFFSET_REG));
}
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring;
int timeout_queue;
timeout_queue = hns3_get_timeout_queue(ndev);
if (timeout_queue >= ndev->num_tx_queues) {
netdev_info(ndev,
"no netdev TX timeout queue found, timeout count: %llu\n",
priv->tx_timeout_count);
return false;
}
priv->tx_timeout_count++;
tx_ring = &priv->ring[timeout_queue];
hns3_dump_queue_stats(ndev, tx_ring, timeout_queue);
/* When mac received many pause frames continuous, it's unable to send
* packets, which may cause tx timeout
@@ -2736,32 +2776,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
}
hns3_dump_queue_reg(ndev, tx_ring);
hw_head = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
hw_tail = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
fbd_num = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG);
fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG);
ebd_num = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_EBDNUM_REG);
ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_EBD_OFFSET_REG);
bd_num = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG);
bd_err = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_BD_ERR_REG);
ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
netdev_info(ndev,
"BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
bd_num, hw_head, hw_tail, bd_err,
readl(tx_ring->tqp_vector->mask_addr));
netdev_info(ndev,
"RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
return true;
}
@@ -3546,6 +3561,38 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
return page_count(cb->priv) == cb->pagecnt_bias;
}
static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring,
int pull_len,
struct hns3_desc_cb *desc_cb)
{
struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
u32 frag_offset = desc_cb->page_offset + pull_len;
int size = le16_to_cpu(desc->rx.size);
u32 frag_size = size - pull_len;
void *frag = napi_alloc_frag(frag_size);
if (unlikely(!frag)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.frag_alloc_err++;
u64_stats_update_end(&ring->syncp);
hns3_rl_err(ring_to_netdev(ring),
"failed to allocate rx frag\n");
return -ENOMEM;
}
desc_cb->reuse_flag = 1;
memcpy(frag, desc_cb->buf + frag_offset, frag_size);
skb_add_rx_frag(skb, i, virt_to_page(frag),
offset_in_page(frag), frag_size, frag_size);
u64_stats_update_begin(&ring->syncp);
ring->stats.frag_alloc++;
u64_stats_update_end(&ring->syncp);
return 0;
}
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -3555,6 +3602,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
int size = le16_to_cpu(desc->rx.size);
u32 truesize = hns3_buf_size(ring);
u32 frag_size = size - pull_len;
int ret = 0;
bool reused;
if (ring->page_pool) {
@@ -3589,27 +3637,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
desc_cb->page_offset = 0;
desc_cb->reuse_flag = 1;
} else if (frag_size <= ring->rx_copybreak) {
void *frag = napi_alloc_frag(frag_size);
ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
if (ret)
if (unlikely(!frag)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.frag_alloc_err++;
u64_stats_update_end(&ring->syncp);
hns3_rl_err(ring_to_netdev(ring),
"failed to allocate rx frag\n");
goto out;
}
desc_cb->reuse_flag = 1;
memcpy(frag, desc_cb->buf + frag_offset, frag_size);
skb_add_rx_frag(skb, i, virt_to_page(frag),
offset_in_page(frag), frag_size, frag_size);
u64_stats_update_begin(&ring->syncp);
ring->stats.frag_alloc++;
u64_stats_update_end(&ring->syncp);
return;
}
out:
@@ -4025,6 +4055,39 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
skb_set_hash(skb, rss_hash, rss_type);
}
static void hns3_handle_rx_ts_info(struct net_device *netdev,
struct hns3_desc *desc, struct sk_buff *skb,
u32 bd_base_info)
{
if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
struct hnae3_handle *h = hns3_get_handle(netdev);
u32 nsec = le32_to_cpu(desc->ts_nsec);
u32 sec = le32_to_cpu(desc->ts_sec);
if (h->ae_algo->ops->get_rx_hwts)
h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
}
}
static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring,
struct hns3_desc *desc, struct sk_buff *skb,
u32 l234info)
{
struct net_device *netdev = ring_to_netdev(ring);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
* in one layer tag case.
*/
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
u16 vlan_tag;
if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tag);
}
}
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
struct net_device *netdev = ring_to_netdev(ring);
@@ -4047,26 +4110,9 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ol_info = le32_to_cpu(desc->rx.ol_info);
csum = le16_to_cpu(desc->csum);
hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info);
if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
struct hnae3_handle *h = hns3_get_handle(netdev);
u32 nsec = le32_to_cpu(desc->ts_nsec);
u32 sec = le32_to_cpu(desc->ts_sec);
if (h->ae_algo->ops->get_rx_hwts)
h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
}
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
* in one layer tag case.
*/
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
u16 vlan_tag;
hns3_handle_rx_vlan_tag(ring, desc, skb, l234info);
if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tag);
}
if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
BIT(HNS3_RXD_L2E_B))))) {
......
@@ -621,6 +621,11 @@ static inline int ring_space(struct hns3_enet_ring *ring)
(begin - end)) - 1;
}
static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg)
{
return readl_relaxed(ring->tqp->io_base + reg);
}
static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
return readl(base + reg);
......
@@ -258,12 +258,29 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return 0;
}
static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
{HCLGE_MAC_TX_EN_B, "mac_trans_en"},
{HCLGE_MAC_RX_EN_B, "mac_rcv_en"},
{HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
{HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
{HCLGE_MAC_1588_TX_B, "1588_trans_en"},
{HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
{HCLGE_MAC_APP_LP_B, "mac_app_loop_en"},
{HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
{HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"},
{HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
{HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
{HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
{HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
};
static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
int len, int *pos)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
u32 loop_en;
u32 loop_en, i, offset;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
@@ -278,39 +295,12 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_mac_mode_cmd *)desc.data;
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
offset = hclge_dbg_mac_en_status[i].offset;
*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
hclge_dbg_mac_en_status[i].message,
hnae3_get_bit(loop_en, offset));
}
*pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
*pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
*pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
*pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
*pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
*pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
*pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
*pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
*pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
*pos += scnprintf(buf + *pos, len - *pos,
"mac_rx_oversize_truncate_en: %#x\n",
hnae3_get_bit(loop_en,
HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
*pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
*pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
*pos += scnprintf(buf + *pos, len - *pos,
"mac_tx_under_min_err_en: %#x\n",
hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
*pos += scnprintf(buf + *pos, len - *pos,
"mac_tx_oversize_truncate_en: %#x\n",
hnae3_get_bit(loop_en,
HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
return 0;
}
@@ -1614,8 +1604,19 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
{HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
{HCLGE_MISC_RESET_STS_REG, "reset interrupt source"},
{HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"},
{HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
{HCLGE_GLOBAL_RESET_REG, "hardware reset status"},
{HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
{HCLGE_FUN_RST_ING, "function reset status"}
};
int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
{
u32 i, offset;
int pos = 0;
pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
@@ -1634,22 +1635,14 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
hdev->rst_stats.reset_cnt);
pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
hdev->rst_stats.reset_fail_cnt);
for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
offset = hclge_dbg_rst_info[i].offset;
pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
hclge_dbg_rst_info[i].message,
hclge_read_dev(&hdev->hw, offset));
}
pos += scnprintf(buf + pos, len - pos,
"vector0 interrupt enable status: 0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n",
hclge_read_dev(&hdev->hw,
HCLGE_RAS_PF_OTHER_INT_STS_REG));
pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
hdev->state);
......
@@ -94,6 +94,11 @@ struct hclge_dbg_func {
char *buf, int len);
};
struct hclge_dbg_status_dfx_info {
u32 offset;
char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
};
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
......
@@ -2653,11 +2653,38 @@ static u8 hclge_check_speed_dup(u8 duplex, int speed)
return duplex;
}
struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
{HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
{HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
{HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
{HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
{HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
{HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
{HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
{HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
{HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
};
static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
{
u16 i;
for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
*speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
return 0;
}
}
return -EINVAL;
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
u8 duplex)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
u32 speed_fw;
int ret;
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
@@ -2667,48 +2694,14 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
if (duplex)
hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
ret = hclge_convert_to_fw_speed(speed, &speed_fw);
if (ret) {
switch (speed) {
case HCLGE_MAC_SPEED_10M:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
break;
case HCLGE_MAC_SPEED_100M:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
break;
case HCLGE_MAC_SPEED_1G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
break;
case HCLGE_MAC_SPEED_10G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
break;
case HCLGE_MAC_SPEED_25G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
break;
case HCLGE_MAC_SPEED_40G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
break;
case HCLGE_MAC_SPEED_50G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
break;
case HCLGE_MAC_SPEED_100G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
break;
case HCLGE_MAC_SPEED_200G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
break;
default:
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
return -EINVAL;
return ret;
}
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
@@ -11589,24 +11582,20 @@ static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
int retry_cnt = 0;
int ret;
retry:
while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
down(&hdev->reset_sem);
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
hdev->reset_type = rst_type;
ret = hclge_reset_prepare(hdev);
if (ret || hdev->reset_pending) {
dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
ret);
if (hdev->reset_pending ||
retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
dev_err(&hdev->pdev->dev,
"reset_pending:0x%lx, retry_cnt:%d\n",
hdev->reset_pending, retry_cnt);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
up(&hdev->reset_sem);
msleep(HCLGE_RESET_RETRY_WAIT_MS);
goto retry;
}
}
if (!ret && !hdev->reset_pending)
break;
dev_err(&hdev->pdev->dev,
"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
ret, hdev->reset_pending, retry_cnt);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
up(&hdev->reset_sem);
msleep(HCLGE_RESET_RETRY_WAIT_MS);
}
/* disable misc vector before reset done */
......
@@ -1095,6 +1095,11 @@ struct hclge_speed_bit_map {
u32 speed_bit;
};
struct hclge_mac_speed_map {
u32 speed_drv; /* speed defined in driver */
u32 speed_fw; /* speed defined in firmware */
};
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
......
@@ -916,38 +916,63 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
return 0;
}
static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
u16 i, k;
int ret;
/* Cfg qs -> pri mapping, one by one mapping */
for (k = 0; k < hdev->num_alloc_vport; k++) {
struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
for (i = 0; i < kinfo->tc_info.num_tc; i++) {
ret = hclge_tm_qs_to_pri_map_cfg(hdev,
vport[k].qs_offset + i,
i);
if (ret)
return ret;
}
}
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
int ret;
u32 i, k;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
/* Cfg qs -> pri mapping, one by one mapping */
for (k = 0; k < hdev->num_alloc_vport; k++) {
struct hnae3_knic_private_info *kinfo =
&vport[k].nic.kinfo;
for (i = 0; i < kinfo->tc_info.num_tc; i++) {
ret = hclge_tm_qs_to_pri_map_cfg(
hdev, vport[k].qs_offset + i, i);
if (ret)
return ret;
}
}
} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
for (k = 0; k < hdev->num_alloc_vport; k++)
for (i = 0; i < HNAE3_MAX_TC; i++) {
ret = hclge_tm_qs_to_pri_map_cfg(
hdev, vport[k].qs_offset + i, k);
if (ret)
return ret;
}
} else {
return -EINVAL;
} }
return 0;
}
static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
u16 i, k;
int ret;
/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
for (k = 0; k < hdev->num_alloc_vport; k++)
for (i = 0; i < HNAE3_MAX_TC; i++) {
ret = hclge_tm_qs_to_pri_map_cfg(hdev,
vport[k].qs_offset + i,
k);
if (ret)
return ret;
}
return 0;
}
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
int ret;
u32 i;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
else
return -EINVAL;
if (ret)
return ret;
/* Cfg q -> qs mapping */
for (i = 0; i < hdev->num_alloc_vport; i++) {
ret = hclge_vport_q_to_qs_map(hdev, vport);
@@ -1274,6 +1299,27 @@ static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
return 0;
}
static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
{
struct hclge_vport *vport = hdev->vport;
int ret;
u16 i;
ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
if (ret)
return ret;
for (i = 0; i < hdev->num_alloc_vport; i++) {
ret = hclge_tm_qs_schd_mode_cfg(hdev,
vport[i].qs_offset + pri_id,
HCLGE_SCH_MODE_DWRR);
if (ret)
return ret;
}
return 0;
}
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -1304,21 +1350,13 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
int ret;
u8 i, k;
u8 i;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
for (i = 0; i < hdev->tm_info.num_tc; i++) {
ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
if (ret)
return ret;
for (k = 0; k < hdev->num_alloc_vport; k++) {
ret = hclge_tm_qs_schd_mode_cfg(
hdev, vport[k].qs_offset + i,
HCLGE_SCH_MODE_DWRR);
if (ret)
return ret;
}
}
} else {
for (i = 0; i < hdev->num_alloc_vport; i++) {
......
@@ -2166,24 +2166,20 @@ static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
int retry_cnt = 0;
int ret;
retry:
while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
down(&hdev->reset_sem);
set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
hdev->reset_type = rst_type;
ret = hclgevf_reset_prepare(hdev);
if (ret) {
dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
ret);
if (hdev->reset_pending ||
retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
dev_err(&hdev->pdev->dev,
"reset_pending:0x%lx, retry_cnt:%d\n",
hdev->reset_pending, retry_cnt);
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
up(&hdev->reset_sem);
msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
goto retry;
}
}
if (!ret && !hdev->reset_pending)
break;
dev_err(&hdev->pdev->dev,
"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
ret, hdev->reset_pending, retry_cnt);
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
up(&hdev->reset_sem);
msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
}
/* disable misc vector before reset done */
......