Commit cdccf74b authored by David S. Miller

Merge branch 'hns-misc-fixes'

Salil Mehta says:

====================
net: hns: Misc. HNS Bug Fixes & Code Improvements

This patch set introduces various HNS bug fixes, optimizations and code
improvements.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d4f4b915 b4957ab0
......@@ -57,11 +57,15 @@ static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
/* hnae_free_buffer - release the buffer tracked by a descriptor cb
 * @ring: ring that owns @cb
 * @cb: descriptor control block whose buffer is to be released
 *
 * Frees an skb for DESC_TYPE_SKB entries, drops the page reference for
 * rx-ring entries, then wipes the cb so a stale pointer can never be
 * freed twice.
 */
static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	void *buf = cb->priv;

	if (unlikely(!buf))
		return;

	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)buf);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)buf);

	/* clear the whole cb (priv included) to prevent double free */
	memset(cb, 0, sizeof(*cb));
	cb->priv = NULL;
}
static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
......@@ -197,6 +201,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
ring->q = q;
ring->flags = flags;
spin_lock_init(&ring->lock);
assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
/* not matter for tx or rx ring, the ntc and ntc start from 0 */
......
......@@ -67,6 +67,8 @@ do { \
#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1)
#define AE_NAME_SIZE 16
#define BD_SIZE_2048_MAX_MTU 6000
/* some said the RX and TX RCB format should not be the same in the future. But
* it is the same now...
*/
......@@ -101,7 +103,6 @@ enum hnae_led_state {
#define HNS_RX_FLAG_L4ID_TCP 0x1
#define HNS_RX_FLAG_L4ID_SCTP 0x3
#define HNS_TXD_ASID_S 0
#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
#define HNS_TXD_BUFNUM_S 8
......@@ -273,6 +274,9 @@ struct hnae_ring {
/* statistic */
struct ring_stats stats;
/* ring lock for poll one */
spinlock_t lock;
dma_addr_t desc_dma_addr;
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 desc_num; /* total number of desc */
......@@ -483,11 +487,11 @@ struct hnae_ae_ops {
u32 auto_neg, u32 rx_en, u32 tx_en);
void (*get_coalesce_usecs)(struct hnae_handle *handle,
u32 *tx_usecs, u32 *rx_usecs);
void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
void (*get_max_coalesced_frames)(struct hnae_handle *handle,
u32 *tx_frames, u32 *rx_frames);
int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
int (*set_coalesce_frames)(struct hnae_handle *handle,
u32 coalesce_frames);
u32 tx_frames, u32 rx_frames);
void (*get_coalesce_range)(struct hnae_handle *handle,
u32 *tx_frames_low, u32 *rx_frames_low,
u32 *tx_frames_high, u32 *rx_frames_high,
......@@ -646,6 +650,41 @@ static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
}
/* when reinit buffer size, we should reinit buffer description */
static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h)
{
	int q, d;

	/* refresh every rx descriptor address from its cb's dma address */
	for (q = 0; q < h->q_num; q++) {
		struct hnae_ring *rx = &h->qs[q]->rx_ring;

		for (d = 0; d < rx->desc_num; d++)
			rx->desc[d].addr = cpu_to_le64(rx->desc_cb[d].dma);
	}

	wmb(); /* commit all data before submit */
}
/* when reinit buffer size, we should reinit page offset */
static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h)
{
	int q, d;

	for (q = 0; q < h->q_num; q++) {
		struct hnae_ring *rx = &h->qs[q]->rx_ring;

		for (d = 0; d < rx->desc_num; d++) {
			rx->desc_cb[d].page_offset = 0;
			/* rewrite the descriptor only when it is stale */
			if (rx->desc[d].addr !=
			    cpu_to_le64(rx->desc_cb[d].dma))
				rx->desc[d].addr =
					cpu_to_le64(rx->desc_cb[d].dma);
		}
	}

	wmb(); /* commit all data before submit */
}
#define hnae_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \
......
......@@ -267,8 +267,32 @@ static int hns_ae_clr_multicast(struct hnae_handle *handle)
/* hns_ae_set_mtu - change the MTU and the matching rx buffer size
 * @handle: hnae handle of the port
 * @new_mtu: new MTU requested by the stack
 *
 * On V2 hardware the rx buffer size is derived from the MTU (2048 bytes
 * is enough up to BD_SIZE_2048_MAX_MTU because the rx ring can chain up
 * to 3 BDs per packet); V1 keeps the fixed dsaf buffer size. Only when
 * the MAC accepts the new MTU are the ring buffer sizes reprogrammed.
 *
 * Returns 0 on success or a negative errno from hns_mac_set_mtu().
 */
static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
	struct hnae_queue *q;
	u32 rx_buf_size;
	int i, ret;

	/* when buf_size is 2048, max mtu is 6K for rx ring max bd num is 3. */
	if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
		if (new_mtu <= BD_SIZE_2048_MAX_MTU)
			rx_buf_size = 2048;
		else
			rx_buf_size = 4096;
	} else {
		rx_buf_size = mac_cb->dsaf_dev->buf_size;
	}

	ret = hns_mac_set_mtu(mac_cb, new_mtu, rx_buf_size);

	if (!ret) {
		/* reinit ring buf_size */
		for (i = 0; i < handle->q_num; i++) {
			q = handle->qs[i];
			q->rx_ring.buf_size = rx_buf_size;
			hns_rcb_set_rx_ring_bs(q, rx_buf_size);
		}
	}

	return ret;
}
static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
......@@ -463,15 +487,21 @@ static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
ring_pair->port_id_in_comm);
}
/* hns_ae_get_max_coalesced_frames - read the tx/rx coalesce waterlines
 * @handle: hnae handle of the port
 * @tx_frames: out, current tx coalesced-frames setting
 * @rx_frames: out, current rx coalesced-frames setting
 *
 * V1 hardware and debug ports have no separate tx waterline, so the rx
 * value is reported for both directions there.
 */
static void hns_ae_get_max_coalesced_frames(struct hnae_handle *handle,
					    u32 *tx_frames, u32 *rx_frames)
{
	struct ring_pair_cb *ring_pair =
		container_of(handle->qs[0], struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);

	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
	    handle->port_type == HNAE_PORT_DEBUG)
		*tx_frames = hns_rcb_get_rx_coalesced_frames(
			ring_pair->rcb_common, ring_pair->port_id_in_comm);
	else
		*tx_frames = hns_rcb_get_tx_coalesced_frames(
			ring_pair->rcb_common, ring_pair->port_id_in_comm);
	*rx_frames = hns_rcb_get_rx_coalesced_frames(ring_pair->rcb_common,
						     ring_pair->port_id_in_comm);
}
......@@ -486,14 +516,33 @@ static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
}
/* hns_ae_set_coalesce_frames - program the tx/rx coalesce waterlines
 * @handle: hnae handle of the port
 * @tx_frames: requested tx coalesced-frames value
 * @rx_frames: requested rx coalesced-frames value
 *
 * V1 hardware and debug ports share a single waterline, so tx and rx
 * must be equal there; on V2 the tx waterline only supports the value 1.
 *
 * Returns 0 on success or -EINVAL for an unsupported combination.
 */
static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
				      u32 tx_frames, u32 rx_frames)
{
	int ret;
	struct ring_pair_cb *ring_pair =
		container_of(handle->qs[0], struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);

	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
	    handle->port_type == HNAE_PORT_DEBUG) {
		if (tx_frames != rx_frames)
			return -EINVAL;
		return hns_rcb_set_rx_coalesced_frames(
			ring_pair->rcb_common,
			ring_pair->port_id_in_comm, rx_frames);
	} else {
		if (tx_frames != 1)
			return -EINVAL;
		ret = hns_rcb_set_tx_coalesced_frames(
			ring_pair->rcb_common,
			ring_pair->port_id_in_comm, tx_frames);
		if (ret)
			return ret;

		return hns_rcb_set_rx_coalesced_frames(
			ring_pair->rcb_common,
			ring_pair->port_id_in_comm, rx_frames);
	}
}
static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
......@@ -504,20 +553,27 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
{
struct dsaf_device *dsaf_dev;
assert(handle);
dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
*tx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
*rx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
*tx_frames_low = HNS_RCB_TX_FRAMES_LOW;
*rx_frames_low = HNS_RCB_RX_FRAMES_LOW;
if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
handle->port_type == HNAE_PORT_DEBUG)
*tx_frames_high =
(dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
*rx_frames_high =
(dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
*tx_usecs_low = 0;
*rx_usecs_low = 0;
*tx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
*rx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
(dsaf_dev->desc_num - 1 > HNS_RCB_TX_FRAMES_HIGH) ?
HNS_RCB_TX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
else
*tx_frames_high = 1;
*rx_frames_high = (dsaf_dev->desc_num - 1 > HNS_RCB_RX_FRAMES_HIGH) ?
HNS_RCB_RX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
*tx_usecs_low = HNS_RCB_TX_USECS_LOW;
*rx_usecs_low = HNS_RCB_RX_USECS_LOW;
*tx_usecs_high = HNS_RCB_TX_USECS_HIGH;
*rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
}
void hns_ae_update_stats(struct hnae_handle *handle,
......@@ -802,6 +858,7 @@ static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
/* update the current hash->queue mappings from the shadow RSS table */
if (indir)
memcpy(indir, ppe_cb->rss_indir_table,
HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
......@@ -814,15 +871,19 @@ static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
/* set the RSS Hash Key if specififed by the user */
if (key)
hns_ppe_set_rss_key(ppe_cb, (u32 *)key);
if (key) {
memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE);
hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
}
if (indir) {
/* update the shadow RSS table with user specified qids */
memcpy(ppe_cb->rss_indir_table, indir,
HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
/* now update the hardware */
hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
}
return 0;
}
......@@ -846,7 +907,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.get_autoneg = hns_ae_get_autoneg,
.set_pauseparam = hns_ae_set_pauseparam,
.get_coalesce_usecs = hns_ae_get_coalesce_usecs,
.get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
.get_max_coalesced_frames = hns_ae_get_max_coalesced_frames,
.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
.set_coalesce_frames = hns_ae_set_coalesce_frames,
.get_coalesce_range = hns_ae_get_coalesce_range,
......
......@@ -86,12 +86,11 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
}
/**
*hns_gmac_get_en - get port enable
*@mac_drv:mac device
*@rx:rx enable
*@tx:tx enable
*/
/* hns_gmac_get_en - get port enable
* @mac_drv:mac device
* @rx:rx enable
* @tx:tx enable
*/
static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
......@@ -148,6 +147,17 @@ static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
GMAC_MAX_FRM_SIZE_S, newval);
}
/* hns_gmac_config_pad_and_crc - toggle TX padding and CRC insertion
 * @mac_drv: mac driver handle
 * @newval: non-zero enables both pad and CRC append, zero disables them
 */
static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
{
	struct mac_driver *drv = (struct mac_driver *)mac_drv;
	u32 val;

	/* read-modify-write both bits of the transmit control register */
	val = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
	dsaf_set_bit(val, GMAC_TX_PAD_EN_B, !!newval);
	dsaf_set_bit(val, GMAC_TX_CRC_ADD_B, !!newval);
	dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, val);
}
static void hns_gmac_config_an_mode(void *mac_drv, u8 newval)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
......@@ -250,7 +260,6 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
u32 full_duplex)
{
u32 tx_ctrl;
struct mac_driver *drv = (struct mac_driver *)mac_drv;
dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
......@@ -279,14 +288,6 @@ static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
return -EINVAL;
}
tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, 1);
dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, 1);
dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
GMAC_MODE_CHANGE_EB_B, 1);
return 0;
}
......@@ -325,6 +326,17 @@ static void hns_gmac_init(void *mac_drv)
hns_gmac_tx_loop_pkt_dis(mac_drv);
if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
hns_gmac_set_uc_match(mac_drv, 0);
hns_gmac_config_pad_and_crc(mac_drv, 1);
dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
GMAC_MODE_CHANGE_EB_B, 1);
/* reduce gmac tx water line to avoid gmac hang-up
* in speed 100M and duplex half.
*/
dsaf_set_dev_field(drv, GMAC_TX_WATER_LINE_REG, GMAC_TX_WATER_LINE_MASK,
GMAC_TX_WATER_LINE_SHIFT, 8);
}
void hns_gmac_update_stats(void *mac_drv)
......@@ -453,24 +465,6 @@ static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
return 0;
}
static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
{
u32 tx_ctrl;
struct mac_driver *drv = (struct mac_driver *)mac_drv;
tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
}
static void hns_gmac_get_id(void *mac_drv, u8 *mac_id)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
*mac_id = drv->mac_id;
}
static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info)
{
enum hns_gmac_duplex_mdoe duplex;
......@@ -712,7 +706,6 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
mac_drv->config_half_duplex = hns_gmac_set_duplex_type;
mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames;
mac_drv->mac_get_id = hns_gmac_get_id;
mac_drv->get_info = hns_gmac_get_info;
mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
......
......@@ -332,44 +332,6 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
return 0;
}
/**
 * hns_mac_del_mac - delete mac address from dsaf table; the same address
 *		     can't be deleted twice
 * @mac_cb: mac control block of the port
 * @vfn: vf number that owns the entry
 * @mac: mac address to delete
 * return 0 on success, negative errno otherwise
 */
int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
{
	struct mac_entry_idx *old_mac;
	struct dsaf_device *dsaf_dev;
	int ret;

	dsaf_dev = mac_cb->dsaf_dev;

	if (vfn < DSAF_MAX_VM_NUM) {
		old_mac = &mac_cb->addr_entry_idx[vfn];
	} else {
		dev_err(mac_cb->dev,
			"vf queue is too large, %s mac%d queue = %#x!\n",
			mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
		return -EINVAL;
	}

	if (dsaf_dev) {
		/* remove the hardware table entry first */
		ret = hns_dsaf_del_mac_entry(dsaf_dev, old_mac->vlan_id,
					     mac_cb->mac_id, old_mac->addr);
		if (ret)
			return ret;

		/* invalidate the software shadow only when it matches */
		if (memcmp(old_mac->addr, mac, sizeof(old_mac->addr)) == 0)
			old_mac->valid = 0;
	}

	return 0;
}
int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
{
struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
......@@ -491,10 +453,9 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb)
}
}
int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size)
{
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
u32 buf_size = mac_cb->dsaf_dev->buf_size;
u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
MAC_MAX_MTU : MAC_MAX_MTU_V2;
......
......@@ -373,8 +373,6 @@ struct mac_driver {
void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
/* config rx mode for promiscuous*/
void (*set_promiscuous)(void *mac_drv, u8 enable);
/* get mac id */
void (*mac_get_id)(void *mac_drv, u8 *mac_id);
void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
void (*autoneg_stat)(void *mac_drv, u32 *enable);
......@@ -436,7 +434,6 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable);
void hns_mac_start(struct hns_mac_cb *mac_cb);
void hns_mac_stop(struct hns_mac_cb *mac_cb);
int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac);
void hns_mac_uninit(struct dsaf_device *dsaf_dev);
void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_reset(struct hns_mac_cb *mac_cb);
......@@ -444,7 +441,7 @@ void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg);
void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en);
int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable);
int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size);
int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
u8 *auto_neg, u16 *speed, u8 *duplex);
int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
......
......@@ -510,10 +510,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48);
DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 55);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80);
DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 110);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
/* for no enable pfc mode */
......@@ -521,10 +521,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192);
DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240);
DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
}
......@@ -1647,87 +1647,6 @@ int hns_dsaf_rm_mac_addr(
mac_entry->addr);
}
/**
 * hns_dsaf_set_mac_mc_entry - set mac mc-entry
 * @dsaf_dev: dsa fabric device struct pointer
 * @mac_entry: mc-mac entry
 *
 * Installs (or updates) a multicast TCAM entry keyed on the entry's
 * vlan/in-port/address and mirrors it into the driver's software shadow
 * table. Returns 0 on success, -EINVAL when the address is all-zero or
 * the TCAM has no free slot.
 */
int hns_dsaf_set_mac_mc_entry(
	struct dsaf_device *dsaf_dev,
	struct dsaf_drv_mac_multi_dest_entry *mac_entry)
{
	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
	struct dsaf_drv_tbl_tcam_key mac_key;
	struct dsaf_tbl_tcam_mcast_cfg mac_data;
	struct dsaf_drv_priv *priv =
		(struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
	struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
	struct dsaf_drv_tbl_tcam_key tmp_mac_key;
	struct dsaf_tbl_tcam_data tcam_data;

	/* mac addr check: an all-zero address is never a valid entry */
	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
		dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n",
			dsaf_dev->ae_dev.name, mac_entry->addr);
		return -EINVAL;
	}

	/* config key from vlan id, ingress port and mac address */
	hns_dsaf_set_mac_key(dsaf_dev, &mac_key,
			     mac_entry->in_vlan_id,
			     mac_entry->in_port_num, mac_entry->addr);

	/* does the entry already exist in the software shadow table? */
	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
		/* not found: claim an empty TCAM slot */
		entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
		if (entry_index == DSAF_INVALID_ENTRY_IDX) {
			/* table is full, give up */
			dev_err(dsaf_dev->dev,
				"set_uc_entry failed, %s Mac key(%#x:%#x)\n",
				dsaf_dev->ae_dev.name,
				mac_key.high.val, mac_key.low.val);
			return -EINVAL;
		}

		/* fresh entry: start from an empty port mask */
		memset(mac_data.tbl_mcast_port_msk,
		       0, sizeof(mac_data.tbl_mcast_port_msk));
	} else {
		/* existing entry: read current hardware state first */
		hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
				     &mac_data);

		/* NOTE(review): tmp_mac_key is filled here but never read
		 * afterwards in this function - looks like leftover code;
		 * confirm before removing.
		 */
		tmp_mac_key.high.val =
			le32_to_cpu(tcam_data.tbl_tcam_data_high);
		tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
	}
	mac_data.tbl_mcast_old_en = 0;
	mac_data.tbl_mcast_item_vld = 1;
	/* merge the requested ports into the low 6 bits of the mask */
	dsaf_set_field(mac_data.tbl_mcast_port_msk[0],
		       0x3F, 0, mac_entry->port_mask[0]);

	dev_dbg(dsaf_dev->dev,
		"set_uc_entry, %s key(%#x:%#x) entry_index%d\n",
		dsaf_dev->ae_dev.name, mac_key.high.val,
		mac_key.low.val, entry_index);

	tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
	tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);

	/* write the key and mcast config into the TCAM slot */
	hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, NULL,
			     &mac_data);

	/* config software entry so later lookups find this slot */
	soft_mac_entry += entry_index;
	soft_mac_entry->index = entry_index;
	soft_mac_entry->tcam_key.high.val = mac_key.high.val;
	soft_mac_entry->tcam_key.low.val = mac_key.low.val;

	return 0;
}
static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src)
{
u16 *a = (u16 *)dst;
......@@ -2089,166 +2008,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id,
return ret;
}
/**
 * hns_dsaf_get_mac_uc_entry - get mac uc entry
 * @dsaf_dev: dsa fabric device struct pointer
 * @mac_entry: mac entry
 *
 * Looks up the unicast TCAM entry matching @mac_entry's vlan/port/address
 * key and fills @mac_entry->port_num from the hardware table.
 * Returns 0 on success, -EINVAL when the address is invalid or no
 * matching entry exists.
 */
int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
			      struct dsaf_drv_mac_single_dest_entry *mac_entry)
{
	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
	struct dsaf_drv_tbl_tcam_key mac_key;

	struct dsaf_tbl_tcam_ucast_cfg mac_data;
	struct dsaf_tbl_tcam_data tcam_data;

	/* check macaddr: all-zero and broadcast cannot be uc entries */
	if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
	    MAC_IS_BROADCAST(mac_entry->addr)) {
		dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
			mac_entry->addr);
		return -EINVAL;
	}

	/* config lookup key from vlan, ingress port and address */
	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
			     mac_entry->in_port_num, mac_entry->addr);

	/* check the software shadow table for the slot index */
	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
		/* find none, error */
		dev_err(dsaf_dev->dev,
			"get_uc_entry failed, %s Mac key(%#x:%#x)\n",
			dsaf_dev->ae_dev.name,
			mac_key.high.val, mac_key.low.val);
		return -EINVAL;
	}
	dev_dbg(dsaf_dev->dev,
		"get_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
		dsaf_dev->ae_dev.name, mac_key.high.val,
		mac_key.low.val, entry_index);

	/* read the hardware entry */
	hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);

	mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
	mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);

	/* report the egress port back to the caller */
	mac_entry->port_num = mac_data.tbl_ucast_out_port;

	return 0;
}
/**
 * hns_dsaf_get_mac_mc_entry - get mac mc entry
 * @dsaf_dev: dsa fabric device struct pointer
 * @mac_entry: mac entry
 *
 * Looks up the multicast TCAM entry matching @mac_entry's
 * vlan/port/address key and fills @mac_entry->port_mask[0] from the
 * hardware table (low 6 bits only).
 * Returns 0 on success, -EINVAL when the address is invalid or no
 * matching entry exists.
 */
int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
			      struct dsaf_drv_mac_multi_dest_entry *mac_entry)
{
	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
	struct dsaf_drv_tbl_tcam_key mac_key;

	struct dsaf_tbl_tcam_mcast_cfg mac_data;
	struct dsaf_tbl_tcam_data tcam_data;

	/* check mac addr: all-zero and broadcast are rejected */
	if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
	    MAC_IS_BROADCAST(mac_entry->addr)) {
		dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
			mac_entry->addr);
		return -EINVAL;
	}

	/* config lookup key from vlan, ingress port and address */
	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
			     mac_entry->in_port_num, mac_entry->addr);

	/* check the software shadow table for the slot index */
	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
		/* find none, error */
		dev_err(dsaf_dev->dev,
			"get_mac_uc_entry failed, %s Mac key(%#x:%#x)\n",
			dsaf_dev->ae_dev.name, mac_key.high.val,
			mac_key.low.val);
		return -EINVAL;
	}
	dev_dbg(dsaf_dev->dev,
		"get_mac_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
		dsaf_dev->ae_dev.name, mac_key.high.val,
		mac_key.low.val, entry_index);

	/* read the hardware entry */
	hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);

	mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
	mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);

	/* only the low 6 bits of the port mask are meaningful */
	mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;

	return 0;
}
/**
 * hns_dsaf_get_mac_entry_by_index - get mac entry by tab index
 * @dsaf_dev: dsa fabric device struct pointer
 * @entry_index: tab entry index
 * @mac_entry: mac entry
 *
 * Reads TCAM slot @entry_index. The slot is first read through the
 * multicast view; if the decoded address turns out to be unicast the
 * slot is re-read through the unicast view and the port mask rebuilt
 * from the single egress port.
 * Returns 0 on success, -EINVAL when @entry_index is out of range.
 */
int hns_dsaf_get_mac_entry_by_index(
	struct dsaf_device *dsaf_dev,
	u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry)
{
	struct dsaf_drv_tbl_tcam_key mac_key;

	struct dsaf_tbl_tcam_mcast_cfg mac_data;
	struct dsaf_tbl_tcam_ucast_cfg mac_uc_data;
	struct dsaf_tbl_tcam_data tcam_data;
	char mac_addr[ETH_ALEN] = {0};

	if (entry_index >= dsaf_dev->tcam_max_num) {
		/* index out of TCAM range */
		dev_err(dsaf_dev->dev, "get_uc_entry failed, %s\n",
			dsaf_dev->ae_dev.name);
		return -EINVAL;
	}

	/* read the slot through the multicast view first */
	hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);

	mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
	mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);

	mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;

	/* reassemble the mac address from the key fields */
	mac_addr[0] = mac_key.high.bits.mac_0;
	mac_addr[1] = mac_key.high.bits.mac_1;
	mac_addr[2] = mac_key.high.bits.mac_2;
	mac_addr[3] = mac_key.high.bits.mac_3;
	mac_addr[4] = mac_key.low.bits.mac_4;
	mac_addr[5] = mac_key.low.bits.mac_5;

	/* decide whether the slot holds a mc or a uc entry */
	if (MAC_IS_MULTICAST((u8 *)mac_addr) ||
	    MAC_IS_L3_MULTICAST((u8 *)mac_addr)) {
		/* multicast: the mc read above is already correct */
	} else {
		/* unicast: re-read through the uc view */
		hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data,
				     &mac_uc_data);

		mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
		mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);

		/* uc entry has exactly one egress port in the mask */
		mac_entry->port_mask[0] = (1 << mac_uc_data.tbl_ucast_out_port);
	}

	return 0;
}
static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
size_t sizeof_priv)
{
......
......@@ -68,7 +68,7 @@ enum dsaf_roce_qos_sl {
};
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
enum hal_dsaf_mode {
HRD_DSAF_NO_DSAF_MODE = 0x0,
......@@ -429,23 +429,12 @@ static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry);
int hns_dsaf_set_mac_mc_entry(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_multi_dest_entry *mac_entry);
int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry);
int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
u8 in_port_num, u8 *addr);
int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry);
int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry);
int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_multi_dest_entry *mac_entry);
int hns_dsaf_get_mac_entry_by_index(
struct dsaf_device *dsaf_dev,
u16 entry_index,
struct dsaf_drv_mac_multi_dest_entry *mac_entry);
void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
......@@ -475,5 +464,4 @@ int hns_dsaf_rm_mac_addr(
int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
#endif /* __HNS_DSAF_MAIN_H__ */
......@@ -496,17 +496,17 @@ void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
*/
int hns_ppe_init(struct dsaf_device *dsaf_dev)
{
int i, k;
int ret;
int i;
for (i = 0; i < HNS_PPE_COM_NUM; i++) {
ret = hns_ppe_common_get_cfg(dsaf_dev, i);
if (ret)
goto get_ppe_cfg_fail;
goto get_cfg_fail;
ret = hns_rcb_common_get_cfg(dsaf_dev, i);
if (ret)
goto get_rcb_cfg_fail;
goto get_cfg_fail;
hns_ppe_get_cfg(dsaf_dev->ppe_common[i]);
......@@ -518,13 +518,12 @@ int hns_ppe_init(struct dsaf_device *dsaf_dev)
return 0;
get_rcb_cfg_fail:
get_cfg_fail:
for (i = 0; i < HNS_PPE_COM_NUM; i++) {
hns_rcb_common_free_cfg(dsaf_dev, i);
hns_ppe_common_free_cfg(dsaf_dev, i);
get_ppe_cfg_fail:
for (k = i - 1; k >= 0; k--) {
hns_rcb_common_free_cfg(dsaf_dev, k);
hns_ppe_common_free_cfg(dsaf_dev, k);
}
return ret;
}
......
......@@ -32,6 +32,9 @@
#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10
/* Because default mtu is 1500, rcb buffer size is set to 2048 enough */
#define RCB_DEFAULT_BUFFER_SIZE 2048
/**
*hns_rcb_wait_fbd_clean - clean fbd
*@qs: ring struct pointer array
......@@ -192,6 +195,30 @@ void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
wmb(); /* Sync point after breakpoint */
}
/* hns_rcb_set_tx_ring_bs - program the tx ring buffer size register
 *@q: hnae_queue
 *@buf_size: buffer size set to hw
 */
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
{
	/* hardware takes an encoded size type, not the raw byte count */
	u32 size_type = hns_rcb_buf_size2type(buf_size);

	dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, size_type);
}
/* hns_rcb_set_rx_ring_bs - program the rx ring buffer size register
 *@q: hnae_queue
 *@buf_size: buffer size set to hw
 */
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
{
	/* hardware takes an encoded size type, not the raw byte count */
	u32 size_type = hns_rcb_buf_size2type(buf_size);

	dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, size_type);
}
/**
*hns_rcb_ring_init - init rcb ring
*@ring_pair: ring pair control block
......@@ -200,8 +227,6 @@ void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
struct hnae_queue *q = &ring_pair->q;
struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
struct hnae_ring *ring =
(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
dma_addr_t dma = ring->desc_dma_addr;
......@@ -212,8 +237,8 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
bd_size_type);
hns_rcb_set_rx_ring_bs(q, ring->buf_size);
dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
ring_pair->port_id_in_comm);
dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
......@@ -224,12 +249,12 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
bd_size_type);
hns_rcb_set_tx_ring_bs(q, ring->buf_size);
dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
ring_pair->port_id_in_comm);
dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
ring_pair->port_id_in_comm);
ring_pair->port_id_in_comm + HNS_RCB_TX_PKTLINE_OFFSET);
}
}
......@@ -259,13 +284,27 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
/* hns_rcb_set_port_timeout - program per-port interrupt coalesce timeout
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 * @timeout: coalesce timeout to program
 *
 * V1 hardware uses a single clock-scaled register; V2 non-debug ports
 * additionally cap the interrupt gap time at HNS_RCB_DEF_GAP_TIME_USECS
 * before writing the per-port overtime register.
 */
static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	} else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
		if (timeout > HNS_RCB_DEF_GAP_TIME_USECS)
			dsaf_write_dev(rcb_common,
				       RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
				       HNS_RCB_DEF_GAP_TIME_USECS);
		else
			dsaf_write_dev(rcb_common,
				       RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
				       timeout);

		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
	} else {
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
	}
}
static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
......@@ -327,8 +366,12 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
for (i = 0; i < port_num; i++) {
hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
(void)hns_rcb_set_coalesced_frames(
rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
hns_rcb_set_rx_coalesced_frames(
rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES);
if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) &&
!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
hns_rcb_set_tx_coalesced_frames(
rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES);
hns_rcb_set_port_timeout(
rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
}
......@@ -380,7 +423,6 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
struct hnae_ring *ring;
struct rcb_common_cb *rcb_common;
struct ring_pair_cb *ring_pair_cb;
u32 buf_size;
u16 desc_num, mdnum_ppkt;
bool irq_idx, is_ver1;
......@@ -401,7 +443,6 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
}
rcb_common = ring_pair_cb->rcb_common;
buf_size = rcb_common->dsaf_dev->buf_size;
desc_num = rcb_common->dsaf_dev->desc_num;
ring->desc = NULL;
......@@ -410,7 +451,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
ring->irq = ring_pair_cb->virq[irq_idx];
ring->desc_dma_addr = 0;
ring->buf_size = buf_size;
ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
ring->desc_num = desc_num;
ring->max_desc_num_per_pkt = mdnum_ppkt;
ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
......@@ -430,7 +471,6 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
static int hns_rcb_get_port_in_comm(
struct rcb_common_cb *rcb_common, int ring_idx)
{
return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
......@@ -484,18 +524,34 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
}
/**
 *hns_rcb_get_rx_coalesced_frames - get rcb port rx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_rx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}
/**
 *hns_rcb_get_tx_coalesced_frames - get rcb port tx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_tx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	/* tx waterline registers sit HNS_RCB_TX_PKTLINE_OFFSET ports
	 * beyond the rx ones in the same register bank
	 */
	u64 offset = RCB_CFG_PKTLINE_REG +
		     (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;

	return dsaf_read_dev(rcb_common, offset);
}
/**
*hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
*@rcb_common: rcb_common device
......@@ -538,33 +594,47 @@ int hns_rcb_set_coalesce_usecs(
return -EINVAL;
}
}
if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
dev_err(rcb_common->dsaf_dev->dev,
"error: coalesce_usecs setting supports 0~1023us\n");
"error: coalesce_usecs setting supports 1~1023us\n");
return -EINVAL;
}
hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
return 0;
}
if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
if (timeout == 0)
/* set timeout to 0, Disable gap time */
dsaf_set_reg_field(rcb_common->io_base,
RCB_INT_GAP_TIME_REG + port_idx * 4,
PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
0);
else
/* set timeout non 0, restore gap time to 1 */
dsaf_set_reg_field(rcb_common->io_base,
RCB_INT_GAP_TIME_REG + port_idx * 4,
PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
1);
/**
*hns_rcb_set_tx_coalesced_frames - set rcb coalesced frames
*@rcb_common: rcb_common device
*@port_idx:port id in comm
*@coalesced_frames:tx/rx BD num for coalesced frames
*
* Returns:
* Zero for success, or an error code in case of failure
*/
int hns_rcb_set_tx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
u32 old_waterline =
hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
u64 reg;
if (coalesced_frames == old_waterline)
return 0;
if (coalesced_frames != 1) {
dev_err(rcb_common->dsaf_dev->dev,
"error: not support tx coalesce_frames setting!\n");
return -EINVAL;
}
hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
dsaf_write_dev(rcb_common, reg, coalesced_frames);
return 0;
}
/**
*hns_rcb_set_coalesced_frames - set rcb coalesced frames
*hns_rcb_set_rx_coalesced_frames - set rcb rx coalesced frames
*@rcb_common: rcb_common device
*@port_idx:port id in comm
*@coalesced_frames:tx/rx BD num for coalesced frames
......@@ -572,10 +642,11 @@ int hns_rcb_set_coalesce_usecs(
* Returns:
* Zero for success, or an error code in case of failure
*/
int hns_rcb_set_coalesced_frames(
int hns_rcb_set_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
u32 old_waterline =
hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx);
if (coalesced_frames == old_waterline)
return 0;
......
......@@ -35,12 +35,23 @@ struct rcb_common_cb;
#define HNS_RCB_REG_OFFSET 0x10000
#define HNS_RCB_TX_FRAMES_LOW 1
#define HNS_RCB_RX_FRAMES_LOW 1
#define HNS_RCB_TX_FRAMES_HIGH 1023
#define HNS_RCB_RX_FRAMES_HIGH 1023
#define HNS_RCB_TX_USECS_LOW 1
#define HNS_RCB_RX_USECS_LOW 1
#define HNS_RCB_TX_USECS_HIGH 1023
#define HNS_RCB_RX_USECS_HIGH 1023
#define HNS_RCB_MAX_COALESCED_FRAMES 1023
#define HNS_RCB_MIN_COALESCED_FRAMES 1
#define HNS_RCB_DEF_COALESCED_FRAMES 50
#define HNS_RCB_DEF_RX_COALESCED_FRAMES 50
#define HNS_RCB_DEF_TX_COALESCED_FRAMES 1
#define HNS_RCB_CLK_FREQ_MHZ 350
#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
#define HNS_RCB_DEF_COALESCED_USECS 50
#define HNS_RCB_DEF_COALESCED_USECS 30
#define HNS_RCB_DEF_GAP_TIME_USECS 20
#define HNS_RCB_TX_PKTLINE_OFFSET 8
#define HNS_RCB_COMMON_ENDIAN 1
......@@ -125,13 +136,17 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
void hns_rcb_init_hw(struct ring_pair_cb *ring);
void hns_rcb_reset_ring_hw(struct hnae_queue *q);
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
u32 hns_rcb_get_coalesced_frames(
u32 hns_rcb_get_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx);
u32 hns_rcb_get_tx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx);
u32 hns_rcb_get_coalesce_usecs(
struct rcb_common_cb *rcb_common, u32 port_idx);
int hns_rcb_set_coalesce_usecs(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
int hns_rcb_set_coalesced_frames(
int hns_rcb_set_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
int hns_rcb_set_tx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
void hns_rcb_update_stats(struct hnae_queue *queue);
......@@ -146,4 +161,7 @@ int hns_rcb_get_ring_regs_count(void);
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
void hns_rcb_get_strings(int stringset, u8 *data, int index);
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
#endif /* _HNS_DSAF_RCB_H */
......@@ -421,7 +421,7 @@
#define RCB_CFG_OVERTIME_REG 0x9300
#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
#define RCB_INT_GAP_TIME_REG 0x9400
#define RCB_PORT_INT_GAPTIME_REG 0x9400
#define RCB_PORT_CFG_OVERTIME_REG 0x9430
#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
......@@ -466,6 +466,7 @@
#define GMAC_DUPLEX_TYPE_REG 0x0008UL
#define GMAC_FD_FC_TYPE_REG 0x000CUL
#define GMAC_TX_WATER_LINE_REG 0x0010UL
#define GMAC_FC_TX_TIMER_REG 0x001CUL
#define GMAC_FD_FC_ADDR_LOW_REG 0x0020UL
#define GMAC_FD_FC_ADDR_HIGH_REG 0x0024UL
......@@ -912,6 +913,9 @@
#define GMAC_DUPLEX_TYPE_B 0
#define GMAC_TX_WATER_LINE_MASK ((1UL << 8) - 1)
#define GMAC_TX_WATER_LINE_SHIFT 0
#define GMAC_FC_TX_TIMER_S 0
#define GMAC_FC_TX_TIMER_M 0xffff
......
......@@ -299,18 +299,6 @@ static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable)
dsaf_write_dev(drv, XGMAC_MAC_PAUSE_TIME_REG, enable);
}
/**
 *hns_xgmac_get_id - get xgmac port id
 *@mac_drv: mac driver
 *@mac_id: addr in which to store the id of this xgmac port
 */
static void hns_xgmac_get_id(void *mac_drv, u8 *mac_id)
{
	struct mac_driver *drv = (struct mac_driver *)mac_drv;
	*mac_id = drv->mac_id;
}
/**
*hns_xgmac_config_max_frame_length - set xgmac max frame length
*@mac_drv: mac driver
......@@ -833,7 +821,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->config_half_duplex = NULL;
mac_drv->set_rx_ignore_pause_frames =
hns_xgmac_set_rx_ignore_pause_frames;
mac_drv->mac_get_id = hns_xgmac_get_id;
mac_drv->mac_free = hns_xgmac_free;
mac_drv->adjust_link = NULL;
mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
......
......@@ -512,7 +512,8 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
int last_offset;
bool twobufs;
twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
twobufs = ((PAGE_SIZE < 8192) &&
hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
desc = &ring->desc[ring->next_to_clean];
size = le16_to_cpu(desc->rx.size);
......@@ -859,7 +860,7 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
return recv_pkts;
}
static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int num = 0;
......@@ -873,22 +874,23 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 1);
napi_schedule(&ring_data->napi);
return false;
} else {
return true;
}
}
static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int num = 0;
int num;
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
if (num == 0)
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring, 0);
if (!num)
return true;
else
napi_schedule(&ring_data->napi);
return false;
}
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
......@@ -921,12 +923,13 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#define NETIF_TX_LOCK(ring)
#define NETIF_TX_UNLOCK(ring)
#endif
/* reclaim all desc in one budget
* return error or number of desc left
*/
......@@ -940,13 +943,13 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
int head;
int bytes, pkts;
NETIF_TX_LOCK(ndev);
NETIF_TX_LOCK(ring);
head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
rmb(); /* make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean) {
NETIF_TX_UNLOCK(ndev);
NETIF_TX_UNLOCK(ring);
return 0; /* no data to poll */
}
......@@ -954,7 +957,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
ring->stats.io_err_cnt++;
NETIF_TX_UNLOCK(ndev);
NETIF_TX_UNLOCK(ring);
return -EIO;
}
......@@ -966,7 +969,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
NETIF_TX_UNLOCK(ndev);
NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_completed_queue(dev_queue, pkts, bytes);
......@@ -989,7 +992,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
return 0;
}
static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int head;
......@@ -1002,20 +1005,21 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 1);
napi_schedule(&ring_data->napi);
return false;
} else {
return true;
}
}
static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head == ring->next_to_clean)
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring, 0);
return true;
else
napi_schedule(&ring_data->napi);
return false;
}
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
......@@ -1026,7 +1030,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
int head;
int bytes, pkts;
NETIF_TX_LOCK(ndev);
NETIF_TX_LOCK(ring);
head = ring->next_to_use; /* ntu :soft setted ring position*/
bytes = 0;
......@@ -1034,7 +1038,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
while (head != ring->next_to_clean)
hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
NETIF_TX_UNLOCK(ndev);
NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_reset_queue(dev_queue);
......@@ -1042,15 +1046,23 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
int clean_complete = 0;
struct hns_nic_ring_data *ring_data =
container_of(napi, struct hns_nic_ring_data, napi);
int clean_complete = ring_data->poll_one(
ring_data, budget, ring_data->ex_process);
struct hnae_ring *ring = ring_data->ring;
try_again:
clean_complete += ring_data->poll_one(
ring_data, budget - clean_complete,
ring_data->ex_process);
if (clean_complete >= 0 && clean_complete < budget) {
if (clean_complete < budget) {
if (ring_data->fini_process(ring_data)) {
napi_complete(napi);
ring_data->fini_process(ring_data);
return 0;
ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
} else {
goto try_again;
}
}
return clean_complete;
......@@ -1196,54 +1208,31 @@ static void hns_nic_ring_close(struct net_device *netdev, int idx)
napi_disable(&priv->ring_data[idx].napi);
}
static void hns_set_irq_affinity(struct hns_nic_priv *priv)
static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
struct hnae_ring *ring, cpumask_t *mask)
{
struct hnae_handle *h = priv->ae_handle;
struct hns_nic_ring_data *rd;
int i;
int cpu;
cpumask_var_t mask;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return;
/*diffrent irq banlance for 16core and 32core*/
if (h->q_num == num_possible_cpus()) {
for (i = 0; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
if (cpu_online(rd->queue_index)) {
cpumask_clear(mask);
cpu = rd->queue_index;
cpumask_set_cpu(cpu, mask);
(void)irq_set_affinity_hint(rd->ring->irq,
mask);
}
}
/* Different irq balancing between 16-core and 32-core systems.
 * The cpu mask is set by ring index according to the ring flag
 * which indicates whether the ring is tx or rx.
 */
if (q_num == num_possible_cpus()) {
if (is_tx_ring(ring))
cpu = ring_idx;
else
cpu = ring_idx - q_num;
} else {
for (i = 0; i < h->q_num; i++) {
rd = &priv->ring_data[i];
if (cpu_online(rd->queue_index * 2)) {
cpumask_clear(mask);
cpu = rd->queue_index * 2;
cpumask_set_cpu(cpu, mask);
(void)irq_set_affinity_hint(rd->ring->irq,
mask);
}
if (is_tx_ring(ring))
cpu = ring_idx * 2;
else
cpu = (ring_idx - q_num) * 2 + 1;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
if (cpu_online(rd->queue_index * 2 + 1)) {
cpumask_clear(mask);
cpu = rd->queue_index * 2 + 1;
cpumask_set_cpu(cpu, mask);
(void)irq_set_affinity_hint(rd->ring->irq,
mask);
}
}
}
free_cpumask_var(mask);
return cpu;
}
static int hns_nic_init_irq(struct hns_nic_priv *priv)
......@@ -1252,6 +1241,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
struct hns_nic_ring_data *rd;
int i;
int ret;
int cpu;
for (i = 0; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
......@@ -1261,7 +1251,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
"%s-%s%d", priv->netdev->name,
(i < h->q_num ? "tx" : "rx"), rd->queue_index);
(is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
......@@ -1273,12 +1263,17 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
return ret;
}
disable_irq(rd->ring->irq);
cpu = hns_nic_init_affinity_mask(h->q_num, i,
rd->ring, &rd->mask);
if (cpu_online(cpu))
irq_set_affinity_hint(rd->ring->irq,
&rd->mask);
rd->ring->irq_init_flag = RCB_IRQ_INITED;
}
/*set cpu affinity*/
hns_set_irq_affinity(priv);
return 0;
}
......@@ -1487,33 +1482,260 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
return (netdev_tx_t)ret;
}
/* RX post-process hook used while flushing stale chip-fetched descriptors:
 * the looped-back self-test frames carry nothing of interest, so just free
 * the skb. @ring_data is unused but keeps the ex_process callback signature.
 */
static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
				  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
#define HNS_LB_TX_RING 0
/* Build a 64-byte test frame used to flush stale RX descriptors: header
 * half filled with 0xFF, data half with 0xAA, IP ethertype, always mapped
 * to the loopback TX ring.
 */
static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
{
	struct sk_buff *new_skb;
	struct ethhdr *eth;
	int half_len;

	/* allocate test skb */
	new_skb = alloc_skb(64, GFP_KERNEL);
	if (!new_skb)
		return NULL;

	skb_put(new_skb, 64);
	new_skb->dev = ndev;
	memset(new_skb->data, 0xFF, new_skb->len);

	/* must be tcp/ip package */
	eth = (struct ethhdr *)new_skb->data;
	eth->h_proto = htons(ETH_P_IP);

	half_len = (new_skb->len & ~1ul) / 2;
	memset(&new_skb->data[half_len], 0xAA, half_len - 1);

	new_skb->queue_mapping = HNS_LB_TX_RING;

	return new_skb;
}
/* Enable SERDES internal loopback on the handle's MAC, start the engine
 * and force the link up. Returns 0 on success or a negative errno from
 * the AE ops. Paired with hns_disable_serdes_lb().
 */
static int hns_enable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	int duplex = 1;
	int speed;
	int ret;

	ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
	if (ret)
		return ret;

	if (ops->start) {
		ret = ops->start(h);
		if (ret)
			return ret;
	}

	/* link adjust duplex: XGMII ports run at 10G, others at 1G */
	speed = (h->phy_if == PHY_INTERFACE_MODE_XGMII) ? 10000 : 1000;
	ops->adjust_link(h, speed, duplex);

	/* wait h/w ready */
	mdelay(300);

	return 0;
}
/* Undo hns_enable_serdes_lb(): stop the MAC engine and switch SERDES
 * internal loopback back off.
 */
static void hns_disable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;

	ops->stop(h);
	ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
}
/**
*hns_nic_clear_all_rx_fetch - clear the chip fetched descriptions. The
*function as follows:
* 1. if one rx ring has found the page_offset is not equal 0 between head
* and tail, it means that the chip fetched the wrong descs for the ring
* which buffer size is 4096.
* 2. we set the chip serdes loopback and set rss indirection to the ring.
* 3. construct 64-bytes ip broadcast packages, wait the associated rx ring
* recieving all packages and it will fetch new descriptions.
* 4. recover to the original state.
*
*@ndev: net device
*/
static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
struct hnae_ae_ops *ops = h->dev->ops;
struct hns_nic_ring_data *rd;
struct hnae_ring *ring;
struct sk_buff *skb;
u32 *org_indir;
u32 *cur_indir;
int indir_size;
int head, tail;
int fetch_num;
int i, j;
bool found;
int retry_times;
int ret = 0;
/* alloc indir memory */
indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
org_indir = kzalloc(indir_size, GFP_KERNEL);
if (!org_indir)
return -ENOMEM;
/* store the orginal indirection */
ops->get_rss(h, org_indir, NULL, NULL);
cur_indir = kzalloc(indir_size, GFP_KERNEL);
if (!cur_indir) {
ret = -ENOMEM;
goto cur_indir_alloc_err;
}
/* set loopback */
if (hns_enable_serdes_lb(ndev)) {
ret = -EINVAL;
goto enable_serdes_lb_err;
}
/* foreach every rx ring to clear fetch desc */
for (i = 0; i < h->q_num; i++) {
ring = &h->qs[i]->rx_ring;
head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
found = false;
fetch_num = ring_dist(ring, head, tail);
while (head != tail) {
if (ring->desc_cb[head].page_offset != 0) {
found = true;
break;
}
head++;
if (head == ring->desc_num)
head = 0;
}
if (found) {
for (j = 0; j < indir_size / sizeof(*org_indir); j++)
cur_indir[j] = i;
ops->set_rss(h, cur_indir, NULL, 0);
for (j = 0; j < fetch_num; j++) {
/* alloc one skb and init */
skb = hns_assemble_skb(ndev);
if (!skb)
goto out;
rd = &tx_ring_data(priv, skb->queue_mapping);
hns_nic_net_xmit_hw(ndev, skb, rd);
retry_times = 0;
while (retry_times++ < 10) {
mdelay(10);
/* clean rx */
rd = &rx_ring_data(priv, i);
if (rd->poll_one(rd, fetch_num,
hns_nic_drop_rx_fetch))
break;
}
retry_times = 0;
while (retry_times++ < 10) {
mdelay(10);
/* clean tx ring 0 send package */
rd = &tx_ring_data(priv,
HNS_LB_TX_RING);
if (rd->poll_one(rd, fetch_num, NULL))
break;
}
}
}
}
out:
/* restore everything */
ops->set_rss(h, org_indir, NULL, 0);
hns_disable_serdes_lb(ndev);
enable_serdes_lb_err:
kfree(cur_indir);
cur_indir_alloc_err:
kfree(org_indir);
return ret;
}
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
bool if_running = netif_running(ndev);
int ret;
/* MTU < 68 is an error and causes problems on some kernels */
if (new_mtu < 68)
return -EINVAL;
/* MTU no change */
if (new_mtu == ndev->mtu)
return 0;
if (!h->dev->ops->set_mtu)
return -ENOTSUPP;
if (netif_running(ndev)) {
if (if_running) {
(void)hns_nic_net_stop(ndev);
msleep(100);
}
if (priv->enet_ver != AE_VERSION_1 &&
ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
new_mtu > BD_SIZE_2048_MAX_MTU) {
/* update desc */
hnae_reinit_all_ring_desc(h);
/* clear the package which the chip has fetched */
ret = hns_nic_clear_all_rx_fetch(ndev);
/* the page offset must be consist with desc */
hnae_reinit_all_ring_page_off(h);
if (ret) {
netdev_err(ndev, "clear the fetched desc fail\n");
goto out;
}
}
ret = h->dev->ops->set_mtu(h, new_mtu);
if (ret)
if (ret) {
netdev_err(ndev, "set mtu fail, return value %d\n",
ret);
if (hns_nic_net_open(ndev))
netdev_err(ndev, "hns net open fail\n");
} else {
ret = h->dev->ops->set_mtu(h, new_mtu);
goto out;
}
if (!ret)
/* finally, set new mtu to netdevice */
ndev->mtu = new_mtu;
out:
if (if_running) {
if (hns_nic_net_open(ndev)) {
netdev_err(ndev, "hns net open fail\n");
ret = -EINVAL;
}
}
return ret;
}
......@@ -1791,7 +2013,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
/* make sure to commit the things */
smp_mb__before_atomic();
clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}
......
......@@ -37,10 +37,11 @@ enum hns_nic_state {
struct hns_nic_ring_data {
struct hnae_ring *ring;
struct napi_struct napi;
cpumask_t mask; /* affinity mask */
int queue_index;
int (*poll_one)(struct hns_nic_ring_data *, int, void *);
void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
void (*fini_process)(struct hns_nic_ring_data *);
bool (*fini_process)(struct hns_nic_ring_data *);
};
/* compatible the difference between two versions */
......
......@@ -146,7 +146,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
/* When there is no phy, autoneg is off. */
cmd->base.autoneg = false;
cmd->base.cmd = speed;
cmd->base.speed = speed;
cmd->base.duplex = duplex;
if (net_dev->phydev)
......@@ -764,14 +764,14 @@ static int hns_get_coalesce(struct net_device *net_dev,
ec->use_adaptive_tx_coalesce = 1;
if ((!ops->get_coalesce_usecs) ||
(!ops->get_rx_max_coalesced_frames))
(!ops->get_max_coalesced_frames))
return -ESRCH;
ops->get_coalesce_usecs(priv->ae_handle,
&ec->tx_coalesce_usecs,
&ec->rx_coalesce_usecs);
ops->get_rx_max_coalesced_frames(
ops->get_max_coalesced_frames(
priv->ae_handle,
&ec->tx_max_coalesced_frames,
&ec->rx_max_coalesced_frames);
......@@ -801,30 +801,28 @@ static int hns_set_coalesce(struct net_device *net_dev,
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
int ret;
int rc1, rc2;
ops = priv->ae_handle->dev->ops;
if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
return -EINVAL;
if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames)
return -EINVAL;
if ((!ops->set_coalesce_usecs) ||
(!ops->set_coalesce_frames))
return -ESRCH;
ret = ops->set_coalesce_usecs(priv->ae_handle,
rc1 = ops->set_coalesce_usecs(priv->ae_handle,
ec->rx_coalesce_usecs);
if (ret)
return ret;
ret = ops->set_coalesce_frames(
priv->ae_handle,
rc2 = ops->set_coalesce_frames(priv->ae_handle,
ec->tx_max_coalesced_frames,
ec->rx_max_coalesced_frames);
return ret;
if (rc1 || rc2)
return -EINVAL;
return 0;
}
/**
......@@ -1253,12 +1251,10 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
ops = priv->ae_handle->dev->ops;
/* currently hfunc can only be Toeplitz hash */
if (key ||
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
netdev_err(netdev, "Invalid hfunc!\n");
return -EOPNOTSUPP;
if (!indir)
return 0;
}
return ops->set_rss(priv->ae_handle, indir, key, hfunc);
}
......
......@@ -23,17 +23,9 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spinlock_types.h>
#define MDIO_DRV_NAME "Hi-HNS_MDIO"
#define MDIO_BUS_NAME "Hisilicon MII Bus"
#define MDIO_DRV_VERSION "1.3.0"
#define MDIO_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
#define MDIO_DRV_STRING MDIO_BUS_NAME
#define MDIO_DEFAULT_DEVICE_DESCR MDIO_BUS_NAME
#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
#define MDIO_TIMEOUT 1000000
......@@ -64,9 +56,7 @@ struct hns_mdio_device {
#define MDIO_CMD_DEVAD_S 0
#define MDIO_CMD_PRTAD_M 0x1f
#define MDIO_CMD_PRTAD_S 5
#define MDIO_CMD_OP_M 0x3
#define MDIO_CMD_OP_S 10
#define MDIO_CMD_ST_M 0x3
#define MDIO_CMD_ST_S 12
#define MDIO_CMD_START_B 14
......@@ -185,18 +175,20 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
static int hns_mdio_wait_ready(struct mii_bus *bus)
{
struct hns_mdio_device *mdio_dev = bus->priv;
u32 cmd_reg_value;
int i;
u32 cmd_reg_value = 1;
/* wait for MDIO_COMMAND_REG's mdio_start == 0; */
/* after that a read or write can be performed */
for (i = 0; cmd_reg_value; i++) {
for (i = 0; i < MDIO_TIMEOUT; i++) {
cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
MDIO_COMMAND_REG,
MDIO_CMD_START_B);
if (i == MDIO_TIMEOUT)
return -ETIMEDOUT;
if (!cmd_reg_value)
break;
}
if ((i == MDIO_TIMEOUT) && cmd_reg_value)
return -ETIMEDOUT;
return 0;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment