Commit d0504f4d authored by David S. Miller

Merge branch 'bna-next'

Ivan Vecera says:

====================
bna: clean-up

The patches clean up the bna driver.

v2: changes & comments requested by Joe
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d205ce5c ecc46789
......@@ -282,7 +282,6 @@ bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
cee->ioc = ioc;
bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
bfa_q_qe_init(&cee->ioc_notify);
bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
}
......@@ -28,19 +28,6 @@
typedef void (*bfa_sm_t)(void *sm, int event);
/* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc
* etype - event type, eg. enum ioc_event
*/
#define bfa_sm_state_decl(oc, st, otype, etype) \
static void oc ## _sm_ ## st(otype * fsm, etype event)
#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
#define bfa_sm_get_state(_sm) ((_sm)->sm)
#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
/* For converting from state machine function to state encoding. */
struct bfa_sm_table {
bfa_sm_t sm; /*!< state machine function */
......@@ -67,7 +54,6 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
#define bfa_fsm_cmp_state(_fsm, _state) \
((_fsm)->fsm == (bfa_fsm_t)(_state))
......
......@@ -24,7 +24,6 @@
#include "bfa_defs_status.h"
#include "bfa_defs_mfg_comm.h"
#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
/* ---------------------- adapter definitions ------------ */
......@@ -55,7 +54,7 @@ struct bfa_adapter_attr {
char optrom_ver[BFA_VERSION_LEN];
char os_type[BFA_ADAPTER_OS_TYPE_LEN];
struct bfa_mfg_vpd vpd;
struct mac mac;
u8 mac[ETH_ALEN];
u8 nports;
u8 max_speed;
......@@ -187,8 +186,6 @@ enum {
#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
#pragma pack(1)
/* BFA adapter manufacturing block definition.
*
* All numerical fields are in big-endian format.
......@@ -211,7 +208,7 @@ struct bfa_mfg_block {
char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
mac_t mfg_mac; /* base mac address */
u8 mfg_mac[ETH_ALEN]; /* base mac address */
u8 num_mac; /* number of mac addresses */
u8 rsv2;
u32 card_type; /* card type */
......@@ -227,9 +224,7 @@ struct bfa_mfg_block {
char initial_mode[8]; /* initial mode: hba/cna/nic */
u8 rsv4[84];
u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
};
#pragma pack()
} __packed;
/* ---------------------- pci definitions ------------ */
......
......@@ -109,8 +109,6 @@ union bfa_port_stats_u {
struct bfa_port_eth_stats eth;
};
#pragma pack(1)
#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
#define BFA_CEE_DCBX_MAX_PRIORITY (8)
#define BFA_CEE_DCBX_MAX_PGID (8)
......@@ -133,7 +131,7 @@ struct bfa_cee_lldp_str {
u8 len;
u8 rsvd[2];
u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
};
} __packed;
/* LLDP parameters */
struct bfa_cee_lldp_cfg {
......@@ -145,7 +143,7 @@ struct bfa_cee_lldp_cfg {
struct bfa_cee_lldp_str mgmt_addr;
u16 time_to_live;
u16 enabled_system_cap;
};
} __packed;
enum bfa_cee_dcbx_version {
DCBX_PROTOCOL_PRECEE = 1,
......@@ -171,7 +169,7 @@ struct bfa_cee_dcbx_cfg {
u8 lls_fcoe; /* FCoE Logical Link Status */
u8 lls_lan; /* LAN Logical Link Status */
u8 rsvd[2];
};
} __packed;
/* CEE status */
/* Making this to tri-state for the benefit of port list command */
......@@ -188,11 +186,11 @@ struct bfa_cee_attr {
u8 error_reason;
struct bfa_cee_lldp_cfg lldp_remote;
struct bfa_cee_dcbx_cfg dcbx_remote;
mac_t src_mac;
u8 src_mac[ETH_ALEN];
u8 link_speed;
u8 nw_priority;
u8 filler[2];
};
} __packed;
/* LLDP/DCBX/CEE Statistics */
struct bfa_cee_stats {
......@@ -214,8 +212,6 @@ struct bfa_cee_stats {
u32 cee_status_up; /*!< CEE status up */
u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
};
#pragma pack()
} __packed;
#endif /* __BFA_DEFS_CNA_H__ */
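For reference, the #pragma pack(1) ... #pragma pack() pairs removed above are replaced by a per-struct __packed attribute, which keeps the unpadded on-wire layout but scopes it to each structure. A minimal standalone sketch (the struct names are illustrative, not from the driver; __packed is re-defined here only so the example compiles outside the kernel):

#include <stdio.h>
#include <stdint.h>

#define __packed __attribute__((packed))   /* matches the kernel's definition */

struct hdr_natural {            /* default ABI rules insert padding after 'op' */
	uint8_t  op;
	uint32_t len;
};

struct hdr_packed {             /* attribute form: no padding, wire layout */
	uint8_t  op;
	uint32_t len;
} __packed;

int main(void)
{
	/* on common ABIs this prints "natural=8 packed=5" */
	printf("natural=%zu packed=%zu\n",
	       sizeof(struct hdr_natural), sizeof(struct hdr_packed));
	return 0;
}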
......@@ -59,8 +59,6 @@ enum {
BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
};
#pragma pack(1)
/* Check if Mezz card */
#define bfa_mfg_is_mezz(type) (( \
(type) == BFA_MFG_TYPE_JAYHAWK || \
......@@ -77,7 +75,7 @@ enum {
CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
CB_GPIO_PROTO = BIT(7) /*!< 8G 2port FC prototypes */
};
#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
......@@ -148,8 +146,6 @@ struct bfa_mfg_vpd {
u8 len; /*!< vpd data length excluding header */
u8 rsv;
u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
};
#pragma pack()
} __packed;
#endif /* __BFA_DEFS_MFG_H__ */
......@@ -23,14 +23,6 @@
/* IOC local definitions */
#define bfa_ioc_state_disabled(__sm) \
(((__sm) == BFI_IOC_UNINIT) || \
((__sm) == BFI_IOC_INITING) || \
((__sm) == BFI_IOC_HWINIT) || \
((__sm) == BFI_IOC_DISABLED) || \
((__sm) == BFI_IOC_FAIL) || \
((__sm) == BFI_IOC_CFG_DISABLED))
/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
......@@ -57,12 +49,6 @@
((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \
((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
readl((__ioc)->ioc_regs.hfn_mbox_cmd))
static bool bfa_nw_auto_recover = true;
......@@ -1105,12 +1091,9 @@ static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
struct bfa_ioc_notify *notify;
struct list_head *qe;
list_for_each(qe, &ioc->notify_q) {
notify = (struct bfa_ioc_notify *)qe;
list_for_each_entry(notify, &ioc->notify_q, qe)
notify->cbfn(notify->cbarg, event);
}
}
static void
......@@ -1387,7 +1370,7 @@ static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
return BFI_IOC_IMG_VER_INCOMP;
if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
......@@ -1398,7 +1381,7 @@ bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
/* GA takes priority over internal builds of the same patch stream.
* At this point major minor maint and patch numbers are same.
*/
if (fwhdr_is_ga(base_fwhdr) == true)
if (fwhdr_is_ga(base_fwhdr))
if (fwhdr_is_ga(fwhdr_to_cmp))
return BFI_IOC_IMG_VER_SAME;
else
......@@ -1912,10 +1895,8 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
}
void
bfa_nw_ioc_timeout(void *ioc_arg)
bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
{
struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
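For reference, bfa_nw_ioc_timeout() (like the other timeout handlers changed further down) now takes the real struct bfa_ioc * instead of a void * that had to be cast back. A small sketch of the before/after shape, assuming a toy struct bfa_ioc; the actual timer wiring in the driver is not shown:

#include <stdio.h>

struct bfa_ioc { int id; };     /* reduced stand-in for the driver's struct */

/* old style: opaque void * argument, cast inside the handler */
static void ioc_timeout_old(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *)ioc_arg;

	printf("timeout on ioc %d\n", ioc->id);
}

/* new style: typed argument, no cast, and the compiler checks callers */
static void ioc_timeout_new(struct bfa_ioc *ioc)
{
	printf("timeout on ioc %d\n", ioc->id);
}

int main(void)
{
	struct bfa_ioc ioc = { .id = 0 };

	ioc_timeout_old(&ioc);
	ioc_timeout_new(&ioc);
	return 0;
}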
......@@ -1980,10 +1961,9 @@ bfa_ioc_send_getattr(struct bfa_ioc *ioc)
}
void
bfa_nw_ioc_hb_check(void *cbarg)
bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
{
struct bfa_ioc *ioc = cbarg;
u32 hb_count;
hb_count = readl(ioc->ioc_regs.heartbeat);
if (ioc->hb_count == hb_count) {
......@@ -2177,7 +2157,8 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
/**
* Enqueue command to firmware.
*/
bfa_q_deq(&mod->cmd_q, &cmd);
cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
list_del(&cmd->qe);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
/**
......@@ -2198,8 +2179,10 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd *cmd;
while (!list_empty(&mod->cmd_q))
bfa_q_deq(&mod->cmd_q, &cmd);
while (!list_empty(&mod->cmd_q)) {
cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
list_del(&cmd->qe);
}
}
/**
......@@ -2223,7 +2206,7 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
/*
* Hold semaphore to serialize pll init and fwtrc.
*/
if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
return 1;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
......@@ -2278,7 +2261,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
int tlen;
if (ioc->dbg_fwsave_once) {
ioc->dbg_fwsave_once = 0;
ioc->dbg_fwsave_once = false;
if (ioc->dbg_fwsave_len) {
tlen = ioc->dbg_fwsave_len;
bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
......@@ -2796,7 +2779,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
ad_attr->prototype = 0;
ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
bfa_nw_ioc_get_mac(ioc, ad_attr->mac);
ad_attr->pcie_gen = ioc_attr->pcie_gen;
ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
......@@ -2942,10 +2925,10 @@ bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
return ioc->attr->pwwn;
}
mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
void
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
{
return ioc->attr->mac;
ether_addr_copy(mac, ioc->attr->mac);
}
/* Firmware failure detected. Start recovery actions. */
......@@ -2997,9 +2980,8 @@ bfa_iocpf_stop(struct bfa_ioc *ioc)
}
void
bfa_nw_iocpf_timeout(void *ioc_arg)
bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
enum bfa_iocpf_state iocpf_st;
iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
......@@ -3011,10 +2993,8 @@ bfa_nw_iocpf_timeout(void *ioc_arg)
}
void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
{
struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
bfa_ioc_hw_sem_get(ioc);
}
......@@ -3245,7 +3225,6 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
flash->op_busy = 0;
bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
bfa_q_qe_init(&flash->ioc_notify);
bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
......
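For reference, the open-coded list_for_each() walks plus casts in this file give way to list_for_each_entry(), and bfa_q_deq() gives way to list_first_entry() + list_del(). A userspace sketch of the typed-iterator idiom; the list helpers and struct notify below are minimal stand-ins re-declared only so the example builds on its own:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_for_each_entry(pos, head, member)                           \
	for (pos = list_entry((head)->next, typeof(*pos), member);        \
	     &pos->member != (head);                                       \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct notify {                 /* stands in for struct bfa_ioc_notify */
	struct list_head qe;
	int id;
};

int main(void)
{
	struct list_head notify_q = { &notify_q, &notify_q };
	struct notify a = { .id = 1 }, b = { .id = 2 };
	struct notify *n;

	list_add_tail(&a.qe, &notify_q);
	list_add_tail(&b.qe, &notify_q);

	/* the typed iterator replaces list_for_each() + an explicit cast */
	list_for_each_entry(n, &notify_q, qe)
		printf("notify %d\n", n->id);
	return 0;
}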
......@@ -232,12 +232,6 @@ struct bfa_ioc_hwif {
#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
#define bfa_ioc_is_default(__ioc) \
(bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc))
#define bfa_ioc_fetch_stats(__ioc, __stats) \
(((__stats)->drv_stats) = (__ioc)->stats)
#define bfa_ioc_clr_stats(__ioc) \
memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
#define bfa_ioc_speed_sup(__ioc) \
BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
#define bfa_ioc_get_nports(__ioc) \
......@@ -268,13 +262,6 @@ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
(__ioc)->asic_mode))
#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \
if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \
((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
} while (0)
#define bfa_ioc_ownership_reset(__ioc) \
((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
#define bfa_ioc_lpu_read_stat(__ioc) do { \
if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \
((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
......@@ -309,7 +296,7 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac);
void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
......@@ -317,10 +304,10 @@ int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
/*
* Timeout APIs
*/
void bfa_nw_ioc_timeout(void *ioc);
void bfa_nw_ioc_hb_check(void *ioc);
void bfa_nw_iocpf_timeout(void *ioc);
void bfa_nw_iocpf_sem_timeout(void *ioc);
void bfa_nw_ioc_timeout(struct bfa_ioc *ioc);
void bfa_nw_ioc_hb_check(struct bfa_ioc *ioc);
void bfa_nw_iocpf_timeout(struct bfa_ioc *ioc);
void bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc);
/*
* F/W Image Size & Chunk
......
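For reference, bfa_nw_ioc_get_mac() above now fills a caller-supplied u8 buffer of ETH_ALEN bytes instead of returning a mac_t by value, and the copy itself uses ether_addr_copy() from <linux/etherdevice.h>. A userspace sketch of the pattern; ETH_ALEN and ether_addr_copy are re-declared here only so it compiles standalone (the kernel helper is documented to expect 2-byte-aligned addresses):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6                          /* octets in one Ethernet address */

/* simplified stand-in for the kernel's ether_addr_copy() */
static void ether_addr_copy(unsigned char *dst, const unsigned char *src)
{
	memcpy(dst, src, ETH_ALEN);
}

struct ioc_attr {
	unsigned char mac[ETH_ALEN];        /* was: mac_t mac; */
};

/* new shape: void return, caller provides the destination buffer */
static void ioc_get_mac(const struct ioc_attr *attr, unsigned char *mac)
{
	ether_addr_copy(mac, attr->mac);
}

int main(void)
{
	struct ioc_attr attr = { .mac = { 0x00, 0x05, 0x1e, 0x12, 0x34, 0x56 } };
	unsigned char mac[ETH_ALEN];

	ioc_get_mac(&attr, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}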
......@@ -24,7 +24,7 @@
#include "bfa_defs.h"
#define bfa_ioc_ct_sync_pos(__ioc) \
((u32) (1 << bfa_ioc_pcifn(__ioc)))
((u32)BIT(bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH 16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
......
......@@ -66,8 +66,9 @@ cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
cmdq->offset = 0;
cmdq->bytes_to_copy = 0;
while (!list_empty(&cmdq->pending_q)) {
bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
bfa_q_qe_init(&cmdq_ent->qe);
cmdq_ent = list_first_entry(&cmdq->pending_q,
struct bfa_msgq_cmd_entry, qe);
list_del(&cmdq_ent->qe);
call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
}
}
......@@ -242,8 +243,8 @@ bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
/* Walk through pending list to see if the command can be posted */
while (!list_empty(&cmdq->pending_q)) {
cmd =
(struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
cmd = list_first_entry(&cmdq->pending_q,
struct bfa_msgq_cmd_entry, qe);
if (ntohs(cmd->msg_hdr->num_entries) <=
BFA_MSGQ_FREE_CNT(cmdq)) {
list_del(&cmd->qe);
......@@ -615,7 +616,6 @@ bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
bfa_msgq_rspq_attach(&msgq->rspq, msgq);
bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
bfa_q_qe_init(&msgq->ioc_notify);
bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
......
......@@ -21,8 +21,6 @@
#include "bfa_defs.h"
#pragma pack(1)
/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
......@@ -36,10 +34,10 @@ struct bfi_mhdr {
struct {
u8 qid;
u8 fn_lpu; /*!< msg destination */
} h2i;
} __packed h2i;
u16 i2htok; /*!< token in msgs to host */
} mtag;
};
} __packed mtag;
} __packed;
#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu))
#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1)
......@@ -75,14 +73,14 @@ union bfi_addr_u {
struct {
u32 addr_lo;
u32 addr_hi;
} a32;
};
} __packed a32;
} __packed;
/* Generic DMA addr-len pair. */
struct bfi_alen {
union bfi_addr_u al_addr; /* DMA addr of buffer */
u32 al_len; /* length of buffer */
};
} __packed;
/*
* Large Message structure - 128 Bytes size Msgs
......@@ -96,7 +94,7 @@ struct bfi_alen {
struct bfi_mbmsg {
struct bfi_mhdr mh;
u32 pl[BFI_MBMSG_SZ];
};
} __packed;
/* Supported PCI function class codes (personality) */
enum bfi_pcifn_class {
......@@ -184,19 +182,19 @@ enum bfi_ioc_i2h_msgs {
struct bfi_ioc_getattr_req {
struct bfi_mhdr mh;
union bfi_addr_u attr_addr;
};
} __packed;
struct bfi_ioc_attr {
u64 mfg_pwwn; /*!< Mfg port wwn */
u64 mfg_nwwn; /*!< Mfg node wwn */
mac_t mfg_mac; /*!< Mfg mac */
u8 mfg_mac[ETH_ALEN]; /*!< Mfg mac */
u8 port_mode; /* enum bfi_port_mode */
u8 rsvd_a;
u64 pwwn;
u64 nwwn;
mac_t mac; /*!< PBC or Mfg mac */
u8 mac[ETH_ALEN]; /*!< PBC or Mfg mac */
u16 rsvd_b;
mac_t fcoe_mac;
u8 fcoe_mac[ETH_ALEN];
u16 rsvd_c;
char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
u8 pcie_gen;
......@@ -211,14 +209,14 @@ struct bfi_ioc_attr {
char optrom_version[BFA_VERSION_LEN];
struct bfa_mfg_vpd vpd;
u32 card_type; /*!< card type */
};
} __packed;
/* BFI_IOC_I2H_GETATTR_REPLY message */
struct bfi_ioc_getattr_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< cfg reply status */
u8 rsvd[3];
};
} __packed;
/* Firmware memory page offsets */
#define BFI_IOC_SMEM_PG0_CB (0x40)
......@@ -256,7 +254,7 @@ struct bfi_ioc_fwver {
u8 build;
u8 rsvd[2];
#endif
};
} __packed;
struct bfi_ioc_image_hdr {
u32 signature; /*!< constant signature */
......@@ -269,7 +267,7 @@ struct bfi_ioc_image_hdr {
u32 rsvd_b[2];
struct bfi_ioc_fwver fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
} __packed;
enum bfi_ioc_img_ver_cmp {
BFI_IOC_IMG_VER_INCOMP,
......@@ -301,7 +299,7 @@ enum bfi_port_mode {
struct bfi_ioc_hbeat {
struct bfi_mhdr mh; /*!< common msg header */
u32 hb_count; /*!< current heart beat count */
};
} __packed;
/* IOC hardware/firmware state */
enum bfi_ioc_state {
......@@ -317,8 +315,6 @@ enum bfi_ioc_state {
BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
};
#define BFI_IOC_ENDIAN_SIG 0x12345678
enum {
BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
......@@ -337,12 +333,6 @@ enum {
BFI_ADAPTER_ ## __prop ## _SH)
#define BFI_ADAPTER_SETP(__prop, __val) \
((__val) << BFI_ADAPTER_ ## __prop ## _SH)
#define BFI_ADAPTER_IS_PROTO(__adap_type) \
((__adap_type) & BFI_ADAPTER_PROTO)
#define BFI_ADAPTER_IS_TTV(__adap_type) \
((__adap_type) & BFI_ADAPTER_TTV)
#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
((__adap_type) & BFI_ADAPTER_UNSUPP)
#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
BFI_ADAPTER_UNSUPP))
......@@ -353,7 +343,7 @@ struct bfi_ioc_ctrl_req {
u16 clscode;
u16 rsvd;
u32 tv_sec;
};
} __packed;
/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
struct bfi_ioc_ctrl_reply {
......@@ -362,7 +352,7 @@ struct bfi_ioc_ctrl_reply {
u8 port_mode; /*!< enum bfa_mode */
u8 cap_bm; /*!< capability bit mask */
u8 rsvd;
};
} __packed;
#define BFI_IOC_MSGSZ 8
/* H2I Messages */
......@@ -372,14 +362,14 @@ union bfi_ioc_h2i_msg_u {
struct bfi_ioc_ctrl_req disable_req;
struct bfi_ioc_getattr_req getattr_req;
u32 mboxmsg[BFI_IOC_MSGSZ];
};
} __packed;
/* I2H Messages */
union bfi_ioc_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_reply fw_event;
u32 mboxmsg[BFI_IOC_MSGSZ];
};
} __packed;
/*----------------------------------------------------------------------
* MSGQ
......@@ -408,7 +398,7 @@ struct bfi_msgq_mhdr {
u16 num_entries;
u8 enet_id;
u8 rsvd[1];
};
} __packed;
#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \
(_mh).msg_class = (_mc); \
......@@ -430,21 +420,21 @@ struct bfi_msgq {
union bfi_addr_u addr;
u16 q_depth; /* Total num of entries in the queue */
u8 rsvd[2];
};
} __packed;
/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
struct bfi_msgq_cfg_req {
struct bfi_mhdr mh;
struct bfi_msgq cmdq;
struct bfi_msgq rspq;
};
} __packed;
/* BFI_ENET_MSGQ_CFG_RSP */
struct bfi_msgq_cfg_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
};
} __packed;
/* BFI_MSGQ_H2I_DOORBELL */
struct bfi_msgq_h2i_db {
......@@ -452,8 +442,8 @@ struct bfi_msgq_h2i_db {
union {
u16 cmdq_pi;
u16 rspq_ci;
} idx;
};
} __packed idx;
} __packed;
/* BFI_MSGQ_I2H_DOORBELL */
struct bfi_msgq_i2h_db {
......@@ -461,8 +451,8 @@ struct bfi_msgq_i2h_db {
union {
u16 rspq_pi;
u16 cmdq_ci;
} idx;
};
} __packed idx;
} __packed;
#define BFI_CMD_COPY_SZ 28
......@@ -470,14 +460,14 @@ struct bfi_msgq_i2h_db {
struct bfi_msgq_h2i_cmdq_copy_rsp {
struct bfi_mhdr mh;
u8 data[BFI_CMD_COPY_SZ];
};
} __packed;
/* BFI_MSGQ_I2H_CMD_COPY_REQ */
struct bfi_msgq_i2h_cmdq_copy_req {
struct bfi_mhdr mh;
u16 offset;
u16 len;
};
} __packed;
/*
* FLASH module specific
......@@ -505,7 +495,7 @@ enum bfi_flash_i2h_msgs {
struct bfi_flash_query_req {
struct bfi_mhdr mh; /* Common msg header */
struct bfi_alen alen;
};
} __packed;
/*
* Flash write request
......@@ -519,7 +509,7 @@ struct bfi_flash_write_req {
u8 rsv[2];
u32 offset;
u32 length;
};
} __packed;
/*
* Flash read request
......@@ -532,7 +522,7 @@ struct bfi_flash_read_req {
u32 offset;
u32 length;
struct bfi_alen alen;
};
} __packed;
/*
* Flash query response
......@@ -540,7 +530,7 @@ struct bfi_flash_read_req {
struct bfi_flash_query_rsp {
struct bfi_mhdr mh; /* Common msg header */
u32 status;
};
} __packed;
/*
* Flash read response
......@@ -552,7 +542,7 @@ struct bfi_flash_read_rsp {
u8 rsv[3];
u32 status;
u32 length;
};
} __packed;
/*
* Flash write response
......@@ -564,8 +554,6 @@ struct bfi_flash_write_rsp {
u8 rsv[3];
u32 status;
u32 length;
};
#pragma pack()
} __packed;
#endif /* __BFI_H__ */
......@@ -22,8 +22,6 @@
#include "bfi.h"
#include "bfa_defs_cna.h"
#pragma pack(1)
enum bfi_port_h2i {
BFI_PORT_H2I_ENABLE_REQ = (1),
BFI_PORT_H2I_DISABLE_REQ = (2),
......@@ -43,7 +41,7 @@ struct bfi_port_generic_req {
struct bfi_mhdr mh; /*!< msg header */
u32 msgtag; /*!< msgtag for reply */
u32 rsvd;
};
} __packed;
/* Generic RSP type */
struct bfi_port_generic_rsp {
......@@ -51,13 +49,13 @@ struct bfi_port_generic_rsp {
u8 status; /*!< port enable status */
u8 rsvd[3];
u32 msgtag; /*!< msgtag for reply */
};
} __packed;
/* BFI_PORT_H2I_GET_STATS_REQ */
struct bfi_port_get_stats_req {
struct bfi_mhdr mh; /*!< common msg header */
union bfi_addr_u dma_addr;
};
} __packed;
union bfi_port_h2i_msg_u {
struct bfi_mhdr mh;
......@@ -65,7 +63,7 @@ union bfi_port_h2i_msg_u {
struct bfi_port_generic_req disable_req;
struct bfi_port_get_stats_req getstats_req;
struct bfi_port_generic_req clearstats_req;
};
} __packed;
union bfi_port_i2h_msg_u {
struct bfi_mhdr mh;
......@@ -73,7 +71,7 @@ union bfi_port_i2h_msg_u {
struct bfi_port_generic_rsp disable_rsp;
struct bfi_port_generic_rsp getstats_rsp;
struct bfi_port_generic_rsp clearstats_rsp;
};
} __packed;
/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
enum bfi_cee_h2i_msgs {
......@@ -97,7 +95,7 @@ enum bfi_cee_i2h_msgs {
*/
struct bfi_lldp_reset_stats {
struct bfi_mhdr mh;
};
} __packed;
/*
* @brief H2I command structure for resetting the stats.
......@@ -105,7 +103,7 @@ struct bfi_lldp_reset_stats {
*/
struct bfi_cee_reset_stats {
struct bfi_mhdr mh;
};
} __packed;
/*
* @brief get configuration command from host
......@@ -114,7 +112,7 @@ struct bfi_cee_reset_stats {
struct bfi_cee_get_req {
struct bfi_mhdr mh;
union bfi_addr_u dma_addr;
};
} __packed;
/*
* @brief reply message from firmware
......@@ -124,7 +122,7 @@ struct bfi_cee_get_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
};
} __packed;
/*
* @brief get configuration command from host
......@@ -133,7 +131,7 @@ struct bfi_cee_get_rsp {
struct bfi_cee_stats_req {
struct bfi_mhdr mh;
union bfi_addr_u dma_addr;
};
} __packed;
/*
* @brief reply message from firmware
......@@ -143,22 +141,20 @@ struct bfi_cee_stats_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
};
} __packed;
/* @brief mailbox command structures from host to firmware */
union bfi_cee_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_cee_get_req get_req;
struct bfi_cee_stats_req stats_req;
};
} __packed;
/* @brief mailbox message structures from firmware to host */
union bfi_cee_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_cee_get_rsp get_rsp;
struct bfi_cee_stats_rsp stats_rsp;
};
#pragma pack()
} __packed;
#endif /* __BFI_CNA_H__ */
......@@ -36,8 +36,6 @@
#include "bfa_defs.h"
#include "bfi.h"
#pragma pack(1)
#define BFI_ENET_CFG_MAX 32 /* Max resources per PF */
#define BFI_ENET_TXQ_PRIO_MAX 8
......@@ -59,8 +57,8 @@ union bfi_addr_be_u {
struct {
u32 addr_hi; /* Most Significant 32-bits */
u32 addr_lo; /* Least Significant 32-Bits */
} a32;
};
} __packed a32;
} __packed;
/* T X Q U E U E D E F I N E S */
/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
......@@ -70,13 +68,13 @@ union bfi_addr_be_u {
#define BFI_ENET_TXQ_WI_EXTENSION (0x104) /* Extension WI */
/* TxQ Entry Control Flags */
#define BFI_ENET_TXQ_WI_CF_FCOE_CRC (1 << 8)
#define BFI_ENET_TXQ_WI_CF_IPID_MODE (1 << 5)
#define BFI_ENET_TXQ_WI_CF_INS_PRIO (1 << 4)
#define BFI_ENET_TXQ_WI_CF_INS_VLAN (1 << 3)
#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM (1 << 2)
#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM (1 << 1)
#define BFI_ENET_TXQ_WI_CF_IP_CKSUM (1 << 0)
#define BFI_ENET_TXQ_WI_CF_FCOE_CRC BIT(8)
#define BFI_ENET_TXQ_WI_CF_IPID_MODE BIT(5)
#define BFI_ENET_TXQ_WI_CF_INS_PRIO BIT(4)
#define BFI_ENET_TXQ_WI_CF_INS_VLAN BIT(3)
#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM BIT(2)
#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM BIT(1)
#define BFI_ENET_TXQ_WI_CF_IP_CKSUM BIT(0)
struct bfi_enet_txq_wi_base {
u8 reserved;
......@@ -88,28 +86,28 @@ struct bfi_enet_txq_wi_base {
u16 vlan_tag;
u16 lso_mss; /* Only 14 LSB are valid */
u32 frame_length; /* Only 24 LSB are valid */
};
} __packed;
struct bfi_enet_txq_wi_ext {
u16 reserved;
u16 opcode; /* BFI_ENET_TXQ_WI_EXTENSION */
u32 reserved2[3];
};
} __packed;
struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
u16 reserved;
u16 length; /* Only 14 LSB are valid */
union bfi_addr_be_u addr;
};
} __packed;
/* TxQ Entry Structure */
struct bfi_enet_txq_entry {
union {
struct bfi_enet_txq_wi_base base;
struct bfi_enet_txq_wi_ext ext;
} wi;
} __packed wi;
struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX];
};
} __packed;
#define wi_hdr wi.base
#define wi_ext_hdr wi.ext
......@@ -120,36 +118,36 @@ struct bfi_enet_txq_entry {
/* R X Q U E U E D E F I N E S */
struct bfi_enet_rxq_entry {
union bfi_addr_be_u rx_buffer;
};
} __packed;
/* R X C O M P L E T I O N Q U E U E D E F I N E S */
/* CQ Entry Flags */
#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
#define BFI_ENET_CQ_EF_TOO_LONG (1 << 2)
#define BFI_ENET_CQ_EF_FC_CRC_OK (1 << 3)
#define BFI_ENET_CQ_EF_MAC_ERROR BIT(0)
#define BFI_ENET_CQ_EF_FCS_ERROR BIT(1)
#define BFI_ENET_CQ_EF_TOO_LONG BIT(2)
#define BFI_ENET_CQ_EF_FC_CRC_OK BIT(3)
#define BFI_ENET_CQ_EF_RSVD1 (1 << 4)
#define BFI_ENET_CQ_EF_L4_CKSUM_OK (1 << 5)
#define BFI_ENET_CQ_EF_L3_CKSUM_OK (1 << 6)
#define BFI_ENET_CQ_EF_HDS_HEADER (1 << 7)
#define BFI_ENET_CQ_EF_RSVD1 BIT(4)
#define BFI_ENET_CQ_EF_L4_CKSUM_OK BIT(5)
#define BFI_ENET_CQ_EF_L3_CKSUM_OK BIT(6)
#define BFI_ENET_CQ_EF_HDS_HEADER BIT(7)
#define BFI_ENET_CQ_EF_UDP (1 << 8)
#define BFI_ENET_CQ_EF_TCP (1 << 9)
#define BFI_ENET_CQ_EF_IP_OPTIONS (1 << 10)
#define BFI_ENET_CQ_EF_IPV6 (1 << 11)
#define BFI_ENET_CQ_EF_UDP BIT(8)
#define BFI_ENET_CQ_EF_TCP BIT(9)
#define BFI_ENET_CQ_EF_IP_OPTIONS BIT(10)
#define BFI_ENET_CQ_EF_IPV6 BIT(11)
#define BFI_ENET_CQ_EF_IPV4 (1 << 12)
#define BFI_ENET_CQ_EF_VLAN (1 << 13)
#define BFI_ENET_CQ_EF_RSS (1 << 14)
#define BFI_ENET_CQ_EF_RSVD2 (1 << 15)
#define BFI_ENET_CQ_EF_IPV4 BIT(12)
#define BFI_ENET_CQ_EF_VLAN BIT(13)
#define BFI_ENET_CQ_EF_RSS BIT(14)
#define BFI_ENET_CQ_EF_RSVD2 BIT(15)
#define BFI_ENET_CQ_EF_MCAST_MATCH (1 << 16)
#define BFI_ENET_CQ_EF_MCAST (1 << 17)
#define BFI_ENET_CQ_EF_BCAST (1 << 18)
#define BFI_ENET_CQ_EF_REMOTE (1 << 19)
#define BFI_ENET_CQ_EF_MCAST_MATCH BIT(16)
#define BFI_ENET_CQ_EF_MCAST BIT(17)
#define BFI_ENET_CQ_EF_BCAST BIT(18)
#define BFI_ENET_CQ_EF_REMOTE BIT(19)
#define BFI_ENET_CQ_EF_LOCAL (1 << 20)
#define BFI_ENET_CQ_EF_LOCAL BIT(20)
/* CQ Entry Structure */
struct bfi_enet_cq_entry {
......@@ -161,7 +159,7 @@ struct bfi_enet_cq_entry {
u8 reserved1;
u8 reserved2;
u8 rxq_id;
};
} __packed;
/* E N E T C O N T R O L P A T H C O M M A N D S */
struct bfi_enet_q {
......@@ -169,23 +167,23 @@ struct bfi_enet_q {
union bfi_addr_u first_entry;
u16 pages; /* # of pages */
u16 page_sz;
};
} __packed;
struct bfi_enet_txq {
struct bfi_enet_q q;
u8 priority;
u8 rsvd[3];
};
} __packed;
struct bfi_enet_rxq {
struct bfi_enet_q q;
u16 rx_buffer_size;
u16 rsvd;
};
} __packed;
struct bfi_enet_cq {
struct bfi_enet_q q;
};
} __packed;
struct bfi_enet_ib_cfg {
u8 int_pkt_dma;
......@@ -198,16 +196,16 @@ struct bfi_enet_ib_cfg {
u32 inter_pkt_timeout;
u8 inter_pkt_count;
u8 rsvd1[3];
};
} __packed;
struct bfi_enet_ib {
union bfi_addr_u index_addr;
union {
u16 msix_index;
u16 intx_bitmask;
} intr;
} __packed intr;
u16 rsvd;
};
} __packed;
/* ENET command messages */
enum bfi_enet_h2i_msgs {
......@@ -355,7 +353,7 @@ enum bfi_enet_err {
*/
struct bfi_enet_req {
struct bfi_msgq_mhdr mh;
};
} __packed;
/* Enable/Disable Request
*
......@@ -370,7 +368,7 @@ struct bfi_enet_enable_req {
struct bfi_msgq_mhdr mh;
u8 enable; /* 1 = enable; 0 = disable */
u8 rsvd[3];
};
} __packed;
/* Generic Response */
struct bfi_enet_rsp {
......@@ -378,7 +376,7 @@ struct bfi_enet_rsp {
u8 error; /*!< if error see cmd_offset */
u8 rsvd;
u16 cmd_offset; /*!< offset to invalid parameter */
};
} __packed;
/* GLOBAL CONFIGURATION */
......@@ -387,7 +385,7 @@ struct bfi_enet_rsp {
*/
struct bfi_enet_attr_req {
struct bfi_msgq_mhdr mh;
};
} __packed;
/* bfi_enet_attr_rsp is used by:
* BFI_ENET_I2H_GET_ATTR_RSP
......@@ -400,7 +398,7 @@ struct bfi_enet_attr_rsp {
u32 max_cfg;
u32 max_ucmac;
u32 rit_size;
};
} __packed;
/* Tx Configuration
*
......@@ -421,7 +419,7 @@ struct bfi_enet_tx_cfg {
u8 apply_vlan_filter;
u8 add_to_vswitch;
u8 rsvd1[1];
};
} __packed;
struct bfi_enet_tx_cfg_req {
struct bfi_msgq_mhdr mh;
......@@ -431,7 +429,7 @@ struct bfi_enet_tx_cfg_req {
struct {
struct bfi_enet_txq q;
struct bfi_enet_ib ib;
} q_cfg[BFI_ENET_TXQ_PRIO_MAX];
} __packed q_cfg[BFI_ENET_TXQ_PRIO_MAX];
struct bfi_enet_ib_cfg ib_cfg;
......@@ -448,7 +446,7 @@ struct bfi_enet_tx_cfg_rsp {
u32 i_dbell; /* PCI base address offset */
u8 hw_qid; /* For debugging */
u8 rsvd[3];
} q_handles[BFI_ENET_TXQ_PRIO_MAX];
} __packed q_handles[BFI_ENET_TXQ_PRIO_MAX];
};
/* Rx Configuration
......@@ -481,13 +479,13 @@ struct bfi_enet_rx_cfg {
u8 force_offset;
u8 type;
u8 rsvd1;
} hds;
} __packed hds;
u8 multi_buffer;
u8 strip_vlan;
u8 drop_untagged;
u8 rsvd2;
};
} __packed;
/*
* Multicast frames are received on the ql of q-set index zero.
......@@ -504,12 +502,12 @@ struct bfi_enet_rx_cfg_req {
struct bfi_enet_rxq qs; /* small/header buffers */
struct bfi_enet_cq cq;
struct bfi_enet_ib ib;
} q_cfg[BFI_ENET_RX_QSET_MAX];
} __packed q_cfg[BFI_ENET_RX_QSET_MAX];
struct bfi_enet_ib_cfg ib_cfg;
struct bfi_enet_rx_cfg rx_cfg;
};
} __packed;
struct bfi_enet_rx_cfg_rsp {
struct bfi_msgq_mhdr mh;
......@@ -524,8 +522,8 @@ struct bfi_enet_rx_cfg_rsp {
u8 hw_sqid; /* For debugging */
u8 hw_cqid; /* For debugging */
u8 rsvd;
} q_handles[BFI_ENET_RX_QSET_MAX];
};
} __packed q_handles[BFI_ENET_RX_QSET_MAX];
} __packed;
/* RIT
*
......@@ -537,7 +535,7 @@ struct bfi_enet_rit_req {
u16 size; /* number of table-entries used */
u8 rsvd[2];
u8 table[BFI_ENET_RSS_RIT_MAX];
};
} __packed;
/* RSS
*
......@@ -556,12 +554,12 @@ struct bfi_enet_rss_cfg {
u8 mask;
u8 rsvd[2];
u32 key[BFI_ENET_RSS_KEY_LEN];
};
} __packed;
struct bfi_enet_rss_cfg_req {
struct bfi_msgq_mhdr mh;
struct bfi_enet_rss_cfg cfg;
};
} __packed;
/* MAC Unicast
*
......@@ -573,16 +571,16 @@ struct bfi_enet_rss_cfg_req {
*/
struct bfi_enet_ucast_req {
struct bfi_msgq_mhdr mh;
mac_t mac_addr;
u8 mac_addr[ETH_ALEN];
u8 rsvd[2];
};
} __packed;
/* MAC Unicast + VLAN */
struct bfi_enet_mac_n_vlan_req {
struct bfi_msgq_mhdr mh;
u16 vlan_id;
mac_t mac_addr;
};
u8 mac_addr[ETH_ALEN];
} __packed;
/* MAC Multicast
*
......@@ -591,9 +589,9 @@ struct bfi_enet_mac_n_vlan_req {
*/
struct bfi_enet_mcast_add_req {
struct bfi_msgq_mhdr mh;
mac_t mac_addr;
u8 mac_addr[ETH_ALEN];
u8 rsvd[2];
};
} __packed;
/* bfi_enet_mac_mfilter_add_rsp is used by:
* BFI_ENET_I2H_MAC_MCAST_ADD_RSP
......@@ -605,7 +603,7 @@ struct bfi_enet_mcast_add_rsp {
u16 cmd_offset;
u16 handle;
u8 rsvd1[2];
};
} __packed;
/* bfi_enet_mac_mfilter_del_req is used by:
* BFI_ENET_H2I_MAC_MCAST_DEL_REQ
......@@ -614,7 +612,7 @@ struct bfi_enet_mcast_del_req {
struct bfi_msgq_mhdr mh;
u16 handle;
u8 rsvd[2];
};
} __packed;
/* VLAN
*
......@@ -626,7 +624,7 @@ struct bfi_enet_rx_vlan_req {
u8 block_idx;
u8 rsvd[3];
u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
};
} __packed;
/* PAUSE
*
......@@ -638,7 +636,7 @@ struct bfi_enet_set_pause_req {
u8 rsvd[2];
u8 tx_pause; /* 1 = enable; 0 = disable */
u8 rx_pause; /* 1 = enable; 0 = disable */
};
} __packed;
/* DIAGNOSTICS
*
......@@ -650,7 +648,7 @@ struct bfi_enet_diag_lb_req {
u8 rsvd[2];
u8 mode; /* cable or Serdes */
u8 enable; /* 1 = enable; 0 = disable */
};
} __packed;
/* enum for Loopback opmodes */
enum {
......@@ -671,14 +669,14 @@ struct bfi_enet_stats_req {
u32 rx_enet_mask;
u32 tx_enet_mask;
union bfi_addr_u host_buffer;
};
} __packed;
/* defines for "stats_mask" above. */
#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
#define BFI_ENET_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
#define BFI_ENET_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
#define BFI_ENET_STATS_MAC BIT(0) /* !< MAC Statistics */
#define BFI_ENET_STATS_BPC BIT(1) /* !< Pause Stats from BPC */
#define BFI_ENET_STATS_RAD BIT(2) /* !< Rx Admission Statistics */
#define BFI_ENET_STATS_RX_FC BIT(3) /* !< Rx FC Stats from RxA */
#define BFI_ENET_STATS_TX_FC BIT(4) /* !< Tx FC Stats from TxA */
#define BFI_ENET_STATS_ALL 0x1f
......@@ -699,7 +697,7 @@ struct bfi_enet_stats_txf {
u64 errors;
u64 filter_vlan; /* frames filtered due to VLAN */
u64 filter_mac_sa; /* frames filtered due to SA check */
};
} __packed;
/* RxF Frame Statistics */
struct bfi_enet_stats_rxf {
......@@ -715,7 +713,7 @@ struct bfi_enet_stats_rxf {
u64 bcast;
u64 bcast_vlan;
u64 frame_drops;
};
} __packed;
/* FC Tx Frame Statistics */
struct bfi_enet_stats_fc_tx {
......@@ -734,7 +732,7 @@ struct bfi_enet_stats_fc_tx {
u64 txf_parity_errors;
u64 txf_timeout;
u64 txf_fid_parity_errors;
};
} __packed;
/* FC Rx Frame Statistics */
struct bfi_enet_stats_fc_rx {
......@@ -749,7 +747,7 @@ struct bfi_enet_stats_fc_rx {
u64 rxf_bcast_octets;
u64 rxf_bcast;
u64 rxf_bcast_vlan;
};
} __packed;
/* RAD Frame Statistics */
struct bfi_enet_stats_rad {
......@@ -770,7 +768,7 @@ struct bfi_enet_stats_rad {
u64 rx_bcast_vlan;
u64 rx_drops;
};
} __packed;
/* BPC Tx Registers */
struct bfi_enet_stats_bpc {
......@@ -785,7 +783,7 @@ struct bfi_enet_stats_bpc {
u64 rx_zero_pause[8]; /*!< Pause cancellation */
/*!<Pause initiation rather than retention */
u64 rx_first_pause[8];
};
} __packed;
/* MAC Rx Statistics */
struct bfi_enet_stats_mac {
......@@ -838,7 +836,7 @@ struct bfi_enet_stats_mac {
u64 tx_oversize;
u64 tx_undersize;
u64 tx_fragments;
};
} __packed;
/* Complete statistics, DMAed from fw to host followed by
* BFI_ENET_I2H_STATS_GET_RSP
......@@ -852,8 +850,6 @@ struct bfi_enet_stats {
struct bfi_enet_stats_fc_tx fc_tx_stats;
struct bfi_enet_stats_rxf rxf_stats[BFI_ENET_CFG_MAX];
struct bfi_enet_stats_txf txf_stats[BFI_ENET_CFG_MAX];
};
#pragma pack()
} __packed;
#endif /* __BFI_ENET_H__ */
......@@ -28,36 +28,8 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
/* Macros and constants */
#define BNA_IOC_TIMER_FREQ 200
/* Log string size */
#define BNA_MESSAGE_SIZE 256
#define bna_is_small_rxq(_id) ((_id) & 0x1)
#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
(!memcmp((_mac1), (_mac2), sizeof(mac_t)))
#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
#define BNA_TO_POWER_OF_2(x) \
do { \
int _shift = 0; \
while ((x) && (x) != 1) { \
(x) >>= 1; \
_shift++; \
} \
(x) <<= _shift; \
} while (0)
#define BNA_TO_POWER_OF_2_HIGH(x) \
do { \
int n = 1; \
while (n < (x)) \
n <<= 1; \
(x) = n; \
} while (0)
/*
* input : _addr-> os dma addr in host endian format,
* output : _bna_dma_addr-> pointer to hw dma addr
......@@ -80,62 +52,8 @@ do { \
| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
} while (0)
#define containing_rec(addr, type, field) \
((type *)((unsigned char *)(addr) - \
(unsigned char *)(&((type *)0)->field)))
#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
/* TxQ element is 64 bytes */
#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
unsigned int page_index; /* index within a page */ \
void *page_addr; \
page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
(_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
(_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}
/* RxQ element is 8 bytes */
#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
unsigned int page_index; /* index within a page */ \
void *page_addr; \
page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
(_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
page_addr = (_qpt_ptr)[((_qe_idx) >> \
BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
(_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
}
/* CQ element is 16 bytes */
#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
unsigned int page_index; /* index within a page */ \
void *page_addr; \
\
page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
(_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
page_addr = (_qpt_ptr)[((_qe_idx) >> \
BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
(_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
}
#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
(&((_cast *)(_q_base))[(_qe_idx)])
#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
......@@ -147,31 +65,10 @@ do { \
#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
((_q_depth) - 1))
#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
(_q_depth - 1))
#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
#define BNA_Q_PI_ADD(_q_ptr, _num) \
(_q_ptr)->q.producer_index = \
(((_q_ptr)->q.producer_index + (_num)) & \
((_q_ptr)->q.q_depth - 1))
#define BNA_Q_CI_ADD(_q_ptr, _num) \
(_q_ptr)->q.consumer_index = \
(((_q_ptr)->q.consumer_index + (_num)) \
& ((_q_ptr)->q.q_depth - 1))
#define BNA_Q_FREE_COUNT(_q_ptr) \
(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
#define BNA_Q_IN_USE_COUNT(_q_ptr) \
(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
#define BNA_LARGE_PKT_SIZE 1000
#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
......@@ -222,21 +119,6 @@ do { \
} \
} while (0)
#define call_rxf_pause_cbfn(rxf) \
do { \
if ((rxf)->oper_state_cbfn) { \
void (*cbfn)(struct bnad *, struct bna_rx *); \
struct bnad *cbarg; \
cbfn = (rxf)->oper_state_cbfn; \
cbarg = (rxf)->oper_state_cbarg; \
(rxf)->oper_state_cbfn = NULL; \
(rxf)->oper_state_cbarg = NULL; \
cbfn(cbarg, rxf->rx); \
} \
} while (0)
#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))
......@@ -326,28 +208,24 @@ do { \
#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
#define bna_tx_from_rid(_bna, _rid, _tx) \
do { \
struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
struct bna_tx *__tx; \
struct list_head *qe; \
_tx = NULL; \
list_for_each(qe, &__tx_mod->tx_active_q) { \
__tx = (struct bna_tx *)qe; \
if (__tx->rid == (_rid)) { \
(_tx) = __tx; \
break; \
} \
} \
do { \
struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
struct bna_tx *__tx; \
_tx = NULL; \
list_for_each_entry(__tx, &__tx_mod->tx_active_q, qe) { \
if (__tx->rid == (_rid)) { \
(_tx) = __tx; \
break; \
} \
} \
} while (0)
#define bna_rx_from_rid(_bna, _rid, _rx) \
do { \
struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \
struct bna_rx *__rx; \
struct list_head *qe; \
_rx = NULL; \
list_for_each(qe, &__rx_mod->rx_active_q) { \
__rx = (struct bna_rx *)qe; \
list_for_each_entry(__rx, &__rx_mod->rx_active_q, qe) { \
if (__rx->rid == (_rid)) { \
(_rx) = __rx; \
break; \
......@@ -367,15 +245,12 @@ do { \
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
struct bna_mac *mac = NULL;
struct list_head *qe;
list_for_each(qe, q) {
if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
mac = (struct bna_mac *)qe;
break;
}
}
return mac;
struct bna_mac *mac;
list_for_each_entry(mac, q, qe)
if (ether_addr_equal(mac->addr, addr))
return mac;
return NULL;
}
#define bna_attr(_bna) (&(_bna)->ioceth.attr)
......@@ -401,7 +276,6 @@ void bna_hw_stats_get(struct bna *bna);
/* APIs for RxF */
struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
......@@ -489,30 +363,19 @@ void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac);
enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
void (*cbfn)(struct bnad *, struct bna_rx *));
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist);
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac);
void
bna_rx_mcast_delall(struct bna_rx *rx,
void (*cbfn)(struct bnad *, struct bna_rx *));
bna_rx_mcast_delall(struct bna_rx *rx);
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
enum bna_rxmode bitmask,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_rxmode bitmask);
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
......@@ -532,11 +395,10 @@ void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
struct bna_pause_config *pause_config,
void (*cbfn)(struct bnad *));
struct bna_pause_config *pause_config);
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
void bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac);
/* IOCETH */
......
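For reference, bna_mac_find() above compares addresses with ether_addr_equal() from <linux/etherdevice.h> instead of the driver-private BNA_MAC_IS_EQUAL() memcmp wrapper. A userspace stand-in for that comparison (the real helper is optimised and documented to require 2-byte-aligned addresses; it is written with memcmp here purely for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* illustrative stand-in for the kernel's ether_addr_equal() */
static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	unsigned char want[ETH_ALEN] = { 0x00, 0x05, 0x1e, 0xaa, 0xbb, 0xcc };
	unsigned char got[ETH_ALEN]  = { 0x00, 0x05, 0x1e, 0xaa, 0xbb, 0xcc };

	/* replaces BNA_MAC_IS_EQUAL()'s memcmp(..., sizeof(mac_t)) */
	printf("match: %s\n", ether_addr_equal(want, got) ? "yes" : "no");
	return 0;
}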
......@@ -207,7 +207,7 @@ bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
if (rx_enet_mask & ((u32)(1 << i))) {
if (rx_enet_mask & ((u32)BIT(i))) {
int k;
count = sizeof(struct bfi_enet_stats_rxf) /
sizeof(u64);
......@@ -222,7 +222,7 @@ bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
if (tx_enet_mask & ((u32)(1 << i))) {
if (tx_enet_mask & ((u32)BIT(i))) {
int k;
count = sizeof(struct bfi_enet_stats_txf) /
sizeof(u64);
......@@ -884,16 +884,6 @@ do { \
} \
} while (0)
#define call_enet_pause_cbfn(enet) \
do { \
if ((enet)->pause_cbfn) { \
void (*cbfn)(struct bnad *); \
cbfn = (enet)->pause_cbfn; \
(enet)->pause_cbfn = NULL; \
cbfn((enet)->bna->bnad); \
} \
} while (0)
#define call_enet_mtu_cbfn(enet) \
do { \
if ((enet)->mtu_cbfn) { \
......@@ -925,7 +915,6 @@ bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
call_enet_pause_cbfn(enet);
call_enet_mtu_cbfn(enet);
call_enet_stop_cbfn(enet);
}
......@@ -947,7 +936,6 @@ bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
break;
case ENET_E_PAUSE_CFG:
call_enet_pause_cbfn(enet);
break;
case ENET_E_MTU_CFG:
......@@ -1039,7 +1027,6 @@ bna_enet_sm_started_entry(struct bna_enet *enet)
* NOTE: Do not call bna_enet_chld_start() here, since it will be
* inadvertently called during cfg_wait->started transition as well
*/
call_enet_pause_cbfn(enet);
call_enet_mtu_cbfn(enet);
}
......@@ -1211,8 +1198,6 @@ bna_enet_init(struct bna_enet *enet, struct bna *bna)
enet->stop_cbfn = NULL;
enet->stop_cbarg = NULL;
enet->pause_cbfn = NULL;
enet->mtu_cbfn = NULL;
bfa_fsm_set_state(enet, bna_enet_sm_stopped);
......@@ -1308,13 +1293,10 @@ bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
void
bna_enet_pause_config(struct bna_enet *enet,
struct bna_pause_config *pause_config,
void (*cbfn)(struct bnad *))
struct bna_pause_config *pause_config)
{
enet->pause_config = *pause_config;
enet->pause_cbfn = cbfn;
bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
......@@ -1330,9 +1312,9 @@ bna_enet_mtu_set(struct bna_enet *enet, int mtu,
}
void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
{
*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
}
/* IOCETH */
......@@ -1810,17 +1792,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&ucam_mod->free_q);
for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
}
/* A separate queue to allow synchronous setting of a list of MACs */
INIT_LIST_HEAD(&ucam_mod->del_q);
for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++)
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
}
ucam_mod->bna = bna;
}
......@@ -1828,17 +1806,6 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
struct list_head *qe;
int i;
i = 0;
list_for_each(qe, &ucam_mod->free_q)
i++;
i = 0;
list_for_each(qe, &ucam_mod->del_q)
i++;
ucam_mod->bna = NULL;
}
......@@ -1852,27 +1819,21 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&mcam_mod->free_q);
for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
}
mcam_mod->mchandle = (struct bna_mcam_handle *)
res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&mcam_mod->free_handle_q);
for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
list_add_tail(&mcam_mod->mchandle[i].qe,
&mcam_mod->free_handle_q);
}
&mcam_mod->free_handle_q);
/* A separate queue to allow synchronous setting of a list of MACs */
INIT_LIST_HEAD(&mcam_mod->del_q);
for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++)
list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
}
mcam_mod->bna = bna;
}
......@@ -1880,18 +1841,6 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
struct list_head *qe;
int i;
i = 0;
list_for_each(qe, &mcam_mod->free_q) i++;
i = 0;
list_for_each(qe, &mcam_mod->del_q) i++;
i = 0;
list_for_each(qe, &mcam_mod->free_handle_q) i++;
mcam_mod->bna = NULL;
}
......@@ -2108,32 +2057,26 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
struct list_head *qe;
if (list_empty(head))
return NULL;
struct bna_mac *mac;
bfa_q_deq(head, &qe);
return (struct bna_mac *)qe;
}
mac = list_first_entry_or_null(head, struct bna_mac, qe);
if (mac)
list_del(&mac->qe);
void
bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
list_add_tail(&mac->qe, tail);
return mac;
}
struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
struct list_head *qe;
if (list_empty(&mcam_mod->free_handle_q))
return NULL;
struct bna_mcam_handle *handle;
bfa_q_deq(&mcam_mod->free_handle_q, &qe);
handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
struct bna_mcam_handle, qe);
if (handle)
list_del(&handle->qe);
return (struct bna_mcam_handle *)qe;
return handle;
}
void
......
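For reference, bna_cam_mod_mac_get() and bna_mcam_mod_handle_get() above collapse the old "if (list_empty()) return NULL; bfa_q_deq()" sequence into list_first_entry_or_null() followed by list_del(). A userspace sketch of that dequeue pattern; the list helpers and struct mac_entry are minimal stand-ins so the example builds on its own (the kernel macro is written more defensively, but behaves the same):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_first_entry_or_null(head, type, member) \
	((head)->next != (head) ? list_entry((head)->next, type, member) : NULL)

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct mac_entry {              /* stands in for struct bna_mac */
	struct list_head qe;
	int id;
};

int main(void)
{
	struct list_head free_q = { &free_q, &free_q };
	struct mac_entry m = { .id = 7 };
	struct mac_entry *mac;

	list_add_tail(&m.qe, &free_q);

	/* NULL when the queue is empty, otherwise detach the first entry */
	mac = list_first_entry_or_null(&free_q, struct mac_entry, qe);
	if (mac)
		list_del(&mac->qe);

	printf("got %d, free_q %s\n", mac ? mac->id : -1,
	       free_q.next == &free_q ? "empty" : "not empty");
	return 0;
}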
......@@ -213,7 +213,7 @@ do { \
* 15 bits (32K) should be large enough to accumulate, anyways, and the max.
* acked events to h/w can be (32K + max poll weight) (currently 64).
*/
#define BNA_IB_MAX_ACK_EVENTS (1 << 15)
#define BNA_IB_MAX_ACK_EVENTS BIT(15)
/* These macros build the data portion of the TxQ/RxQ doorbell */
#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
......@@ -282,13 +282,13 @@ do { \
#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
/* TxQ Entry Control Flags */
#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
#define BNA_TXQ_WI_CF_FCOE_CRC BIT(8)
#define BNA_TXQ_WI_CF_IPID_MODE BIT(5)
#define BNA_TXQ_WI_CF_INS_PRIO BIT(4)
#define BNA_TXQ_WI_CF_INS_VLAN BIT(3)
#define BNA_TXQ_WI_CF_UDP_CKSUM BIT(2)
#define BNA_TXQ_WI_CF_TCP_CKSUM BIT(1)
#define BNA_TXQ_WI_CF_IP_CKSUM BIT(0)
#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
(((_hdr_size) << 10) | ((_offset) & 0x3FF))
......@@ -297,36 +297,36 @@ do { \
* Completion Q defines
*/
/* CQ Entry Flags */
#define BNA_CQ_EF_MAC_ERROR (1 << 0)
#define BNA_CQ_EF_FCS_ERROR (1 << 1)
#define BNA_CQ_EF_TOO_LONG (1 << 2)
#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
#define BNA_CQ_EF_RSVD1 (1 << 4)
#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
#define BNA_CQ_EF_HDS_HEADER (1 << 7)
#define BNA_CQ_EF_UDP (1 << 8)
#define BNA_CQ_EF_TCP (1 << 9)
#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
#define BNA_CQ_EF_IPV6 (1 << 11)
#define BNA_CQ_EF_IPV4 (1 << 12)
#define BNA_CQ_EF_VLAN (1 << 13)
#define BNA_CQ_EF_RSS (1 << 14)
#define BNA_CQ_EF_RSVD2 (1 << 15)
#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
#define BNA_CQ_EF_MCAST (1 << 17)
#define BNA_CQ_EF_BCAST (1 << 18)
#define BNA_CQ_EF_REMOTE (1 << 19)
#define BNA_CQ_EF_LOCAL (1 << 20)
#define BNA_CQ_EF_MAC_ERROR BIT(0)
#define BNA_CQ_EF_FCS_ERROR BIT(1)
#define BNA_CQ_EF_TOO_LONG BIT(2)
#define BNA_CQ_EF_FC_CRC_OK BIT(3)
#define BNA_CQ_EF_RSVD1 BIT(4)
#define BNA_CQ_EF_L4_CKSUM_OK BIT(5)
#define BNA_CQ_EF_L3_CKSUM_OK BIT(6)
#define BNA_CQ_EF_HDS_HEADER BIT(7)
#define BNA_CQ_EF_UDP BIT(8)
#define BNA_CQ_EF_TCP BIT(9)
#define BNA_CQ_EF_IP_OPTIONS BIT(10)
#define BNA_CQ_EF_IPV6 BIT(11)
#define BNA_CQ_EF_IPV4 BIT(12)
#define BNA_CQ_EF_VLAN BIT(13)
#define BNA_CQ_EF_RSS BIT(14)
#define BNA_CQ_EF_RSVD2 BIT(15)
#define BNA_CQ_EF_MCAST_MATCH BIT(16)
#define BNA_CQ_EF_MCAST BIT(17)
#define BNA_CQ_EF_BCAST BIT(18)
#define BNA_CQ_EF_REMOTE BIT(19)
#define BNA_CQ_EF_LOCAL BIT(20)
/* CAT2 ASIC does not use bit 21 as per the SPEC.
* Bit 31 is set in every end of frame completion
*/
#define BNA_CQ_EF_EOP (1 << 31)
#define BNA_CQ_EF_EOP BIT(31)
/* Data structures */
......
......@@ -46,7 +46,6 @@ do { \
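For reference, the (1 << n) flag definitions above become BIT(n), which the kernel's bitops headers define as (1UL << (n)); besides reading as a bit position, the unsigned-long form avoids the undefined signed shift of (1 << 31) that BNA_CQ_EF_EOP relied on. A minimal standalone sketch with illustrative flag names (BIT is re-defined here only so the example compiles outside the kernel):

#include <stdio.h>

#define BIT(nr) (1UL << (nr))   /* same expansion as the kernel helper */

/* illustrative names mirroring the CQ entry flags above */
#define CQ_EF_FCS_ERROR  BIT(1)
#define CQ_EF_VLAN       BIT(13)
#define CQ_EF_EOP        BIT(31)

int main(void)
{
	unsigned long flags = CQ_EF_VLAN | CQ_EF_EOP;

	if (flags & CQ_EF_VLAN)
		printf("VLAN tagged, flags=0x%lx\n", flags);
	if (!(flags & CQ_EF_FCS_ERROR))
		printf("no FCS error\n");
	return 0;
}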
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
......@@ -60,14 +59,10 @@ static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
enum bna_rxf_event);
......@@ -82,11 +77,7 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_START:
if (rxf->flags & BNA_RXF_F_PAUSED) {
bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
call_rxf_start_cbfn(rxf);
} else
bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
case RXF_E_STOP:
......@@ -101,45 +92,6 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
call_rxf_cam_fltr_cbfn(rxf);
break;
case RXF_E_PAUSE:
rxf->flags |= BNA_RXF_F_PAUSED;
call_rxf_pause_cbfn(rxf);
break;
case RXF_E_RESUME:
rxf->flags &= ~BNA_RXF_F_PAUSED;
call_rxf_resume_cbfn(rxf);
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
call_rxf_pause_cbfn(rxf);
}
static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_STOP:
case RXF_E_FAIL:
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_CONFIG:
call_rxf_cam_fltr_cbfn(rxf);
break;
case RXF_E_RESUME:
rxf->flags &= ~BNA_RXF_F_PAUSED;
bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
default:
bfa_sm_fault(event);
}
......@@ -166,7 +118,6 @@ bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
bna_rxf_cfg_reset(rxf);
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
call_rxf_resume_cbfn(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
......@@ -174,12 +125,6 @@ bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
/* No-op */
break;
case RXF_E_PAUSE:
rxf->flags |= BNA_RXF_F_PAUSED;
call_rxf_start_cbfn(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
break;
case RXF_E_FW_RESP:
if (!bna_rxf_cfg_apply(rxf)) {
/* No more pending config updates */
......@@ -197,7 +142,6 @@ bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
call_rxf_resume_cbfn(rxf);
}
static void
......@@ -214,41 +158,6 @@ bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
case RXF_E_PAUSE:
rxf->flags |= BNA_RXF_F_PAUSED;
if (!bna_rxf_fltr_clear(rxf))
bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
else
bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}
static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_FAIL:
bna_rxf_cfg_reset(rxf);
call_rxf_pause_cbfn(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_FW_RESP:
if (!bna_rxf_fltr_clear(rxf)) {
/* No more pending CAM entries to clear */
bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
}
break;
default:
bfa_sm_fault(event);
}
......@@ -283,7 +192,7 @@ bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
ether_addr_copy(req->mac_addr, mac->addr);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_ucast_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
......@@ -299,7 +208,7 @@ bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
ether_addr_copy(req->mac_addr, mac->addr);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_mcast_add_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
......@@ -447,19 +356,14 @@ static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
struct bna_mac *mac;
struct list_head *qe;
list_for_each(qe, &rxf->mcast_active_q) {
mac = (struct bna_mac *)qe;
if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
list_for_each_entry(mac, &rxf->mcast_active_q, qe)
if (ether_addr_equal(mac->addr, mac_addr))
return mac;
}
list_for_each(qe, &rxf->mcast_pending_del_q) {
mac = (struct bna_mac *)qe;
if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe)
if (ether_addr_equal(mac->addr, mac_addr))
return mac;
}
return NULL;
}
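The list_for_each_entry() conversion above works because struct bna_mac embeds its list linkage as the member named qe; the macro recovers the containing structure from that member, so the old explicit cast is no longer needed. A minimal kernel-context sketch of the same lookup idiom, with an illustrative struct and helper that are not part of the driver:

#include <linux/list.h>
#include <linux/etherdevice.h>

struct item {
        u8 addr[ETH_ALEN];
        struct list_head qe;            /* links the item into a list */
};

/* Return the first item on 'head' whose MAC matches, or NULL. */
static struct item *lookup(struct list_head *head, const u8 *mac)
{
        struct item *it;

        list_for_each_entry(it, head, qe)       /* 'qe' names the member */
                if (ether_addr_equal(it->addr, mac))
                        return it;
        return NULL;
}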
......@@ -468,13 +372,10 @@ static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
struct bna_mcam_handle *mchandle;
struct list_head *qe;
list_for_each(qe, &rxf->mcast_handle_q) {
mchandle = (struct bna_mcam_handle *)qe;
list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe)
if (mchandle->handle == handle)
return mchandle;
}
return NULL;
}
......@@ -515,7 +416,6 @@ bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
ret = 1;
}
list_del(&mchandle->qe);
bfa_q_qe_init(&mchandle->qe);
bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
}
mac->handle = NULL;
......@@ -527,26 +427,23 @@ static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
struct list_head *qe;
int ret;
/* First delete multicast entries to maintain the count */
while (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
mac = list_first_entry(&rxf->mcast_pending_del_q,
struct bna_mac, qe);
ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
if (ret)
return ret;
}
/* Add multicast entries */
if (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
list_add_tail(&mac->qe, &rxf->mcast_active_q);
mac = list_first_entry(&rxf->mcast_pending_add_q,
struct bna_mac, qe);
list_move_tail(&mac->qe, &rxf->mcast_active_q);
bna_bfi_mcast_add_req(rxf, mac);
return 1;
}
......@@ -566,7 +463,7 @@ bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
block_idx++;
vlan_pending_bitmask >>= 1;
}
rxf->vlan_pending_bitmask &= ~(1 << block_idx);
rxf->vlan_pending_bitmask &= ~BIT(block_idx);
bna_bfi_rx_vlan_filter_set(rxf, block_idx);
return 1;
}
......@@ -577,27 +474,24 @@ bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
struct list_head *qe;
struct bna_mac *mac;
int ret;
/* Throw away delete pending mcast entries */
while (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
mac = list_first_entry(&rxf->mcast_pending_del_q,
struct bna_mac, qe);
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
if (ret)
return ret;
}
/* Move active mcast entries to pending_add_q */
while (!list_empty(&rxf->mcast_active_q)) {
bfa_q_deq(&rxf->mcast_active_q, &qe);
bfa_q_qe_init(qe);
list_add_tail(qe, &rxf->mcast_pending_add_q);
mac = (struct bna_mac *)qe;
mac = list_first_entry(&rxf->mcast_active_q,
struct bna_mac, qe);
list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
if (bna_rxf_mcast_del(rxf, mac, cleanup))
return 1;
}
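The two hunks above collapse the old bfa_q_deq()/bfa_q_qe_init()/list_add_tail() sequence into list_first_entry() followed by list_move_tail(), which unlinks the node and appends it to the destination list in one step. A short kernel-context sketch of draining one list into another this way (the node type and function name are illustrative):

#include <linux/list.h>

struct node {
        int val;
        struct list_head qe;
};

/* Move every node from 'src' to the tail of 'dst', oldest first. */
static void drain(struct list_head *src, struct list_head *dst)
{
        struct node *n;

        while (!list_empty(src)) {
                n = list_first_entry(src, struct node, qe);
                list_move_tail(&n->qe, dst);    /* list_del() + list_add_tail() */
        }
}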
......@@ -658,25 +552,6 @@ bna_rxf_cfg_apply(struct bna_rxf *rxf)
return 0;
}
/* Only software reset */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
return 1;
if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
return 1;
if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
return 1;
if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
return 1;
return 0;
}
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
......@@ -693,16 +568,13 @@ bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
struct bna_rx *rx = rxf->rx;
struct bna_rxp *rxp;
struct list_head *qe;
int offset = 0;
rxf->rit_size = rit_size;
list_for_each(qe, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe;
list_for_each_entry(rxp, &rx->rxp_q, qe) {
rxf->rit[offset] = rxp->cq.ccb->id;
offset++;
}
}
void
......@@ -760,9 +632,6 @@ bna_rxf_init(struct bna_rxf *rxf,
INIT_LIST_HEAD(&rxf->mcast_active_q);
INIT_LIST_HEAD(&rxf->mcast_handle_q);
if (q_config->paused)
rxf->flags |= BNA_RXF_F_PAUSED;
rxf->rit = (u8 *)
res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
bna_rit_init(rxf, q_config->num_paths);
......@@ -795,22 +664,21 @@ bna_rxf_uninit(struct bna_rxf *rxf)
rxf->ucast_active_set = 0;
while (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
mac = list_first_entry(&rxf->ucast_pending_add_q,
struct bna_mac, qe);
list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna));
}
if (rxf->ucast_pending_mac) {
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
rxf->ucast_pending_mac);
list_add_tail(&rxf->ucast_pending_mac->qe,
bna_ucam_mod_free_q(rxf->rx->bna));
rxf->ucast_pending_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
mac = list_first_entry(&rxf->mcast_pending_add_q,
struct bna_mac, qe);
list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
}
rxf->rxmode_pending = 0;
......@@ -823,8 +691,6 @@ bna_rxf_uninit(struct bna_rxf *rxf)
rxf->rss_pending = 0;
rxf->vlan_strip_pending = false;
rxf->flags = 0;
rxf->rx = NULL;
}
......@@ -863,8 +729,7 @@ bna_rxf_fail(struct bna_rxf *rxf)
}
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *))
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
{
struct bna_rxf *rxf = &rx->rxf;
......@@ -873,12 +738,11 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
if (rxf->ucast_pending_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
}
memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
rxf->ucast_pending_set = 1;
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbfn = NULL;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
......@@ -904,8 +768,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, addr, ETH_ALEN);
ether_addr_copy(mac->addr, addr);
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
rxf->cam_fltr_cbfn = cbfn;
......@@ -917,35 +780,31 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
}
enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
void (*cbfn)(struct bnad *, struct bna_rx *))
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
{
struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
struct list_head *qe;
u8 *mcaddr;
struct bna_mac *mac, *del_mac;
int i;
/* Purge the pending_add_q */
while (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
mac = list_first_entry(&rxf->ucast_pending_add_q,
struct bna_mac, qe);
list_move_tail(&mac->qe, &ucam_mod->free_q);
}
/* Schedule active_q entries for deletion */
while (!list_empty(&rxf->ucast_active_q)) {
bfa_q_deq(&rxf->ucast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
mac = list_first_entry(&rxf->ucast_active_q,
struct bna_mac, qe);
del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
memcpy(del_mac, mac, sizeof(*del_mac));
ether_addr_copy(del_mac->addr, mac->addr);
del_mac->handle = mac->handle;
list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
list_move_tail(&mac->qe, &ucam_mod->free_q);
}
/* Allocate nodes */
......@@ -954,69 +813,57 @@ bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
if (mac == NULL)
goto err_return;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, mcaddr, ETH_ALEN);
ether_addr_copy(mac->addr, mcaddr);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
}
/* Add the new entries */
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
mac = list_first_entry(&list_head, struct bna_mac, qe);
list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
}
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
mac = list_first_entry(&list_head, struct bna_mac, qe);
list_move_tail(&mac->qe, &ucam_mod->free_q);
}
return BNA_CB_UCAST_CAM_FULL;
}
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
void (*cbfn)(struct bnad *, struct bna_rx *))
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
{
struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
struct list_head *qe;
u8 *mcaddr;
struct bna_mac *mac, *del_mac;
int i;
/* Purge the pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
mac = list_first_entry(&rxf->mcast_pending_add_q,
struct bna_mac, qe);
list_move_tail(&mac->qe, &mcam_mod->free_q);
}
/* Schedule active_q entries for deletion */
while (!list_empty(&rxf->mcast_active_q)) {
bfa_q_deq(&rxf->mcast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
mac = list_first_entry(&rxf->mcast_active_q,
struct bna_mac, qe);
del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
memcpy(del_mac, mac, sizeof(*del_mac));
ether_addr_copy(del_mac->addr, mac->addr);
del_mac->handle = mac->handle;
list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
mac->handle = NULL;
bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
list_move_tail(&mac->qe, &mcam_mod->free_q);
}
/* Allocate nodes */
......@@ -1025,8 +872,7 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
if (mac == NULL)
goto err_return;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, mcaddr, ETH_ALEN);
ether_addr_copy(mac->addr, mcaddr);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
......@@ -1034,70 +880,52 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
/* Add the new entries */
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
mac = list_first_entry(&list_head, struct bna_mac, qe);
list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
}
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
mac = list_first_entry(&list_head, struct bna_mac, qe);
list_move_tail(&mac->qe, &mcam_mod->free_q);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
bna_rx_mcast_delall(struct bna_rx *rx,
void (*cbfn)(struct bnad *, struct bna_rx *))
bna_rx_mcast_delall(struct bna_rx *rx)
{
struct bna_rxf *rxf = &rx->rxf;
struct list_head *qe;
struct bna_mac *mac, *del_mac;
int need_hw_config = 0;
/* Purge all entries from pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
mac = list_first_entry(&rxf->mcast_pending_add_q,
struct bna_mac, qe);
list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
}
/* Schedule all entries in active_q for deletion */
while (!list_empty(&rxf->mcast_active_q)) {
bfa_q_deq(&rxf->mcast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
mac = list_first_entry(&rxf->mcast_active_q,
struct bna_mac, qe);
list_del(&mac->qe);
del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
memcpy(del_mac, mac, sizeof(*del_mac));
list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
mac->handle = NULL;
bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
need_hw_config = 1;
}
if (need_hw_config) {
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
if (need_hw_config)
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return;
}
if (cbfn)
(*cbfn)(rx->bna->bnad, rx);
}
void
......@@ -1105,12 +933,12 @@ bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
rxf->vlan_filter_table[index] |= bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
rxf->vlan_pending_bitmask |= (1 << group_id);
rxf->vlan_pending_bitmask |= BIT(group_id);
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
......@@ -1120,12 +948,12 @@ bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
rxf->vlan_filter_table[index] &= ~bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
rxf->vlan_pending_bitmask |= (1 << group_id);
rxf->vlan_pending_bitmask |= BIT(group_id);
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
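BIT(n), provided by <linux/bitops.h> in this kernel generation, expands to 1UL << (n) and replaces the open-coded shifts in the VLAN and rid-mask code; besides being shorter, it yields an unsigned long rather than a signed int. A small sketch of the mask handling, with a hypothetical 'pending' bitmask:

#include <linux/bitops.h>
#include <linux/types.h>

static unsigned long pending;                   /* illustrative group bitmask */

static void mark_group(int group_id)
{
        pending |= BIT(group_id);               /* was: pending |= (1 << group_id) */
}

static void clear_group(int group_id)
{
        pending &= ~BIT(group_id);
}

static bool group_pending(int group_id)
{
        return pending & BIT(group_id);
}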
......@@ -1134,23 +962,21 @@ static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
struct list_head *qe;
/* Delete MAC addresses previously added */
if (!list_empty(&rxf->ucast_pending_del_q)) {
bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
mac = list_first_entry(&rxf->ucast_pending_del_q,
struct bna_mac, qe);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
return 1;
}
/* Set default unicast MAC */
if (rxf->ucast_pending_set) {
rxf->ucast_pending_set = 0;
memcpy(rxf->ucast_active_mac.addr,
rxf->ucast_pending_mac->addr, ETH_ALEN);
ether_addr_copy(rxf->ucast_active_mac.addr,
rxf->ucast_pending_mac->addr);
rxf->ucast_active_set = 1;
bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
BFI_ENET_H2I_MAC_UCAST_SET_REQ);
......@@ -1159,9 +985,8 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
/* Add additional MAC entries */
if (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
mac = list_first_entry(&rxf->ucast_pending_add_q,
struct bna_mac, qe);
list_add_tail(&mac->qe, &rxf->ucast_active_q);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
return 1;
......@@ -1173,33 +998,30 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
struct list_head *qe;
struct bna_mac *mac;
/* Throw away delete pending ucast entries */
while (!list_empty(&rxf->ucast_pending_del_q)) {
bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
mac = list_first_entry(&rxf->ucast_pending_del_q,
struct bna_mac, qe);
if (cleanup == BNA_SOFT_CLEANUP)
bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
mac);
list_move_tail(&mac->qe,
bna_ucam_mod_del_q(rxf->rx->bna));
else {
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
mac);
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
list_move_tail(&mac->qe,
bna_ucam_mod_del_q(rxf->rx->bna));
return 1;
}
}
/* Move active ucast entries to pending_add_q */
while (!list_empty(&rxf->ucast_active_q)) {
bfa_q_deq(&rxf->ucast_active_q, &qe);
bfa_q_qe_init(qe);
list_add_tail(qe, &rxf->ucast_pending_add_q);
mac = list_first_entry(&rxf->ucast_active_q,
struct bna_mac, qe);
list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
if (cleanup == BNA_HARD_CLEANUP) {
mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
return 1;
......@@ -1654,14 +1476,11 @@ static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
struct list_head *qe_rxp;
int is_regular = (rx->type == BNA_RX_T_REGULAR);
/* Start IB */
list_for_each(qe_rxp, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe_rxp;
list_for_each_entry(rxp, &rx->rxp_q, qe)
bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
}
bna_ethport_cb_rx_started(&rx->bna->ethport);
}
......@@ -1804,7 +1623,6 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL, *q1 = NULL;
struct list_head *rxp_qe;
int i;
bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
......@@ -1814,11 +1632,9 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
i < rx->num_paths;
i++, rxp_qe = bfa_q_next(rxp_qe)) {
rxp = (struct bna_rxp *)rxp_qe;
for (i = 0; i < rx->num_paths; i++) {
rxp = rxp ? list_next_entry(rxp, qe)
: list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
GET_RXQS(rxp, q0, q1);
switch (rxp->type) {
case BNA_RXP_SLR:
......@@ -1921,13 +1737,10 @@ static void
bna_rx_enet_stop(struct bna_rx *rx)
{
struct bna_rxp *rxp;
struct list_head *qe_rxp;
/* Stop IB */
list_for_each(qe_rxp, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe_rxp;
list_for_each_entry(rxp, &rx->rxp_q, qe)
bna_ib_stop(rx->bna, &rxp->cq.ib);
}
bna_bfi_rx_enet_stop(rx);
}
......@@ -1957,12 +1770,10 @@ static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
struct bna_rxq *rxq = NULL;
struct list_head *qe = NULL;
bfa_q_deq(&rx_mod->rxq_free_q, &qe);
rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
list_del(&rxq->qe);
rx_mod->rxq_free_count--;
rxq = (struct bna_rxq *)qe;
bfa_q_qe_init(&rxq->qe);
return rxq;
}
......@@ -1970,7 +1781,6 @@ bna_rxq_get(struct bna_rx_mod *rx_mod)
static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
bfa_q_qe_init(&rxq->qe);
list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
......@@ -1978,13 +1788,11 @@ bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
struct list_head *qe = NULL;
struct bna_rxp *rxp = NULL;
bfa_q_deq(&rx_mod->rxp_free_q, &qe);
rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe);
list_del(&rxp->qe);
rx_mod->rxp_free_count--;
rxp = (struct bna_rxp *)qe;
bfa_q_qe_init(&rxp->qe);
return rxp;
}
......@@ -1992,7 +1800,6 @@ bna_rxp_get(struct bna_rx_mod *rx_mod)
static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
bfa_q_qe_init(&rxp->qe);
list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
......@@ -2000,18 +1807,16 @@ bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct list_head *qe = NULL;
struct bna_rx *rx = NULL;
if (type == BNA_RX_T_REGULAR) {
bfa_q_deq(&rx_mod->rx_free_q, &qe);
} else
bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
BUG_ON(list_empty(&rx_mod->rx_free_q));
if (type == BNA_RX_T_REGULAR)
rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
else
rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
rx_mod->rx_free_count--;
rx = (struct bna_rx *)qe;
bfa_q_qe_init(&rx->qe);
list_add_tail(&rx->qe, &rx_mod->rx_active_q);
list_move_tail(&rx->qe, &rx_mod->rx_active_q);
rx->type = type;
return rx;
......@@ -2020,32 +1825,13 @@ bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
struct list_head *prev_qe = NULL;
struct list_head *qe;
bfa_q_qe_init(&rx->qe);
list_for_each(qe, &rx_mod->rx_free_q) {
list_for_each_prev(qe, &rx_mod->rx_free_q)
if (((struct bna_rx *)qe)->rid < rx->rid)
prev_qe = qe;
else
break;
}
if (prev_qe == NULL) {
/* This is the first entry */
bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
/* This is the last entry */
list_add_tail(&rx->qe, &rx_mod->rx_free_q);
} else {
/* Somewhere in the middle */
bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
bfa_q_prev(&rx->qe) = prev_qe;
bfa_q_next(prev_qe) = &rx->qe;
bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
}
list_add(&rx->qe, qe);
rx_mod->rx_free_count++;
}
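The rewritten bna_rx_put() keeps the free list sorted by rid without the old prev_qe bookkeeping: list_for_each_prev() walks backwards until it finds the last entry with a smaller rid, and list_add() inserts the freed element right after that cursor; if nothing is smaller the cursor ends up at the list head, so the element lands at the front. A kernel-context sketch of the idiom with an illustrative struct:

#include <linux/list.h>

struct ent {
        int rid;
        struct list_head qe;
};

/* Insert 'e' so the list stays sorted by ascending rid. */
static void put_sorted(struct list_head *head, struct ent *e)
{
        struct list_head *qe;

        list_for_each_prev(qe, head)
                if (list_entry(qe, struct ent, qe)->rid < e->rid)
                        break;
        /* 'qe' is now the predecessor, or the list head itself. */
        list_add(&e->qe, qe);
}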
......@@ -2199,24 +1985,20 @@ void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
struct list_head *qe;
rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
if (type == BNA_RX_T_LOOPBACK)
rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
list_for_each(qe, &rx_mod->rx_active_q) {
rx = (struct bna_rx *)qe;
list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
if (rx->type == type)
bna_rx_start(rx);
}
}
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
......@@ -2225,13 +2007,11 @@ bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
list_for_each(qe, &rx_mod->rx_active_q) {
rx = (struct bna_rx *)qe;
list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
if (rx->type == type) {
bfa_wc_up(&rx_mod->rx_stop_wc);
bna_rx_stop(rx);
}
}
bfa_wc_wait(&rx_mod->rx_stop_wc);
}
......@@ -2240,15 +2020,12 @@ void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
struct bna_rx *rx;
struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
list_for_each(qe, &rx_mod->rx_active_q) {
rx = (struct bna_rx *)qe;
list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
bna_rx_fail(rx);
}
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
......@@ -2282,7 +2059,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
rx_ptr = &rx_mod->rx[index];
bfa_q_qe_init(&rx_ptr->qe);
INIT_LIST_HEAD(&rx_ptr->rxp_q);
rx_ptr->bna = NULL;
rx_ptr->rid = index;
......@@ -2296,7 +2072,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
/* build RX-path queue */
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
rxp_ptr = &rx_mod->rxp[index];
bfa_q_qe_init(&rxp_ptr->qe);
list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
......@@ -2304,7 +2079,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
/* build RXQ queue */
for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
rxq_ptr = &rx_mod->rxq[index];
bfa_q_qe_init(&rxq_ptr->qe);
list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
......@@ -2313,21 +2087,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
struct list_head *qe;
int i;
i = 0;
list_for_each(qe, &rx_mod->rx_free_q)
i++;
i = 0;
list_for_each(qe, &rx_mod->rxp_free_q)
i++;
i = 0;
list_for_each(qe, &rx_mod->rxq_free_q)
i++;
rx_mod->bna = NULL;
}
......@@ -2337,7 +2096,6 @@ bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL, *q1 = NULL;
struct list_head *rxp_qe;
int i;
bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
......@@ -2345,10 +2103,8 @@ bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
rx->hw_id = cfg_rsp->hw_id;
for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
i < rx->num_paths;
i++, rxp_qe = bfa_q_next(rxp_qe)) {
rxp = (struct bna_rxp *)rxp_qe;
for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) {
GET_RXQS(rxp, q0, q1);
/* Setup doorbells */
......@@ -2396,20 +2152,19 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
dq_depth = q_cfg->q0_depth;
hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
cq_depth = dq_depth + hq_depth;
cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
BNA_TO_POWER_OF_2_HIGH(cq_depth);
cq_size = cq_depth * BFI_CQ_WI_SIZE;
cq_size = ALIGN(cq_size, PAGE_SIZE);
cpage_count = SIZE_TO_PAGES(cq_size);
BNA_TO_POWER_OF_2_HIGH(dq_depth);
dq_depth = roundup_pow_of_two(dq_depth);
dq_size = dq_depth * BFI_RXQ_WI_SIZE;
dq_size = ALIGN(dq_size, PAGE_SIZE);
dpage_count = SIZE_TO_PAGES(dq_size);
if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
BNA_TO_POWER_OF_2_HIGH(hq_depth);
hq_depth = roundup_pow_of_two(hq_depth);
hq_size = hq_depth * BFI_RXQ_WI_SIZE;
hq_size = ALIGN(hq_size, PAGE_SIZE);
hpage_count = SIZE_TO_PAGES(hq_size);
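roundup_pow_of_two() from <linux/log2.h> returns its argument rounded up to the next power of two and replaces the driver-private BNA_TO_POWER_OF_2_HIGH() macro. A hedged sketch of the queue sizing done above, using an assumed work-item size rather than the driver's BFI constants:

#include <linux/kernel.h>               /* ALIGN() */
#include <linux/log2.h>
#include <linux/mm.h>                   /* PAGE_SIZE */

#define WI_SIZE 64                      /* bytes per work item (assumed) */

/* Pages needed for a CQ deep enough to cover both RxQs of a path. */
static unsigned int cq_pages(unsigned int dq_depth, unsigned int hq_depth)
{
        unsigned int depth = roundup_pow_of_two(dq_depth + hq_depth);
        unsigned int size = ALIGN(depth * WI_SIZE, PAGE_SIZE);

        return size / PAGE_SIZE;
}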
......@@ -2620,7 +2375,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
if (intr_info->intr_type == BNA_INTR_T_MSIX)
rxp->cq.ib.intr_vector = rxp->vector;
else
rxp->cq.ib.intr_vector = (1 << rxp->vector);
rxp->cq.ib.intr_vector = BIT(rxp->vector);
rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
......@@ -2691,7 +2446,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
/* if multi-buffer is enabled, the sum of q0_depth
 * and q1_depth need not be a power of 2
 */
BNA_TO_POWER_OF_2_HIGH(cq_depth);
cq_depth = roundup_pow_of_two(cq_depth);
rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
......@@ -2725,7 +2480,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
rx_mod->rid_mask |= (1 << rx->rid);
rx_mod->rid_mask |= BIT(rx->rid);
return rx;
}
......@@ -2742,7 +2497,8 @@ bna_rx_destroy(struct bna_rx *rx)
bna_rxf_uninit(&rx->rxf);
while (!list_empty(&rx->rxp_q)) {
bfa_q_deq(&rx->rxp_q, &rxp);
rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
list_del(&rxp->qe);
GET_RXQS(rxp, q0, q1);
if (rx->rcb_destroy_cbfn)
rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
......@@ -2769,15 +2525,13 @@ bna_rx_destroy(struct bna_rx *rx)
bna_rxp_put(rx_mod, rxp);
}
list_for_each(qe, &rx_mod->rx_active_q) {
list_for_each(qe, &rx_mod->rx_active_q)
if (qe == &rx->qe) {
list_del(&rx->qe);
bfa_q_qe_init(&rx->qe);
break;
}
}
rx_mod->rid_mask &= ~(1 << rx->rid);
rx_mod->rid_mask &= ~BIT(rx->rid);
rx->bna = NULL;
rx->priv = NULL;
......@@ -2844,8 +2598,7 @@ bna_rx_vlan_strip_disable(struct bna_rx *rx)
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
enum bna_rxmode bitmask,
void (*cbfn)(struct bnad *, struct bna_rx *))
enum bna_rxmode bitmask)
{
struct bna_rxf *rxf = &rx->rxf;
int need_hw_config = 0;
......@@ -2900,11 +2653,10 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
/* Trigger h/w if needed */
if (need_hw_config) {
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbfn = NULL;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
} else if (cbfn)
(*cbfn)(rx->bna->bnad, rx);
}
return BNA_CB_SUCCESS;
......@@ -2928,10 +2680,8 @@ void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
struct bna_rxp *rxp;
struct list_head *qe;
list_for_each(qe, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe;
list_for_each_entry(rxp, &rx->rxp_q, qe) {
rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
}
......@@ -3024,16 +2774,6 @@ do { \
} \
} while (0)
#define call_tx_prio_change_cbfn(tx) \
do { \
if ((tx)->prio_change_cbfn) { \
void (*cbfn)(struct bnad *, struct bna_tx *); \
cbfn = (tx)->prio_change_cbfn; \
(tx)->prio_change_cbfn = NULL; \
cbfn((tx)->bna->bnad, (tx)); \
} \
} while (0)
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);
......@@ -3044,7 +2784,6 @@ enum bna_tx_event {
TX_E_FAIL = 3,
TX_E_STARTED = 4,
TX_E_STOPPED = 5,
TX_E_PRIO_CHANGE = 6,
TX_E_CLEANUP_DONE = 7,
TX_E_BW_UPDATE = 8,
};
......@@ -3085,10 +2824,6 @@ bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
/* No-op */
break;
case TX_E_PRIO_CHANGE:
call_tx_prio_change_cbfn(tx);
break;
case TX_E_BW_UPDATE:
/* No-op */
break;
......@@ -3109,28 +2844,23 @@ bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_STOP:
tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
break;
case TX_E_FAIL:
tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
case TX_E_STARTED:
if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
BNA_TX_F_BW_UPDATED);
if (tx->flags & BNA_TX_F_BW_UPDATED) {
tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
} else
bfa_fsm_set_state(tx, bna_tx_sm_started);
break;
case TX_E_PRIO_CHANGE:
tx->flags |= BNA_TX_F_PRIO_CHANGED;
break;
case TX_E_BW_UPDATE:
tx->flags |= BNA_TX_F_BW_UPDATED;
break;
......@@ -3144,11 +2874,9 @@ static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
struct bna_txq *txq;
struct list_head *qe;
int is_regular = (tx->type == BNA_TX_T_REGULAR);
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
list_for_each_entry(txq, &tx->txq_q, qe) {
txq->tcb->priority = txq->priority;
/* Start IB */
bna_ib_start(tx->bna, &txq->ib, is_regular);
......@@ -3172,7 +2900,6 @@ bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
break;
......@@ -3205,7 +2932,6 @@ bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
bna_tx_enet_stop(tx);
break;
case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
......@@ -3225,7 +2951,6 @@ bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_FAIL:
case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
......@@ -3256,7 +2981,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
case TX_E_FAIL:
bfa_fsm_set_state(tx, bna_tx_sm_failed);
call_tx_prio_change_cbfn(tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
......@@ -3264,7 +2988,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
break;
case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
......@@ -3277,7 +3000,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
call_tx_prio_change_cbfn(tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}
......@@ -3293,7 +3015,6 @@ bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
bfa_fsm_set_state(tx, bna_tx_sm_failed);
break;
case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
......@@ -3372,7 +3093,6 @@ bna_bfi_tx_enet_start(struct bna_tx *tx)
{
struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
struct bna_txq *txq = NULL;
struct list_head *qe;
int i;
bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
......@@ -3381,11 +3101,9 @@ bna_bfi_tx_enet_start(struct bna_tx *tx)
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
cfg_req->num_queues = tx->num_txq;
for (i = 0, qe = bfa_q_first(&tx->txq_q);
i < tx->num_txq;
i++, qe = bfa_q_next(qe)) {
txq = (struct bna_txq *)qe;
for (i = 0; i < tx->num_txq; i++) {
txq = txq ? list_next_entry(txq, qe)
: list_first_entry(&tx->txq_q, struct bna_txq, qe);
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
cfg_req->q_cfg[i].q.priority = txq->priority;
......@@ -3437,13 +3155,10 @@ static void
bna_tx_enet_stop(struct bna_tx *tx)
{
struct bna_txq *txq;
struct list_head *qe;
/* Stop IB */
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
list_for_each_entry(txq, &tx->txq_q, qe)
bna_ib_stop(tx->bna, &txq->ib);
}
bna_bfi_tx_enet_stop(tx);
}
......@@ -3487,18 +3202,15 @@ bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct list_head *qe = NULL;
struct bna_tx *tx = NULL;
if (list_empty(&tx_mod->tx_free_q))
return NULL;
if (type == BNA_TX_T_REGULAR) {
bfa_q_deq(&tx_mod->tx_free_q, &qe);
} else {
bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
}
tx = (struct bna_tx *)qe;
bfa_q_qe_init(&tx->qe);
if (type == BNA_TX_T_REGULAR)
tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
else
tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
list_del(&tx->qe);
tx->type = type;
return tx;
......@@ -3509,21 +3221,18 @@ bna_tx_free(struct bna_tx *tx)
{
struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
struct bna_txq *txq;
struct list_head *prev_qe;
struct list_head *qe;
while (!list_empty(&tx->txq_q)) {
bfa_q_deq(&tx->txq_q, &txq);
bfa_q_qe_init(&txq->qe);
txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
txq->tcb = NULL;
txq->tx = NULL;
list_add_tail(&txq->qe, &tx_mod->txq_free_q);
list_move_tail(&txq->qe, &tx_mod->txq_free_q);
}
list_for_each(qe, &tx_mod->tx_active_q) {
if (qe == &tx->qe) {
list_del(&tx->qe);
bfa_q_qe_init(&tx->qe);
break;
}
}
......@@ -3531,28 +3240,11 @@ bna_tx_free(struct bna_tx *tx)
tx->bna = NULL;
tx->priv = NULL;
prev_qe = NULL;
list_for_each(qe, &tx_mod->tx_free_q) {
list_for_each_prev(qe, &tx_mod->tx_free_q)
if (((struct bna_tx *)qe)->rid < tx->rid)
prev_qe = qe;
else {
break;
}
}
if (prev_qe == NULL) {
/* This is the first entry */
bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
/* This is the last entry */
list_add_tail(&tx->qe, &tx_mod->tx_free_q);
} else {
/* Somewhere in the middle */
bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
bfa_q_prev(&tx->qe) = prev_qe;
bfa_q_next(prev_qe) = &tx->qe;
bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
}
list_add(&tx->qe, qe);
}
static void
......@@ -3585,7 +3277,6 @@ bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
struct bna_txq *txq = NULL;
struct list_head *qe;
int i;
bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
......@@ -3593,10 +3284,8 @@ bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
tx->hw_id = cfg_rsp->hw_id;
for (i = 0, qe = bfa_q_first(&tx->txq_q);
i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
txq = (struct bna_txq *)qe;
for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) {
/* Setup doorbells */
txq->tcb->i_dbell->doorbell_addr =
tx->bna->pcidev.pci_bar_kva
......@@ -3624,12 +3313,9 @@ void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
struct list_head *qe;
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
}
}
void
......@@ -3689,7 +3375,6 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
struct bna_tx_mod *tx_mod = &bna->tx_mod;
struct bna_tx *tx;
struct bna_txq *txq;
struct list_head *qe;
int page_count;
int i;
......@@ -3719,9 +3404,8 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
if (list_empty(&tx_mod->txq_free_q))
goto err_return;
bfa_q_deq(&tx_mod->txq_free_q, &txq);
bfa_q_qe_init(&txq->qe);
list_add_tail(&txq->qe, &tx->txq_q);
txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe);
list_move_tail(&txq->qe, &tx->txq_q);
txq->tx = tx;
}
......@@ -3760,8 +3444,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
/* TxQ */
i = 0;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
list_for_each_entry(txq, &tx->txq_q, qe) {
txq->tcb = (struct bna_tcb *)
res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
txq->tx_packets = 0;
......@@ -3779,7 +3462,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
intr_info->idl[0].vector :
intr_info->idl[i].vector;
if (intr_info->intr_type == BNA_INTR_T_INTX)
txq->ib.intr_vector = (1 << txq->ib.intr_vector);
txq->ib.intr_vector = BIT(txq->ib.intr_vector);
txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
......@@ -3821,7 +3504,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
tx_mod->rid_mask |= (1 << tx->rid);
tx_mod->rid_mask |= BIT(tx->rid);
return tx;
......@@ -3834,15 +3517,12 @@ void
bna_tx_destroy(struct bna_tx *tx)
{
struct bna_txq *txq;
struct list_head *qe;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
list_for_each_entry(txq, &tx->txq_q, qe)
if (tx->tcb_destroy_cbfn)
(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
}
tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
bna_tx_free(tx);
}
......@@ -3920,9 +3600,7 @@ bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
tx_mod->tx[i].rid = i;
bfa_q_qe_init(&tx_mod->tx[i].qe);
list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
bfa_q_qe_init(&tx_mod->txq[i].qe);
list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
}
......@@ -3935,17 +3613,6 @@ bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
struct list_head *qe;
int i;
i = 0;
list_for_each(qe, &tx_mod->tx_free_q)
i++;
i = 0;
list_for_each(qe, &tx_mod->txq_free_q)
i++;
tx_mod->bna = NULL;
}
......@@ -3953,24 +3620,20 @@ void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
struct list_head *qe;
tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
if (type == BNA_TX_T_LOOPBACK)
tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
if (tx->type == type)
bna_tx_start(tx);
}
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
......@@ -3979,13 +3642,11 @@ bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
if (tx->type == type) {
bfa_wc_up(&tx_mod->tx_stop_wc);
bna_tx_stop(tx);
}
}
bfa_wc_wait(&tx_mod->tx_stop_wc);
}
......@@ -3994,25 +3655,19 @@ void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
bna_tx_fail(tx);
}
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
struct bna_txq *txq;
struct list_head *qe;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
list_for_each_entry(txq, &tx->txq_q, qe)
bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
}
}
......@@ -135,7 +135,6 @@ enum bna_tx_type {
enum bna_tx_flags {
BNA_TX_F_ENET_STARTED = 1,
BNA_TX_F_ENABLED = 2,
BNA_TX_F_PRIO_CHANGED = 4,
BNA_TX_F_BW_UPDATED = 8,
};
......@@ -182,17 +181,11 @@ enum bna_rx_mod_flags {
BNA_RX_MOD_F_ENET_LOOPBACK = 2,
};
enum bna_rxf_flags {
BNA_RXF_F_PAUSED = 1,
};
enum bna_rxf_event {
RXF_E_START = 1,
RXF_E_STOP = 2,
RXF_E_FAIL = 3,
RXF_E_CONFIG = 4,
RXF_E_PAUSE = 5,
RXF_E_RESUME = 6,
RXF_E_FW_RESP = 7,
};
......@@ -362,9 +355,6 @@ struct bna_enet {
void (*stop_cbfn)(void *);
void *stop_cbarg;
/* Callback for bna_enet_pause_config() */
void (*pause_cbfn)(struct bnad *);
/* Callback for bna_enet_mtu_set() */
void (*mtu_cbfn)(struct bnad *);
......@@ -498,9 +488,6 @@ struct bna_tx {
void (*stop_cbfn)(void *arg, struct bna_tx *tx);
void *stop_cbarg;
/* callback for bna_tx_prio_set() */
void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx);
struct bfa_msgq_cmd_entry msgq_cmd;
union {
struct bfi_enet_tx_cfg_req cfg_req;
......@@ -676,7 +663,6 @@ struct bna_rx_config {
enum bna_rx_type rx_type;
int num_paths;
enum bna_rxp_type rxp_type;
int paused;
int coalescing_timeo;
/*
* Small/Large (or Header/Data) buffer size to be configured
......@@ -721,7 +707,6 @@ struct bna_rxp {
/* RxF structure (hardware Rx Function) */
struct bna_rxf {
bfa_fsm_t fsm;
enum bna_rxf_flags flags;
struct bfa_msgq_cmd_entry msgq_cmd;
union {
......@@ -742,10 +727,6 @@ struct bna_rxf {
void (*stop_cbfn) (struct bna_rx *rx);
struct bna_rx *stop_cbarg;
/* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */
void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx);
struct bnad *oper_state_cbarg;
/**
* callback for:
* bna_rxf_ucast_set()
......
......@@ -57,7 +57,8 @@ static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
static const u8 bnad_bcast_addr[] __aligned(2) =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
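bnad_bcast_addr gains __aligned(2) because ether_addr_copy() and ether_addr_equal() may be implemented with 16-bit loads and stores, so both operands must be u16-aligned; plain memcpy() had no such requirement. A minimal sketch of the constraint:

#include <linux/etherdevice.h>

/* Both buffers must be 2-byte aligned for ether_addr_copy(). */
static const u8 bcast[ETH_ALEN] __aligned(2) =
        { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void set_dest(u8 *dst)                   /* dst assumed u16-aligned */
{
        ether_addr_copy(dst, bcast);            /* copies ETH_ALEN bytes */
}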
/*
* Local MACROS
......@@ -724,7 +725,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
cmpl->valid = 0;
BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
}
cmpl = &cq[ccb->producer_index];
}
napi_gro_flush(&rx_ctrl->napi, false);
......@@ -875,9 +875,9 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
{
struct net_device *netdev = bnad->netdev;
memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
if (is_zero_ether_addr(netdev->dev_addr))
memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}
/* Control Path Handlers */
......@@ -946,8 +946,7 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
if (link_up) {
if (!netif_carrier_ok(bnad->netdev)) {
uint tx_id, tcb_id;
printk(KERN_WARNING "bna: %s link up\n",
bnad->netdev->name);
netdev_info(bnad->netdev, "link up\n");
netif_carrier_on(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
......@@ -966,10 +965,6 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
/*
* Force an immediate
* Transmit Schedule */
printk(KERN_INFO "bna: %s %d "
"TXQ_STARTED\n",
bnad->netdev->name,
txq_id);
netif_wake_subqueue(
bnad->netdev,
txq_id);
......@@ -987,8 +982,7 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
}
} else {
if (netif_carrier_ok(bnad->netdev)) {
printk(KERN_WARNING "bna: %s link down\n",
bnad->netdev->name);
netdev_info(bnad->netdev, "link down\n");
netif_carrier_off(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
}
......@@ -1058,8 +1052,6 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
txq_id = tcb->id;
clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
netif_stop_subqueue(bnad->netdev, txq_id);
printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
bnad->netdev->name, txq_id);
}
}
......@@ -1082,8 +1074,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
BUG_ON(*(tcb->hw_consumer_index) != 0);
if (netif_carrier_ok(bnad->netdev)) {
printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
bnad->netdev->name, txq_id);
netif_wake_subqueue(bnad->netdev, txq_id);
BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
}
......@@ -1094,8 +1084,8 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
* get a 0 MAC address. We try to get the MAC address
* again here.
*/
if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
if (is_zero_ether_addr(bnad->perm_addr)) {
bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
}
}
......@@ -1703,7 +1693,7 @@ bnad_ioc_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
......@@ -1714,7 +1704,7 @@ bnad_ioc_hb_check(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
......@@ -1725,7 +1715,7 @@ bnad_iocpf_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
......@@ -1736,7 +1726,7 @@ bnad_iocpf_sem_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
......@@ -1862,8 +1852,7 @@ bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
struct netdev_hw_addr *mc_addr;
netdev_for_each_mc_addr(mc_addr, netdev) {
memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
ETH_ALEN);
ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
i++;
}
}
......@@ -2137,7 +2126,7 @@ bnad_reinit_rx(struct bnad *bnad)
current_err = bnad_setup_rx(bnad, rx_id);
if (current_err && !err) {
err = current_err;
pr_err("RXQ:%u setup failed\n", rx_id);
netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
}
}
......@@ -2349,7 +2338,7 @@ bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
if (!bnad->rx_info[0].rx)
return 0;
ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
if (ret != BNA_CB_SUCCESS)
return -EADDRNOTAVAIL;
......@@ -2673,8 +2662,9 @@ bnad_enable_msix(struct bnad *bnad)
if (ret < 0) {
goto intx_mode;
} else if (ret < bnad->msix_num) {
pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
ret, bnad->msix_num);
dev_warn(&bnad->pcidev->dev,
"%d MSI-X vectors allocated < %d requested\n",
ret, bnad->msix_num);
spin_lock_irqsave(&bnad->bna_lock, flags);
/* ret = #of vectors that we got */
......@@ -2696,7 +2686,8 @@ bnad_enable_msix(struct bnad *bnad)
return;
intx_mode:
pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
dev_warn(&bnad->pcidev->dev,
"MSI-X enable failed - operating in INTx mode\n");
kfree(bnad->msix_table);
bnad->msix_table = NULL;
......@@ -2754,7 +2745,7 @@ bnad_open(struct net_device *netdev)
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_enet_mtu_set(&bnad->bna.enet,
BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
bna_enet_pause_config(&bnad->bna.enet, &pause_config);
bna_enet_enable(&bnad->bna.enet);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
......@@ -3128,7 +3119,7 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
int entry;
if (netdev_uc_empty(bnad->netdev)) {
bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
return;
}
......@@ -3141,13 +3132,11 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
entry = 0;
netdev_for_each_uc_addr(ha, netdev) {
memcpy(&mac_list[entry * ETH_ALEN],
&ha->addr[0], ETH_ALEN);
ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
entry++;
}
ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
mac_list, NULL);
ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
kfree(mac_list);
if (ret != BNA_CB_SUCCESS)
......@@ -3158,7 +3147,7 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
/* ucast packets not in UCAM are routed to default function */
mode_default:
bnad->cfg_flags |= BNAD_CF_DEFAULT;
bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
}
static void
......@@ -3183,12 +3172,11 @@ bnad_set_rx_mcast_fltr(struct bnad *bnad)
if (mac_list == NULL)
goto mode_allmulti;
memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
/* copy rest of the MCAST addresses */
bnad_netdev_mc_list_get(netdev, mac_list);
ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
mac_list, NULL);
ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
kfree(mac_list);
if (ret != BNA_CB_SUCCESS)
......@@ -3198,7 +3186,7 @@ bnad_set_rx_mcast_fltr(struct bnad *bnad)
mode_allmulti:
bnad->cfg_flags |= BNAD_CF_ALLMULTI;
bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
bna_rx_mcast_delall(bnad->rx_info[0].rx);
}
void
......@@ -3237,7 +3225,7 @@ bnad_set_rx_mode(struct net_device *netdev)
mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
BNA_RXMODE_ALLMULTI;
bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
......@@ -3248,19 +3236,18 @@ bnad_set_rx_mode(struct net_device *netdev)
* in a non-blocking context.
*/
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
bnad_set_mac_address(struct net_device *netdev, void *addr)
{
int err;
struct bnad *bnad = netdev_priv(netdev);
struct sockaddr *sa = (struct sockaddr *)mac_addr;
struct sockaddr *sa = (struct sockaddr *)addr;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
if (!err)
memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
ether_addr_copy(netdev->dev_addr, sa->sa_data);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
......@@ -3487,8 +3474,8 @@ bnad_init(struct bnad *bnad,
dev_err(&pdev->dev, "ioremap for bar0 failed\n");
return -ENOMEM;
}
pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
(unsigned long long) bnad->mmio_len);
dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
(unsigned long long) bnad->mmio_len);
spin_lock_irqsave(&bnad->bna_lock, flags);
if (!bnad_msix_disable)
......@@ -3609,13 +3596,10 @@ bnad_pci_probe(struct pci_dev *pdev,
struct bfa_pcidev pcidev_info;
unsigned long flags;
pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
pdev, pcidev_id, PCI_FUNC(pdev->devfn));
mutex_lock(&bnad_fwimg_mutex);
if (!cna_get_firmware_buf(pdev)) {
mutex_unlock(&bnad_fwimg_mutex);
pr_warn("Failed to load Firmware Image!\n");
dev_err(&pdev->dev, "failed to load firmware image!\n");
return -ENODEV;
}
mutex_unlock(&bnad_fwimg_mutex);
......@@ -3708,8 +3692,7 @@ bnad_pci_probe(struct pci_dev *pdev,
*/
err = bnad_ioceth_enable(bnad);
if (err) {
pr_err("BNA: Initialization failed err=%d\n",
err);
dev_err(&pdev->dev, "initialization failed err=%d\n", err);
goto probe_success;
}
......@@ -3742,7 +3725,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Get the burnt-in mac */
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
......@@ -3751,7 +3734,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Finally, register with net_device layer */
err = register_netdev(netdev);
if (err) {
pr_err("BNA : Registering with netdev failed\n");
dev_err(&pdev->dev, "registering net device failed\n");
goto probe_uninit;
}
set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
......@@ -3803,7 +3786,6 @@ bnad_pci_remove(struct pci_dev *pdev)
if (!netdev)
return;
pr_info("%s bnad_pci_remove\n", netdev->name);
bnad = netdev_priv(netdev);
bna = &bnad->bna;
......@@ -3864,15 +3846,14 @@ bnad_module_init(void)
{
int err;
pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n",
BNAD_VERSION);
pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
BNAD_VERSION);
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
err = pci_register_driver(&bnad_pci_driver);
if (err < 0) {
pr_err("bna : PCI registration failed in module init "
"(%d)\n", err);
pr_err("bna: PCI driver registration failed err=%d\n", err);
return err;
}
......
......@@ -344,7 +344,7 @@ struct bnad {
struct bnad_completion bnad_completions;
/* Burnt in MAC address */
mac_t perm_addr;
u8 perm_addr[ETH_ALEN];
struct workqueue_struct *work_q;
......
......@@ -76,8 +76,7 @@ bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
fw_debug->debug_buffer = NULL;
kfree(fw_debug);
fw_debug = NULL;
pr_warn("bnad %s: Failed to collect fwtrc\n",
pci_name(bnad->pcidev));
netdev_warn(bnad->netdev, "failed to collect fwtrc\n");
return -ENOMEM;
}
......@@ -117,8 +116,7 @@ bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
fw_debug->debug_buffer = NULL;
kfree(fw_debug);
fw_debug = NULL;
pr_warn("bna %s: Failed to collect fwsave\n",
pci_name(bnad->pcidev));
netdev_warn(bnad->netdev, "failed to collect fwsave\n");
return -ENOMEM;
}
......@@ -217,8 +215,7 @@ bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
drv_info->debug_buffer = NULL;
kfree(drv_info);
drv_info = NULL;
pr_warn("bna %s: Failed to collect drvinfo\n",
pci_name(bnad->pcidev));
netdev_warn(bnad->netdev, "failed to collect drvinfo\n");
return -ENOMEM;
}
......@@ -321,27 +318,20 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
/* Allocate memory to store the user space buf */
kern_buf = kzalloc(nbytes, GFP_KERNEL);
if (!kern_buf)
return -ENOMEM;
if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
kfree(kern_buf);
return -ENOMEM;
}
/* Copy the user space buf */
kern_buf = memdup_user(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &len);
if (rc < 2) {
pr_warn("bna %s: Failed to read user buffer\n",
pci_name(bnad->pcidev));
netdev_warn(bnad->netdev, "failed to read user buffer\n");
kfree(kern_buf);
return -EINVAL;
}
kfree(kern_buf);
kfree(bnad->regdata);
bnad->regdata = NULL;
bnad->reglen = 0;
bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
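memdup_user() allocates a kernel buffer and copies nbytes from user space in a single call, returning an ERR_PTR() on failure rather than NULL, which is why the rewritten handlers test the result with IS_ERR()/PTR_ERR(). A hedged sketch of the pattern outside the driver (the function name is illustrative):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>               /* memdup_user() */

static int handle_write(const char __user *buf, size_t nbytes)
{
        void *kern_buf;

        kern_buf = memdup_user(buf, nbytes);    /* kmalloc() + copy_from_user() */
        if (IS_ERR(kern_buf))
                return PTR_ERR(kern_buf);       /* -ENOMEM or -EFAULT */

        /* ... parse kern_buf, as the debugfs handlers above do ... */

        kfree(kern_buf);
        return 0;
}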
......@@ -355,8 +345,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
/* offset and len sanity check */
rc = bna_reg_offset_check(ioc, addr, len);
if (rc) {
pr_warn("bna %s: Failed reg offset check\n",
pci_name(bnad->pcidev));
netdev_warn(bnad->netdev, "failed reg offset check\n");
kfree(bnad->regdata);
bnad->regdata = NULL;
bnad->reglen = 0;
......@@ -388,20 +377,14 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
/* Allocate memory to store the user space buf */
kern_buf = kzalloc(nbytes, GFP_KERNEL);
if (!kern_buf)
return -ENOMEM;
if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
kfree(kern_buf);
return -ENOMEM;
}
/* Copy the user space buf */
kern_buf = memdup_user(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &val);
if (rc < 2) {
pr_warn("bna %s: Failed to read user buffer\n",
pci_name(bnad->pcidev));
netdev_warn(bnad->netdev, "failed to read user buffer\n");
kfree(kern_buf);
return -EINVAL;
}
......@@ -412,8 +395,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
/* offset and len sanity check */
rc = bna_reg_offset_check(ioc, addr, 1);
if (rc) {
pr_warn("bna %s: Failed reg offset check\n",
pci_name(bnad->pcidev));
netdev_warn(bnad->netdev, "failed reg offset check\n");
return -EINVAL;
}
......@@ -525,7 +507,8 @@ bnad_debugfs_init(struct bnad *bnad)
bna_debugfs_root = debugfs_create_dir("bna", NULL);
atomic_set(&bna_debugfs_port_count, 0);
if (!bna_debugfs_root) {
pr_warn("BNA: debugfs root dir creation failed\n");
netdev_warn(bnad->netdev,
"debugfs root dir creation failed\n");
return;
}
}
......@@ -536,8 +519,8 @@ bnad_debugfs_init(struct bnad *bnad)
bnad->port_debugfs_root =
debugfs_create_dir(name, bna_debugfs_root);
if (!bnad->port_debugfs_root) {
pr_warn("bna pci_dev %s: root dir creation failed\n",
pci_name(bnad->pcidev));
netdev_warn(bnad->netdev,
"debugfs root dir creation failed\n");
return;
}
......@@ -552,9 +535,9 @@ bnad_debugfs_init(struct bnad *bnad)
bnad,
file->fops);
if (!bnad->bnad_dentry_files[i]) {
pr_warn(
"BNA pci_dev:%s: create %s entry failed\n",
pci_name(bnad->pcidev), file->name);
netdev_warn(bnad->netdev,
"create %s entry failed\n",
file->name);
return;
}
}
......
......@@ -445,13 +445,13 @@ bnad_set_ringparam(struct net_device *netdev,
if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
!BNA_POWER_OF_2(ringparam->rx_pending)) {
!is_power_of_2(ringparam->rx_pending)) {
mutex_unlock(&bnad->conf_mutex);
return -EINVAL;
}
if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
!BNA_POWER_OF_2(ringparam->tx_pending)) {
!is_power_of_2(ringparam->tx_pending)) {
mutex_unlock(&bnad->conf_mutex);
return -EINVAL;
}
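Replacing the driver-private BNA_POWER_OF_2 macro with is_power_of_2() from &lt;linux/log2.h&gt; drops duplicated logic in favour of the generic helper. A sketch of the same ring-size validation is shown below; the bounds are placeholders, not the driver's real limits.

```c
#include <linux/log2.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_MIN_Q_DEPTH	512	/* placeholder lower bound */
#define EXAMPLE_MAX_Q_DEPTH	2048	/* placeholder upper bound */

static int example_check_ring(u32 pending)
{
	/* is_power_of_2() is true only for non-zero exact powers of two. */
	if (pending < EXAMPLE_MIN_Q_DEPTH ||
	    pending > EXAMPLE_MAX_Q_DEPTH ||
	    !is_power_of_2(pending))
		return -EINVAL;
	return 0;
}
```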
......@@ -533,7 +533,7 @@ bnad_set_pauseparam(struct net_device *netdev,
pause_config.rx_pause = pauseparam->rx_pause;
pause_config.tx_pause = pauseparam->tx_pause;
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
bna_enet_pause_config(&bnad->bna.enet, &pause_config);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
mutex_unlock(&bnad->conf_mutex);
......@@ -1080,7 +1080,7 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
if (ret) {
pr_err("BNA: Can't locate firmware %s\n", eflash->data);
netdev_err(netdev, "can't load firmware %s\n", eflash->data);
goto out;
}
......@@ -1093,7 +1093,7 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
bnad->id, (u8 *)fw->data, fw->size, 0,
bnad_cb_completion, &fcomp);
if (ret != BFA_STATUS_OK) {
pr_warn("BNA: Flash update failed with err: %d\n", ret);
netdev_warn(netdev, "flash update failed with err=%d\n", ret);
ret = -EIO;
spin_unlock_irq(&bnad->bna_lock);
goto out;
......@@ -1103,8 +1103,9 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
wait_for_completion(&fcomp.comp);
if (fcomp.comp_status != BFA_STATUS_OK) {
ret = -EIO;
pr_warn("BNA: Firmware image update to flash failed with: %d\n",
fcomp.comp_status);
netdev_warn(netdev,
"firmware image update failed with err=%d\n",
fcomp.comp_status);
}
out:
release_firmware(fw);
......
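The flash path above reports firmware errors through netdev_err()/netdev_warn() against the interface. A condensed sketch of the request/release flow with that reporting style follows; everything bna-specific is replaced with placeholder names.

```c
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/errno.h>

/* Hypothetical flash helper: fetch an image and hand it to the device. */
static int example_flash(struct net_device *netdev, struct device *dev,
			 const char *fname)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, fname, dev);
	if (ret) {
		netdev_err(netdev, "can't load firmware %s\n", fname);
		return ret;
	}

	/* ... program fw->data / fw->size into the adapter here ... */

	release_firmware(fw);
	return 0;
}
```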
......@@ -42,66 +42,4 @@ extern char bfa_version[];
#define CNA_FW_FILE_CT2 "ct2fw-3.2.5.1.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
#pragma pack()
#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
/*
* bfa_q_qe_init - to initialize a queue element
*/
#define bfa_q_qe_init(_qe) { \
bfa_q_next(_qe) = (struct list_head *) NULL; \
bfa_q_prev(_qe) = (struct list_head *) NULL; \
}
/*
* bfa_q_deq - dequeue an element from head of the queue
*/
#define bfa_q_deq(_q, _qe) { \
if (!list_empty(_q)) { \
(*((struct list_head **) (_qe))) = bfa_q_next(_q); \
bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
(struct list_head *) (_q); \
bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
bfa_q_qe_init(*((struct list_head **) _qe)); \
} else { \
*((struct list_head **)(_qe)) = NULL; \
} \
}
/*
* bfa_q_deq_tail - dequeue an element from tail of the queue
*/
#define bfa_q_deq_tail(_q, _qe) { \
if (!list_empty(_q)) { \
*((struct list_head **) (_qe)) = bfa_q_prev(_q); \
bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
(struct list_head *) (_q); \
bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
bfa_q_qe_init(*((struct list_head **) _qe)); \
} else { \
*((struct list_head **) (_qe)) = (struct list_head *) NULL; \
} \
}
/*
* bfa_add_tail_head - enqueue an element at the head of queue
*/
#define bfa_q_enq_head(_q, _qe) { \
if (!(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL)) \
pr_err("Assertion failure: %s:%d: %d", \
__FILE__, __LINE__, \
(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
bfa_q_next(_qe) = bfa_q_next(_q); \
bfa_q_prev(_qe) = (struct list_head *) (_q); \
bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe); \
bfa_q_next(_q) = (struct list_head *) (_qe); \
}
#endif /* __CNA_H__ */
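The block above removes the packed mac_t typedef (callers use a plain u8[ETH_ALEN] instead) and the bfa_q_* queue macros, which reimplemented what &lt;linux/list.h&gt; already provides. A minimal sketch of the standard equivalents, using a hypothetical element type rather than a bna structure:

```c
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical queue element used only for illustration. */
struct example_qe {
	struct list_head qe;
	int payload;
};

static void example_queue_ops(struct list_head *q)
{
	struct example_qe *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return;

	/* bfa_q_qe_init() equivalent: initialise the list node. */
	INIT_LIST_HEAD(&e->qe);

	/* bfa_q_enq_head() equivalent: add at the head of the queue. */
	list_add(&e->qe, q);

	/* bfa_q_deq() equivalent: take the first element, if any;
	 * bfa_q_deq_tail() would use list_last_entry() instead.
	 */
	if (!list_empty(q)) {
		e = list_first_entry(q, struct example_qe, qe);
		list_del(&e->qe);
		kfree(e);
	}
}
```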
......@@ -33,7 +33,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
u32 n;
if (request_firmware(&fw, fw_name, &pdev->dev)) {
pr_alert("Can't locate firmware %s\n", fw_name);
dev_alert(&pdev->dev, "can't load firmware %s\n", fw_name);
goto error;
}
......
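In cna_fwimg.c there is no net_device to log against at this point, so the conversion uses dev_alert() on the PCI device rather than a bare pr_alert(). A one-function sketch of that pattern, with a placeholder name for the loader:

```c
#include <linux/firmware.h>
#include <linux/pci.h>

/* Hypothetical loader: only a pci_dev is available this early. */
static const struct firmware *example_read_fw(struct pci_dev *pdev,
					      const char *fw_name)
{
	const struct firmware *fw;

	if (request_firmware(&fw, fw_name, &pdev->dev)) {
		/* dev_alert() ties the message to the PCI device name. */
		dev_alert(&pdev->dev, "can't load firmware %s\n", fw_name);
		return NULL;
	}
	return fw;
}
```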