Commit 4507025d authored by Krishna Gudipati, committed by James Bottomley

[SCSI] bfa: DMA memory allocation enhancement.

- Modified the design so that each BFA sub-module provides
  the amount of DMA and KVA memory it needs and queues the
  request onto the global DMA and KVA info queues.
- During memory allocation, we iterate over these queues to allocate
  the DMA and KVA memory requested by the sub-modules.
- The change avoids requesting the aggregate amount of memory
  needed by all the BFA sub-modules as one contiguous chunk.
Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent 3fd45980
@@ -172,34 +172,6 @@ struct bfa_pciid_s {
extern char bfa_version[];
/*
* BFA memory resources
*/
enum bfa_mem_type {
BFA_MEM_TYPE_KVA = 1, /* Kernel Virtual Memory (non-dma-able) */
BFA_MEM_TYPE_DMA = 2, /* DMA-able memory */
BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
};
struct bfa_mem_elem_s {
enum bfa_mem_type mem_type; /* see enum bfa_mem_type */
u32 mem_len; /* Total Length in Bytes */
u8 *kva; /* kernel virtual address */
u64 dma; /* dma address if DMA memory */
u8 *kva_curp; /* kva allocation cursor */
u64 dma_curp; /* dma allocation cursor */
};
struct bfa_meminfo_s {
struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
};
#define bfa_meminfo_kva(_m) \
((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
#define bfa_meminfo_dma_virt(_m) \
((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
#define bfa_meminfo_dma_phys(_m) \
((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
struct bfa_iocfc_regs_s {
void __iomem *intr_status;
void __iomem *intr_mask;
@@ -294,8 +266,19 @@ struct bfa_iocfc_s {
void *updateq_cbarg; /* bios callback arg */
u32 intr_mask;
struct bfa_faa_args_s faa_args;
struct bfa_mem_dma_s ioc_dma;
struct bfa_mem_dma_s iocfc_dma;
struct bfa_mem_dma_s reqq_dma[BFI_IOC_MAX_CQS];
struct bfa_mem_dma_s rspq_dma[BFI_IOC_MAX_CQS];
struct bfa_mem_kva_s kva_seg;
};
#define BFA_MEM_IOC_DMA(_bfa) (&((_bfa)->iocfc.ioc_dma))
#define BFA_MEM_IOCFC_DMA(_bfa) (&((_bfa)->iocfc.iocfc_dma))
#define BFA_MEM_REQQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.reqq_dma[(_qno)]))
#define BFA_MEM_RSPQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.rspq_dma[(_qno)]))
#define BFA_MEM_IOCFC_KVA(_bfa) (&((_bfa)->iocfc.kva_seg))
#define bfa_fn_lpu(__bfa) \
bfi_fn_lpu(bfa_ioc_pcifn(&(__bfa)->ioc), bfa_ioc_portid(&(__bfa)->ioc))
#define bfa_msix_init(__bfa, __nvecs) \
@@ -329,17 +312,17 @@ struct bfa_iocfc_s {
/*
* FC specific IOC functions.
*/
void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len);
void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo,
struct bfa_s *bfa);
void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
void bfa_iocfc_init(struct bfa_s *bfa);
void bfa_iocfc_start(struct bfa_s *bfa);
void bfa_iocfc_stop(struct bfa_s *bfa);
void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa);
bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
void bfa_iocfc_reset_queues(struct bfa_s *bfa);
@@ -418,7 +401,8 @@ void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo);
struct bfa_meminfo_s *meminfo,
struct bfa_s *bfa);
void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
......
@@ -286,10 +286,9 @@ static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
* Compute and return memory needed by FCP(im) module.
*/
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len)
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
bfa_itnim_meminfo(cfg, km_len, dm_len);
bfa_itnim_meminfo(cfg, km_len);
/*
* IO memory
@@ -308,8 +307,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev)
struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
struct bfa_fcpim_s *fcpim = &fcp->fcpim;
struct bfa_s *bfa = fcp->bfa;
@@ -328,9 +326,9 @@ bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
fcpim->profile_comp = NULL;
fcpim->profile_start = NULL;
bfa_itnim_attach(fcpim, meminfo);
bfa_tskim_attach(fcpim, meminfo);
bfa_ioim_attach(fcpim, meminfo);
bfa_itnim_attach(fcpim);
bfa_tskim_attach(fcpim);
bfa_ioim_attach(fcpim);
}
static void
@@ -972,8 +970,7 @@ bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
}
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len)
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
/*
* ITN memory
@@ -982,15 +979,16 @@ bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
}
void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
struct bfa_s *bfa = fcpim->bfa;
struct bfa_fcp_mod_s *fcp = fcpim->fcp;
struct bfa_itnim_s *itnim;
int i, j;
INIT_LIST_HEAD(&fcpim->itnim_q);
itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
fcpim->itnim_arr = itnim;
for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
@@ -1012,7 +1010,7 @@ bfa_itnim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
}
bfa_meminfo_kva(minfo) = (u8 *) itnim;
bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}
void
@@ -2345,22 +2343,23 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
* Memory allocation and initialization.
*/
void
bfa_ioim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
{
struct bfa_ioim_s *ioim;
struct bfa_fcp_mod_s *fcp = fcpim->fcp;
struct bfa_ioim_sp_s *iosp;
u16 i;
/*
* claim memory first
*/
ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
fcpim->ioim_arr = ioim;
bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
fcpim->ioim_sp_arr = iosp;
bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
/*
* Initialize ioim free queues
@@ -3109,15 +3108,16 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
* Memory allocation and initialization.
*/
void
bfa_tskim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
{
struct bfa_tskim_s *tskim;
struct bfa_fcp_mod_s *fcp = fcpim->fcp;
u16 i;
INIT_LIST_HEAD(&fcpim->tskim_free_q);
INIT_LIST_HEAD(&fcpim->tskim_unused_q);
tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
fcpim->tskim_arr = tskim;
for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
@@ -3136,7 +3136,7 @@ bfa_tskim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
}
bfa_meminfo_kva(minfo) = (u8 *) tskim;
bfa_mem_kva_curp(fcp) = (u8 *) tskim;
}
void
@@ -3233,9 +3233,14 @@ bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
BFA_MODULE(fcp);
static void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, u32 *dm_len)
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
struct bfa_s *bfa)
{
u16 num_io_req;
struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
struct bfa_mem_dma_s *seg_ptr;
u16 nsegs, idx, per_seg_ios, num_io_req;
u32 km_len = 0;
/*
* ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value.
@@ -3261,43 +3266,69 @@ bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, u32 *dm_len)
cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
}
bfa_fcpim_meminfo(cfg, km_len, dm_len);
bfa_fcpim_meminfo(cfg, &km_len);
num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
*km_len += num_io_req * sizeof(struct bfa_iotag_s);
*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
*dm_len += num_io_req * BFI_IOIM_SNSLEN;
km_len += num_io_req * sizeof(struct bfa_iotag_s);
km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
/* dma memory */
nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
if (num_io_req >= per_seg_ios) {
num_io_req -= per_seg_ios;
bfa_mem_dma_setup(minfo, seg_ptr,
per_seg_ios * BFI_IOIM_SNSLEN);
} else
bfa_mem_dma_setup(minfo, seg_ptr,
num_io_req * BFI_IOIM_SNSLEN);
}
/* kva memory */
bfa_mem_kva_setup(minfo, fcp_kva, km_len);
}
static void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
struct bfa_pcidev_s *pcidev)
{
struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
u32 snsbufsz;
struct bfa_mem_dma_s *seg_ptr;
u16 idx, nsegs, num_io_req;
fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
fcp->num_itns = cfg->fwcfg.num_rports;
fcp->num_itns = cfg->fwcfg.num_rports;
fcp->bfa = bfa;
snsbufsz = (fcp->num_ioim_reqs + fcp->num_fwtio_reqs) * BFI_IOIM_SNSLEN;
fcp->snsbase.pa = bfa_meminfo_dma_phys(meminfo);
bfa_meminfo_dma_phys(meminfo) += snsbufsz;
/*
* Setup the pool of snsbase addr's, that is passed to fw as
* part of bfi_iocfc_cfg_s.
*/
num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
if (!bfa_mem_dma_virt(seg_ptr))
break;
fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
}
fcp->snsbase.kva = bfa_meminfo_dma_virt(meminfo);
bfa_meminfo_dma_virt(meminfo) += snsbufsz;
bfa_iocfc_set_snsbase(bfa, fcp->snsbase.pa);
bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
bfa_fcpim_attach(fcp, bfad, cfg, meminfo, pcidev);
bfa_iotag_attach(fcp);
fcp->itn_arr = (struct bfa_itn_s *) bfa_meminfo_kva(meminfo);
bfa_meminfo_kva(meminfo) = (u8 *)fcp->itn_arr +
fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
(fcp->num_itns * sizeof(struct bfa_itn_s));
memset(fcp->itn_arr, 0,
(fcp->num_itns * sizeof(struct bfa_itn_s)));
bfa_iotag_attach(fcp, meminfo);
}
static void
@@ -3370,12 +3401,12 @@ bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
}
void
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp, struct bfa_meminfo_s *minfo)
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
{
struct bfa_iotag_s *iotag;
u16 num_io_req, i;
iotag = (struct bfa_iotag_s *) bfa_meminfo_kva(minfo);
iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
fcp->iotag_arr = iotag;
INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
@@ -3392,5 +3423,5 @@ bfa_iotag_attach(struct bfa_fcp_mod_s *fcp, struct bfa_meminfo_s *minfo)
list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
}
bfa_meminfo_kva(minfo) = (u8 *) iotag;
bfa_mem_kva_curp(fcp) = (u8 *) iotag;
}
@@ -25,8 +25,8 @@
#include "bfa_cs.h"
/* FCP module related definitions */
#define BFA_IO_MAX 2000
#define BFA_FWTIO_MAX 0
#define BFA_IO_MAX BFI_IO_MAX
#define BFA_FWTIO_MAX 2000
struct bfa_fcp_mod_s;
struct bfa_iotag_s {
@@ -41,16 +41,17 @@ struct bfa_itn_s {
void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp, struct bfa_meminfo_s *minfo);
void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw);
#define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod)
#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg))
#define BFA_IOTAG_FROM_TAG(_fcp, _tag) \
(&(_fcp)->iotag_arr[(_tag & BFA_IOIM_IOTAG_MASK)])
#define BFA_ITN_FROM_TAG(_fcp, _tag) \
((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
(((u8 *)(_fcp)->snsbase.kva) + (_tag * BFI_IOIM_SNSLEN))
bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN)
#define BFA_ITNIM_MIN 32
#define BFA_ITNIM_MAX 1024
@@ -130,6 +131,9 @@ struct bfa_fcpim_s {
bfa_fcpim_profile_t profile_start;
};
/* Max FCP dma segs required */
#define BFA_FCP_DMA_SEGS BFI_IOIM_SNSBUF_SEGS
struct bfa_fcp_mod_s {
struct bfa_s *bfa;
struct list_head iotag_ioim_free_q; /* free IO resources */
@@ -140,8 +144,10 @@ struct bfa_fcp_mod_s {
int num_ioim_reqs;
int num_fwtio_reqs;
int num_itns;
struct bfa_dma_s snsbase;
struct bfa_dma_s snsbase[BFA_FCP_DMA_SEGS];
struct bfa_fcpim_s fcpim;
struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS];
struct bfa_mem_kva_s kva_seg;
};
/*
@@ -256,8 +262,7 @@ bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
/*
* function prototypes
*/
void bfa_ioim_attach(struct bfa_fcpim_s *fcpim,
struct bfa_meminfo_s *minfo);
void bfa_ioim_attach(struct bfa_fcpim_s *fcpim);
void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
struct bfi_msg_s *msg);
@@ -267,18 +272,15 @@ void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
void bfa_ioim_tov(struct bfa_ioim_s *ioim);
void bfa_tskim_attach(struct bfa_fcpim_s *fcpim,
struct bfa_meminfo_s *minfo);
void bfa_tskim_attach(struct bfa_fcpim_s *fcpim);
void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw);
void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len);
void bfa_itnim_attach(struct bfa_fcpim_s *fcpim,
struct bfa_meminfo_s *minfo);
void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len);
void bfa_itnim_attach(struct bfa_fcpim_s *fcpim);
void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
......
@@ -84,6 +84,68 @@ struct bfa_sge_s {
#define bfa_sgaddr_le(_x) (_x)
#endif
/*
* BFA memory resources
*/
struct bfa_mem_dma_s {
struct list_head qe; /* Queue of DMA elements */
u32 mem_len; /* Total Length in Bytes */
u8 *kva; /* kernel virtual address */
u64 dma; /* dma address if DMA memory */
u8 *kva_curp; /* kva allocation cursor */
u64 dma_curp; /* dma allocation cursor */
};
#define bfa_mem_dma_t struct bfa_mem_dma_s
struct bfa_mem_kva_s {
struct list_head qe; /* Queue of KVA elements */
u32 mem_len; /* Total Length in Bytes */
u8 *kva; /* kernel virtual address */
u8 *kva_curp; /* kva allocation cursor */
};
#define bfa_mem_kva_t struct bfa_mem_kva_s
struct bfa_meminfo_s {
struct bfa_mem_dma_s dma_info;
struct bfa_mem_kva_s kva_info;
};
/* BFA memory segment setup macros */
#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \
((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \
if (_seg_sz) \
list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \
&(_meminfo)->dma_info.qe); \
} while (0)
#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \
((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \
if (_seg_sz) \
list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \
&(_meminfo)->kva_info.qe); \
} while (0)
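/*
 * Typical use (cf. bfa_fcp_meminfo() in this patch): a sub-module sizes
 * each of its dma_seg[] entries with bfa_mem_dma_setup() and its kva_seg
 * with bfa_mem_kva_setup(). Both macros skip the list_add_tail() for
 * zero-length segments, so unused entries never reach the global queues.
 */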
/* BFA dma memory segments iterator */
#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)])
#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i) \
for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr); \
_i++, _sptr = bfa_mem_dma_sptr(_mod, _i))
#define bfa_mem_kva_curp(_mod) ((_mod)->kva_seg.kva_curp)
#define bfa_mem_dma_virt(_sptr) ((_sptr)->kva_curp)
#define bfa_mem_dma_phys(_sptr) ((_sptr)->dma_curp)
#define bfa_mem_dma_len(_sptr) ((_sptr)->mem_len)
/* Get the corresponding dma buf kva for a req - from the tag */
#define bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz) \
(((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\
BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
/* Get the corresponding dma buf pa for a req - from the tag */
#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz) \
((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp + \
BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
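/*
 * Example, with values from this patch: for _rqsz = BFI_IOIM_SNSLEN (256)
 * each 128KB segment holds BFI_MEM_NREQS_SEG(256) = 512 requests, so
 * _tag = 600 maps to dma_seg[600 / 512] = dma_seg[1], at request offset
 * 600 - 512 = 88 within that segment.
 */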
/*
* PCI device information required by IOC
*/
@@ -301,7 +363,9 @@ struct bfa_ablk_s {
bfa_ablk_cbfn_t cbfn;
void *cbarg;
struct bfa_ioc_notify_s ioc_notify;
struct bfa_mem_dma_s ablk_dma;
};
#define BFA_MEM_ABLK_DMA(__bfa) (&((__bfa)->modules.ablk.ablk_dma))
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
......
@@ -57,11 +57,11 @@ enum {
*/
#define BFA_MODULE(__mod) \
static void bfa_ ## __mod ## _meminfo( \
struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \
u32 *dm_len); \
struct bfa_iocfc_cfg_s *cfg, \
struct bfa_meminfo_s *meminfo, \
struct bfa_s *bfa); \
static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \
void *bfad, struct bfa_iocfc_cfg_s *cfg, \
struct bfa_meminfo_s *meminfo, \
struct bfa_pcidev_s *pcidev); \
static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \
@@ -87,11 +87,11 @@ enum {
* can leave entry points as NULL)
*/
struct bfa_module_s {
void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len);
void (*meminfo) (struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo,
struct bfa_s *bfa);
void (*attach) (struct bfa_s *bfa, void *bfad,
struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
void (*detach) (struct bfa_s *bfa);
void (*start) (struct bfa_s *bfa);
......
@@ -45,8 +45,11 @@ struct bfa_port_s {
bfa_status_t endis_status;
struct bfa_ioc_notify_s ioc_notify;
bfa_boolean_t pbc_disabled;
struct bfa_mem_dma_s port_dma;
};
#define BFA_MEM_PORT_DMA(__bfa) (&((__bfa)->modules.port.port_dma))
void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod);
void bfa_port_notify(void *arg, enum bfa_ioc_event_e event);
......
@@ -26,6 +26,7 @@
* Scatter-gather DMA related defines
*/
#define BFA_SGPG_MIN (16)
#define BFA_SGPG_MAX (8192)
/*
* Alignment macro for SG page allocation
@@ -54,17 +55,21 @@ struct bfa_sgpg_s {
*/
#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
/* Max SGPG dma segs required */
#define BFA_SGPG_DMA_SEGS \
BFI_MEM_DMA_NSEGS(BFA_SGPG_MAX, (uint32_t)sizeof(struct bfi_sgpg_s))
struct bfa_sgpg_mod_s {
struct bfa_s *bfa;
int num_sgpgs; /* number of SG pages */
int free_sgpgs; /* number of free SG pages */
struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */
struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */
u64 sgpg_arr_pa; /* SG page array DMA addr */
struct list_head sgpg_q; /* queue of free SG pages */
struct list_head sgpg_wait_q; /* wait queue for SG pages */
struct bfa_mem_dma_s dma_seg[BFA_SGPG_DMA_SEGS];
struct bfa_mem_kva_s kva_seg;
};
#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod)
#define BFA_MEM_SGPG_KVA(__bfa) (&(BFA_SGPG_MOD(__bfa)->kva_seg))
bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
int nsgpgs);
@@ -79,27 +84,32 @@ void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
* FCXP related defines
*/
#define BFA_FCXP_MIN (1)
#define BFA_FCXP_MAX (256)
#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
/* Max FCXP dma segs required */
#define BFA_FCXP_DMA_SEGS \
BFI_MEM_DMA_NSEGS(BFA_FCXP_MAX, \
(u32)BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ)
struct bfa_fcxp_mod_s {
struct bfa_s *bfa; /* backpointer to BFA */
struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
u16 num_fcxps; /* max num FCXP requests */
struct list_head fcxp_free_q; /* free FCXPs */
struct list_head fcxp_active_q; /* active FCXPs */
void *req_pld_list_kva; /* list of FCXP req pld */
u64 req_pld_list_pa; /* list of FCXP req pld */
void *rsp_pld_list_kva; /* list of FCXP resp pld */
u64 rsp_pld_list_pa; /* list of FCXP resp pld */
struct list_head wait_q; /* wait queue for free fcxp */
struct list_head fcxp_unused_q; /* unused fcxps */
u32 req_pld_sz;
u32 rsp_pld_sz;
struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS];
struct bfa_mem_kva_s kva_seg;
};
#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
#define BFA_MEM_FCXP_KVA(__bfa) (&(BFA_FCXP_MOD(__bfa)->kva_seg))
typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
void *cb_arg, bfa_status_t req_status,
@@ -207,13 +217,15 @@ struct bfa_fcxp_wqe_s {
#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
((_fcxp)->fcxp_mod->req_pld_list_pa + \
((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag))
#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \
(_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz)
#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
((_fcxp)->fcxp_mod->rsp_pld_list_pa + \
((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
(bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \
(_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz) + \
(_fcxp)->fcxp_mod->req_pld_sz)
void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
@@ -241,9 +253,11 @@ struct bfa_rport_mod_s {
struct list_head rp_active_q; /* free bfa_rports */
struct list_head rp_unused_q; /* unused bfa rports */
u16 num_rports; /* number of rports */
struct bfa_mem_kva_s kva_seg;
};
#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
#define BFA_MEM_RPORT_KVA(__bfa) (&(BFA_RPORT_MOD(__bfa)->kva_seg))
/*
* Convert rport tag to RPORT
@@ -301,7 +315,7 @@ struct bfa_rport_s {
*/
#define BFA_UF_MIN (4)
#define BFA_UF_MAX (256)
struct bfa_uf_s {
struct list_head qe; /* queue element */
@@ -329,6 +343,18 @@ struct bfa_uf_s {
*/
typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
#define BFA_UF_BUFSZ (2 * 1024 + 256)
struct bfa_uf_buf_s {
u8 d[BFA_UF_BUFSZ];
};
#define BFA_PER_UF_DMA_SZ \
(u32)BFA_ROUNDUP(sizeof(struct bfa_uf_buf_s), BFA_DMA_ALIGN_SZ)
/* Max UF dma segs required */
#define BFA_UF_DMA_SEGS BFI_MEM_DMA_NSEGS(BFA_UF_MAX, BFA_PER_UF_DMA_SZ)
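/*
 * Example (assuming BFA_DMA_ALIGN_SZ is 256): sizeof(struct bfa_uf_buf_s)
 * is 2304, already 256-byte aligned, so BFA_PER_UF_DMA_SZ = 2304 and
 * BFA_UF_DMA_SEGS = BFI_MEM_DMA_NSEGS(256, 2304)
 *                 = ceil(589824 / 131072) = 5 segments.
 */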
struct bfa_uf_mod_s {
struct bfa_s *bfa; /* back pointer to BFA */
struct bfa_uf_s *uf_list; /* array of UFs */
@@ -336,32 +362,23 @@ struct bfa_uf_mod_s {
struct list_head uf_free_q; /* free UFs */
struct list_head uf_posted_q; /* UFs posted to IOC */
struct list_head uf_unused_q; /* unused UF's */
struct bfa_uf_buf_s *uf_pbs_kva; /* list UF bufs request pld */
u64 uf_pbs_pa; /* phy addr for UF bufs */
struct bfi_uf_buf_post_s *uf_buf_posts;
/* pre-built UF post msgs */
bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */
void *cbarg; /* uf receive handler arg */
struct bfa_mem_dma_s dma_seg[BFA_UF_DMA_SEGS];
struct bfa_mem_kva_s kva_seg;
};
#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod)
#define BFA_MEM_UF_KVA(__bfa) (&(BFA_UF_MOD(__bfa)->kva_seg))
#define ufm_pbs_pa(_ufmod, _uftag) \
((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
bfa_mem_get_dmabuf_pa(_ufmod, _uftag, BFA_PER_UF_DMA_SZ)
void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw);
#define BFA_UF_BUFSZ (2 * 1024 + 256)
/*
* @todo private
*/
struct bfa_uf_buf_s {
u8 d[BFA_UF_BUFSZ];
};
/*
* LPS - bfa lport login/logout service interface
*/
@@ -406,10 +423,12 @@ struct bfa_lps_mod_s {
struct list_head lps_login_q;
struct bfa_lps_s *lps_arr;
int num_lps;
struct bfa_mem_kva_s kva_seg;
};
#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod)
#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
#define BFA_MEM_LPS_KVA(__bfa) (&(BFA_LPS_MOD(__bfa)->kva_seg))
/*
* external functions
@@ -489,9 +508,11 @@ struct bfa_fcport_s {
bfa_boolean_t bbsc_op_state; /* Cred recov Oper State */
struct bfa_fcport_trunk_s trunk;
u16 fcoe_vlan;
struct bfa_mem_dma_s fcport_dma;
};
#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
#define BFA_MEM_FCPORT_DMA(__bfa) (&(BFA_FCPORT_MOD(__bfa)->fcport_dma))
/*
* protected functions
......
@@ -531,28 +531,26 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
void
bfad_hal_mem_release(struct bfad_s *bfad)
{
int i;
struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
struct bfa_mem_elem_s *meminfo_elem;
for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
meminfo_elem = &hal_meminfo->meminfo[i];
if (meminfo_elem->kva != NULL) {
switch (meminfo_elem->mem_type) {
case BFA_MEM_TYPE_KVA:
vfree(meminfo_elem->kva);
break;
case BFA_MEM_TYPE_DMA:
dma_free_coherent(&bfad->pcidev->dev,
meminfo_elem->mem_len,
meminfo_elem->kva,
(dma_addr_t) meminfo_elem->dma);
break;
default:
WARN_ON(1);
break;
}
}
struct bfa_mem_dma_s *dma_info, *dma_elem;
struct bfa_mem_kva_s *kva_info, *kva_elem;
struct list_head *dm_qe, *km_qe;
dma_info = &hal_meminfo->dma_info;
kva_info = &hal_meminfo->kva_info;
/* Iterate through the KVA meminfo queue */
list_for_each(km_qe, &kva_info->qe) {
kva_elem = (struct bfa_mem_kva_s *) km_qe;
vfree(kva_elem->kva);
}
/* Iterate through the DMA meminfo queue */
list_for_each(dm_qe, &dma_info->qe) {
dma_elem = (struct bfa_mem_dma_s *) dm_qe;
dma_free_coherent(&bfad->pcidev->dev,
dma_elem->mem_len, dma_elem->kva,
(dma_addr_t) dma_elem->dma);
}
memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
@@ -567,15 +565,15 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
if (num_tms > 0)
bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
if (num_fcxps > 0)
if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
if (num_ufbufs > 0)
if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
if (reqq_size > 0)
bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
if (rspq_size > 0)
bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
if (num_sgpgs > 0)
if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
/*
@@ -595,85 +593,46 @@ bfad_update_hal_cfg(bfa_cfg)
bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
int i;
struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
struct bfa_mem_elem_s *meminfo_elem;
dma_addr_t phys_addr;
void *kva;
struct bfa_mem_dma_s *dma_info, *dma_elem;
struct bfa_mem_kva_s *kva_info, *kva_elem;
struct list_head *dm_qe, *km_qe;
bfa_status_t rc = BFA_STATUS_OK;
int retry_count = 0;
int reset_value = 1;
int min_num_sgpgs = 512;
dma_addr_t phys_addr;
bfa_cfg_get_default(&bfad->ioc_cfg);
retry:
bfad_update_hal_cfg(&bfad->ioc_cfg);
bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);
for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
meminfo_elem = &hal_meminfo->meminfo[i];
switch (meminfo_elem->mem_type) {
case BFA_MEM_TYPE_KVA:
kva = vmalloc(meminfo_elem->mem_len);
if (kva == NULL) {
bfad_hal_mem_release(bfad);
rc = BFA_STATUS_ENOMEM;
goto ext;
}
memset(kva, 0, meminfo_elem->mem_len);
meminfo_elem->kva = kva;
break;
case BFA_MEM_TYPE_DMA:
kva = dma_alloc_coherent(&bfad->pcidev->dev,
meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
if (kva == NULL) {
bfad_hal_mem_release(bfad);
/*
* If we cannot allocate with default
* num_sgpages try with half the value.
*/
if (num_sgpgs > min_num_sgpgs) {
printk(KERN_INFO
"bfad[%d]: memory allocation failed"
" with num_sgpgs: %d\n",
bfad->inst_no, num_sgpgs);
nextLowerInt(&num_sgpgs);
printk(KERN_INFO
"bfad[%d]: trying to allocate memory"
" with num_sgpgs: %d\n",
bfad->inst_no, num_sgpgs);
retry_count++;
goto retry;
} else {
if (num_sgpgs_parm > 0)
num_sgpgs = num_sgpgs_parm;
else {
reset_value =
(1 << retry_count);
num_sgpgs *= reset_value;
}
rc = BFA_STATUS_ENOMEM;
goto ext;
}
}
if (num_sgpgs_parm > 0)
num_sgpgs = num_sgpgs_parm;
else {
reset_value = (1 << retry_count);
num_sgpgs *= reset_value;
}
memset(kva, 0, meminfo_elem->mem_len);
meminfo_elem->kva = kva;
meminfo_elem->dma = phys_addr;
break;
default:
break;
bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);
dma_info = &hal_meminfo->dma_info;
kva_info = &hal_meminfo->kva_info;
/* Iterate through the KVA meminfo queue */
list_for_each(km_qe, &kva_info->qe) {
kva_elem = (struct bfa_mem_kva_s *) km_qe;
kva_elem->kva = vmalloc(kva_elem->mem_len);
if (kva_elem->kva == NULL) {
bfad_hal_mem_release(bfad);
rc = BFA_STATUS_ENOMEM;
goto ext;
}
memset(kva_elem->kva, 0, kva_elem->mem_len);
}
/* Iterate through the DMA meminfo queue */
list_for_each(dm_qe, &dma_info->qe) {
dma_elem = (struct bfa_mem_dma_s *) dm_qe;
dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
dma_elem->mem_len,
&phys_addr, GFP_KERNEL);
if (dma_elem->kva == NULL) {
bfad_hal_mem_release(bfad);
rc = BFA_STATUS_ENOMEM;
goto ext;
}
dma_elem->dma = phys_addr;
memset(dma_elem->kva, 0, dma_elem->mem_len);
}
ext:
return rc;
......
@@ -276,21 +276,6 @@ struct bfad_hal_comp {
struct completion comp;
};
/*
* Macro to obtain the immediate lower power
* of two for the integer.
*/
#define nextLowerInt(x) \
do { \
int __i; \
(*x)--; \
for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \
(*x) = (*x) | (*x) >> __i; \
(*x)++; \
(*x) = (*x) >> 1; \
} while (0)
#define BFA_LOG(level, bfad, mask, fmt, arg...) \
do { \
if (((mask) == 4) || (level[1] <= '4')) \
......
@@ -23,6 +23,24 @@
#pragma pack(1)
/* Per dma segment max size */
#define BFI_MEM_DMA_SEG_SZ (131072)
/* Get number of dma segments required */
#define BFI_MEM_DMA_NSEGS(_num_reqs, _req_sz) \
((u16)(((((_num_reqs) * (_req_sz)) + BFI_MEM_DMA_SEG_SZ - 1) & \
~(BFI_MEM_DMA_SEG_SZ - 1)) / BFI_MEM_DMA_SEG_SZ))
/* Get num dma reqs - that fit in a segment */
#define BFI_MEM_NREQS_SEG(_rqsz) (BFI_MEM_DMA_SEG_SZ / (_rqsz))
/* Get segment num from tag */
#define BFI_MEM_SEG_FROM_TAG(_tag, _rqsz) ((_tag) / BFI_MEM_NREQS_SEG(_rqsz))
/* Get dma req offset in a segment */
#define BFI_MEM_SEG_REQ_OFFSET(_tag, _sz) \
((_tag) - (BFI_MEM_SEG_FROM_TAG(_tag, _sz) * BFI_MEM_NREQS_SEG(_sz)))
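/*
 * Example: for BFI_IO_MAX = 2000 sense buffers of BFI_IOIM_SNSLEN = 256
 * bytes, BFI_MEM_DMA_NSEGS(2000, 256) = ceil(512000 / 131072) = 4
 * segments, with BFI_MEM_NREQS_SEG(256) = 512 requests per segment.
 */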
/*
* BFI FW image type
*/
@@ -46,7 +64,6 @@ struct bfi_mhdr_s {
#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu))
#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1)
#define bfi_mhdr_2_qid(_m) ((_mh)->mtag.h2i.qid)
#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \
(_mh).msg_class = (_mc); \
@@ -133,6 +150,12 @@ struct bfi_sgpg_s {
u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
};
/* FCP module definitions */
#define BFI_IO_MAX (2000)
#define BFI_IOIM_SNSLEN (256)
#define BFI_IOIM_SNSBUF_SEGS \
BFI_MEM_DMA_NSEGS(BFI_IO_MAX, BFI_IOIM_SNSLEN)
/*
* Large Message structure - 128 Bytes size Msgs
*/
......
@@ -46,10 +46,12 @@ struct bfi_iocfc_cfg_s {
u8 sense_buf_len; /* SCSI sense length */
u16 rsvd_1;
u32 endian_sig; /* endian signature of host */
u8 rsvd_2;
u8 single_msix_vec;
u8 rsvd[2];
__be16 num_ioim_reqs;
__be16 num_fwtio_reqs;
u8 single_msix_vec;
u8 rsvd[3];
/*
* Request and response circular queue base addresses, size and
@@ -64,7 +66,8 @@ struct bfi_iocfc_cfg_s {
union bfi_addr_u stats_addr; /* DMA-able address for stats */
union bfi_addr_u cfgrsp_addr; /* config response dma address */
union bfi_addr_u ioim_snsbase; /* IO sense buffer base address */
union bfi_addr_u ioim_snsbase[BFI_IOIM_SNSBUF_SEGS];
/* IO sense buf base addr segments */
struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
};
@@ -753,7 +756,6 @@ enum bfi_ioim_status {
BFI_IOIM_STS_PATHTOV = 8,
};
#define BFI_IOIM_SNSLEN (256)
/*
* I/O response message
*/
......