Commit e3e7d3ee authored by Maggie Zhang, committed by James Bottomley

[SCSI] bfa: scatter gather processing change

Modified scatter-gather processing to use the kernel-provided
scsi_for_each_sg() macro.

1) Instead of allocating and setting up sgpg in bfa_ioim_sge_setup(),
   we now only do the allocation.  As a result, we remove
   bfa_ioim_sgpg_setup() and rename bfa_ioim_sge_setup() to
   bfa_ioim_sgpg_alloc().

2) bfa_ioim_send_ioreq() now calls scsi_for_each_sg() to handle both the
   inline and the sgpg SG element setup (see the sketch below).
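
For reference, here is a minimal sketch (not part of this patch) of the
iteration pattern scsi_for_each_sg() provides; the real
bfa_ioim_send_ioreq() runs the same loop but programs BFI SG elements
instead of logging, and the function name walk_sg_example is purely
illustrative:

#include <linux/printk.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Walk every DMA-mapped segment of a SCSI command. */
static void walk_sg_example(struct scsi_cmnd *cmnd)
{
	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), i) {
		dma_addr_t addr = sg_dma_address(sg);	/* bus address of segment i */
		unsigned int len = sg_dma_len(sg);	/* byte length of segment i */

		pr_debug("sg[%d]: addr=0x%llx len=%u\n",
			 i, (unsigned long long)addr, len);
	}
}
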
Signed-off-by: Maggie Zhang <xmzhang@brocade.com>
Signed-off-by: Jing Huang <huangj@brocade.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent b77ee1fb
@@ -218,8 +218,7 @@ static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
  * forward declaration for BFA IOIM functions
  */
 static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
-static bfa_boolean_t	bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
-static void		bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
+static bfa_boolean_t	bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
 static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
 static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
 static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
@@ -1621,7 +1620,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
 		}

 		if (ioim->nsges > BFI_SGE_INLINE) {
-			if (!bfa_ioim_sge_setup(ioim)) {
+			if (!bfa_ioim_sgpg_alloc(ioim)) {
 				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
 				return;
 			}
@@ -2304,7 +2303,7 @@ bfa_ioim_sgpg_alloced(void *cbarg)

 	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
 	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
-	bfa_ioim_sgpg_setup(ioim);
+	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
 	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
 }
@@ -2317,12 +2316,14 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
 	struct bfa_itnim_s *itnim = ioim->itnim;
 	struct bfi_ioim_req_s *m;
 	static struct fcp_cmnd_s cmnd_z0 = { 0 };
-	struct bfi_sge_s *sge;
+	struct bfi_sge_s *sge, *sgpge;
 	u32	pgdlen = 0;
 	u32	fcp_dl;
 	u64	addr;
 	struct scatterlist *sg;
+	struct bfa_sgpg_s *sgpg;
 	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
+	u32	i, sge_id, pgcumsz;

 	/*
 	 * check for room in queue to send request now
@@ -2342,20 +2343,59 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
 	m->rport_hdl = ioim->itnim->rport->fw_handle;
 	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

-	/*
-	 * build inline IO SG element here
-	 */
 	sge = &m->sges[0];
-	if (ioim->nsges) {
-		sg = (struct scatterlist *)scsi_sglist(cmnd);
-		addr = bfa_os_sgaddr(sg_dma_address(sg));
-		sge->sga = *(union bfi_addr_u *) &addr;
-		pgdlen = sg_dma_len(sg);
-		sge->sg_len = pgdlen;
-		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
+	sgpg = ioim->sgpg;
+	sge_id = 0;
+	sgpge = NULL;
+	pgcumsz = 0;
+	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
+		if (i == 0) {
+			/* build inline IO SG element */
+			addr = bfa_os_sgaddr(sg_dma_address(sg));
+			sge->sga = *(union bfi_addr_u *) &addr;
+			pgdlen = sg_dma_len(sg);
+			sge->sg_len = pgdlen;
+			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
 					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
-		bfa_sge_to_be(sge);
-		sge++;
+			bfa_sge_to_be(sge);
+			sge++;
+		} else {
+			if (sge_id == 0)
+				sgpge = sgpg->sgpg->sges;
+
+			addr = bfa_os_sgaddr(sg_dma_address(sg));
+			sgpge->sga = *(union bfi_addr_u *) &addr;
+			sgpge->sg_len = sg_dma_len(sg);
+			pgcumsz += sgpge->sg_len;
+
+			/* set flags */
+			if (i < (ioim->nsges - 1) &&
+					sge_id < (BFI_SGPG_DATA_SGES - 1))
+				sgpge->flags = BFI_SGE_DATA;
+			else if (i < (ioim->nsges - 1))
+				sgpge->flags = BFI_SGE_DATA_CPL;
+			else
+				sgpge->flags = BFI_SGE_DATA_LAST;
+
+			bfa_sge_to_le(sgpge);
+
+			sgpge++;
+			if (i == (ioim->nsges - 1)) {
+				sgpge->flags = BFI_SGE_PGDLEN;
+				sgpge->sga.a32.addr_lo = 0;
+				sgpge->sga.a32.addr_hi = 0;
+				sgpge->sg_len = pgcumsz;
+				bfa_sge_to_le(sgpge);
+			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
+				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
+				sgpge->flags = BFI_SGE_LINK;
+				sgpge->sga = sgpg->sgpg_pa;
+				sgpge->sg_len = pgcumsz;
+				bfa_sge_to_le(sgpge);
+				sge_id = 0;
+				pgcumsz = 0;
+			}
+		}
 	}

 	if (ioim->nsges > BFI_SGE_INLINE) {
@@ -2414,7 +2454,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
  * at queuing time.
  */
 static bfa_boolean_t
-bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
+bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
 {
 	u16	nsgpgs;
@@ -2434,73 +2474,11 @@ bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
 	}

 	ioim->nsgpgs = nsgpgs;
-	bfa_ioim_sgpg_setup(ioim);
+	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

 	return BFA_TRUE;
 }

-static void
-bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
-{
-	int sgeid, nsges, i;
-	struct bfi_sge_s *sge;
-	struct bfa_sgpg_s *sgpg;
-	u32 pgcumsz;
-	u64 addr;
-	struct scatterlist *sg;
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
-
-	sgeid = BFI_SGE_INLINE;
-	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
-
-	sg = scsi_sglist(cmnd);
-	sg = sg_next(sg);
-
-	do {
-		sge = sgpg->sgpg->sges;
-		nsges = ioim->nsges - sgeid;
-		if (nsges > BFI_SGPG_DATA_SGES)
-			nsges = BFI_SGPG_DATA_SGES;
-
-		pgcumsz = 0;
-		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
-			addr = bfa_os_sgaddr(sg_dma_address(sg));
-			sge->sga = *(union bfi_addr_u *) &addr;
-			sge->sg_len = sg_dma_len(sg);
-			pgcumsz += sge->sg_len;
-
-			/*
-			 * set flags
-			 */
-			if (i < (nsges - 1))
-				sge->flags = BFI_SGE_DATA;
-			else if (sgeid < (ioim->nsges - 1))
-				sge->flags = BFI_SGE_DATA_CPL;
-			else
-				sge->flags = BFI_SGE_DATA_LAST;
-
-			bfa_sge_to_le(sge);
-		}
-
-		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
-
-		/*
-		 * set the link element of each page
-		 */
-		if (sgeid == ioim->nsges) {
-			sge->flags = BFI_SGE_PGDLEN;
-			sge->sga.a32.addr_lo = 0;
-			sge->sga.a32.addr_hi = 0;
-		} else {
-			sge->flags = BFI_SGE_LINK;
-			sge->sga = sgpg->sgpg_pa;
-		}
-		sge->sg_len = pgcumsz;
-		bfa_sge_to_le(sge);
-	} while (sgeid < ioim->nsges);
-}
-
 /*
  * Send I/O abort request to firmware.
  */