Commit f47451c4 authored by Tejun Heo, committed by Jeff Garzik

libata-sff: ata_sff_[dumb_]qc_prep are BMDMA specific

Both qc_prep functions deal only with BMDMA PRD setup, and PIO-only SFF
drivers don't need them.  Rename to ata_bmdma_[dumb_]qc_prep() and
relocate them into the BMDMA section.

All usages are renamed except for pdc_adma and sata_qstor.  Those two
drivers are not BMDMA drivers and don't need to call BMDMA qc_prep
functions.  Calls to ata_sff_qc_prep() in the two drivers are removed.
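
For illustration only (pata_foo is a made-up name, not a driver in the
tree): a BMDMA driver that used the dumb PRD fill simply follows the
rename,

	static struct ata_port_operations pata_foo_port_ops = {
		.inherits	= &ata_bmdma_port_ops,
		.qc_prep	= ata_bmdma_dumb_qc_prep,	/* was ata_sff_dumb_qc_prep */
	};

while PIO-only drivers that inherit ata_sff_port_ops need no change at
all; its default .qc_prep is now ata_noop_qc_prep.
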
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent fe06e5f9
@@ -45,7 +45,7 @@ static struct workqueue_struct *ata_sff_wq;
 const struct ata_port_operations ata_sff_port_ops = {
 	.inherits		= &ata_base_port_ops,
 
-	.qc_prep		= ata_sff_qc_prep,
+	.qc_prep		= ata_noop_qc_prep,
 	.qc_issue		= ata_sff_qc_issue,
 	.qc_fill_rtf		= ata_sff_qc_fill_rtf,
@@ -70,149 +70,6 @@ const struct ata_port_operations ata_sff_port_ops = {
 };
 EXPORT_SYMBOL_GPL(ata_sff_port_ops);
 
-/**
- * ata_fill_sg - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg;
-	unsigned int si, pi;
-
-	pi = 0;
-	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-		u32 addr, offset;
-		u32 sg_len, len;
-
-		/* determine if physical DMA addr spans 64K boundary.
-		 * Note h/w doesn't support 64-bit, so we unconditionally
-		 * truncate dma_addr_t to u32.
-		 */
-		addr = (u32) sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & 0xffff;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			ap->prd[pi].addr = cpu_to_le32(addr);
-			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
-			pi++;
-			sg_len -= len;
-			addr += len;
-		}
-	}
-
-	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_fill_sg_dumb - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command. Perform the fill
- * so that we avoid writing any length 64K records for
- * controllers that don't follow the spec.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg;
-	unsigned int si, pi;
-
-	pi = 0;
-	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-		u32 addr, offset;
-		u32 sg_len, len, blen;
-
-		/* determine if physical DMA addr spans 64K boundary.
-		 * Note h/w doesn't support 64-bit, so we unconditionally
-		 * truncate dma_addr_t to u32.
-		 */
-		addr = (u32) sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & 0xffff;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			blen = len & 0xffff;
-			ap->prd[pi].addr = cpu_to_le32(addr);
-			if (blen == 0) {
-				/* Some PATA chipsets like the CS5530 can't
-				   cope with 0x0000 meaning 64K as the spec
-				   says */
-				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
-				blen = 0x8000;
-				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
-			}
-			ap->prd[pi].flags_len = cpu_to_le32(blen);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
-			pi++;
-			sg_len -= len;
-			addr += len;
-		}
-	}
-
-	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_sff_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_qc_prep(struct ata_queued_cmd *qc)
-{
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
-
-	ata_fill_sg(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
-
-/**
- * ata_sff_dumb_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
-{
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
-
-	ata_fill_sg_dumb(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
-
 /**
  * ata_sff_check_status - Read device status reg & clear interrupt
  * @ap: port where the device is
@@ -2760,6 +2617,8 @@ const struct ata_port_operations ata_bmdma_port_ops = {
 	.error_handler		= ata_bmdma_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
+	.qc_prep		= ata_bmdma_qc_prep,
+
 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
 	.bmdma_stop		= ata_bmdma_stop,
@@ -2777,6 +2636,149 @@ const struct ata_port_operations ata_bmdma32_port_ops = {
 };
 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
 
+/**
+ * ata_bmdma_fill_sg - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int si, pi;
+
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			ap->prd[pi].addr = cpu_to_le32(addr);
+			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+			pi++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command. Perform the fill
+ * so that we avoid writing any length 64K records for
+ * controllers that don't follow the spec.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int si, pi;
+
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, offset;
+		u32 sg_len, len, blen;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			blen = len & 0xffff;
+			ap->prd[pi].addr = cpu_to_le32(addr);
+			if (blen == 0) {
+				/* Some PATA chipsets like the CS5530 can't
+				   cope with 0x0000 meaning 64K as the spec
+				   says */
+				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
+				blen = 0x8000;
+				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+			}
+			ap->prd[pi].flags_len = cpu_to_le32(blen);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+			pi++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ * ata_bmdma_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * Prepare ATA taskfile for submission.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_bmdma_fill_sg(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+
+/**
+ * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * Prepare ATA taskfile for submission.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_bmdma_fill_sg_dumb(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
+
 /**
  * ata_bmdma_error_handler - Stock error handler for BMDMA controller
  * @ap: port to handle error for
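
Note: the PRD fill above splits any scatterlist segment that crosses a
64K boundary, because a single PRD entry cannot span one, and a length
field of 0x0000 means 64K, which the dumb variant avoids for chipsets
like the CS5530. A standalone userspace sketch of the same split (the
address and length are hypothetical, chosen to cross two boundaries):

	#include <stdio.h>

	int main(void)
	{
		unsigned int addr = 0x1fff0;	/* hypothetical segment start */
		unsigned int sg_len = 0x18000;	/* hypothetical segment length */
		unsigned int pi = 0;

		while (sg_len) {
			unsigned int offset = addr & 0xffff;
			unsigned int len = sg_len;

			if (offset + sg_len > 0x10000)
				len = 0x10000 - offset;	/* cut at the 64K boundary */

			/* len & 0xffff == 0 encodes a full 64K entry */
			printf("PRD[%u] = (0x%X, 0x%X)\n", pi++, addr, len & 0xffff);
			sg_len -= len;
			addr += len;
		}
		return 0;
	}

This prints three entries: (0x1FFF0, 0x10), (0x20000, 0x0 meaning 64K)
and (0x30000, 0x7FF0); ata_bmdma_fill_sg_dumb() would instead emit the
64K entry as two 0x8000-byte halves.
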
@@ -217,7 +217,7 @@ static struct scsi_host_template atiixp_sht = {
 static struct ata_port_operations atiixp_port_ops = {
 	.inherits	= &ata_bmdma_port_ops,
 
-	.qc_prep	= ata_sff_dumb_qc_prep,
+	.qc_prep	= ata_bmdma_dumb_qc_prep,
 	.bmdma_start	= atiixp_bmdma_start,
 	.bmdma_stop	= atiixp_bmdma_stop,
@@ -110,7 +110,7 @@ static struct scsi_host_template cs5520_sht = {
 static struct ata_port_operations cs5520_port_ops = {
 	.inherits		= &ata_bmdma_port_ops,
-	.qc_prep		= ata_sff_dumb_qc_prep,
+	.qc_prep		= ata_bmdma_dumb_qc_prep,
 	.cable_detect		= ata_cable_40wire,
 	.set_piomode		= cs5520_set_piomode,
 };
@@ -167,7 +167,7 @@ static struct scsi_host_template cs5530_sht = {
 static struct ata_port_operations cs5530_port_ops = {
 	.inherits	= &ata_bmdma_port_ops,
 
-	.qc_prep	= ata_sff_dumb_qc_prep,
+	.qc_prep	= ata_bmdma_dumb_qc_prep,
 	.qc_issue	= cs5530_qc_issue,
 
 	.cable_detect	= ata_cable_40wire,
@@ -209,7 +209,7 @@ static struct scsi_host_template sc1200_sht = {
 static struct ata_port_operations sc1200_port_ops = {
 	.inherits	= &ata_bmdma_port_ops,
-	.qc_prep	= ata_sff_dumb_qc_prep,
+	.qc_prep	= ata_bmdma_dumb_qc_prep,
 	.qc_issue	= sc1200_qc_issue,
 	.qc_defer	= sc1200_qc_defer,
 	.cable_detect	= ata_cable_40wire,
@@ -324,10 +324,8 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
 	VPRINTK("ENTER\n");
 
 	adma_enter_reg_mode(qc->ap);
-	if (qc->tf.protocol != ATA_PROT_DMA) {
-		ata_sff_qc_prep(qc);
+	if (qc->tf.protocol != ATA_PROT_DMA)
 		return;
-	}
 
 	buf[i++] = 0;	/* Response flags */
 	buf[i++] = 0;	/* reserved */
@@ -1409,7 +1409,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
 			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
-		ata_sff_qc_prep(qc);
+		ata_bmdma_qc_prep(qc);
 		return;
 	}
@@ -2012,7 +2012,7 @@ static int nv_swncq_port_start(struct ata_port *ap)
 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (qc->tf.protocol != ATA_PROT_NCQ) {
-		ata_sff_qc_prep(qc);
+		ata_bmdma_qc_prep(qc);
 		return;
 	}
@@ -303,10 +303,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
 	VPRINTK("ENTER\n");
 
 	qs_enter_reg_mode(qc->ap);
-	if (qc->tf.protocol != ATA_PROT_DMA) {
-		ata_sff_qc_prep(qc);
+	if (qc->tf.protocol != ATA_PROT_DMA)
 		return;
-	}
 
 	nelem = qs_fill_sg(qc);
@@ -1570,8 +1570,6 @@ extern const struct ata_port_operations ata_bmdma32_port_ops;
 	.sg_tablesize		= LIBATA_MAX_PRD, \
 	.dma_boundary		= ATA_DMA_BOUNDARY
 
-extern void ata_sff_qc_prep(struct ata_queued_cmd *qc);
-extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
 extern u8 ata_sff_check_status(struct ata_port *ap);
 extern void ata_sff_pause(struct ata_port *ap);
@@ -1628,6 +1626,8 @@ extern int ata_pci_sff_init_one(struct pci_dev *pdev,
 			struct scsi_host_template *sht, void *host_priv, int hflags);
 #endif /* CONFIG_PCI */
 
+extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_bmdma_error_handler(struct ata_port *ap);
 extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
 extern void ata_bmdma_setup(struct ata_queued_cmd *qc);