Commit 945fb655 authored by Linus Torvalds

Merge bk://bart.bkbits.net/ide-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents a9ce9706 01b81c2d
@@ -1172,8 +1172,7 @@ PIO drivers (or drivers that need to revert to PIO transfer once in a
while (IDE for example)), where the CPU is doing the actual data
transfer a virtual mapping is needed. If the driver supports highmem I/O,
(Sec 1.1, (ii) ) it needs to use __bio_kmap_atomic and bio_kmap_irq to
temporarily map a bio into the virtual address space. See how IDE handles
this with ide_map_buffer.
temporarily map a bio into the virtual address space.
8. Prior/Related/Impacted patches
...
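The documentation paragraph above describes temporarily mapping a (possibly highmem) bio for a PIO transfer. A minimal sketch of that pattern, assuming the 2.6-era bio_kmap_irq()/bio_kunmap_irq() helpers and the taskfile_input_data()/taskfile_output_data() routines declared in <linux/ide.h>; the function name pio_xfer_one_sector is illustrative only and is not part of this patch (the ide_map_buffer()/ide_unmap_buffer() helpers removed later in this diff followed the same pattern):

	#include <linux/ide.h>
	#include <linux/bio.h>

	/*
	 * Illustrative sketch, not part of this patch: PIO-transfer one
	 * sector of the current bio segment, mapping it first so that
	 * highmem pages can be addressed by the CPU.
	 */
	static void pio_xfer_one_sector(ide_drive_t *drive, struct request *rq,
					int write)
	{
		unsigned long flags;
		char *buf;

		/* bio_kmap_irq() returns a CPU-usable address until unmapped */
		buf = bio_kmap_irq(rq->bio, &flags);

		if (write)
			taskfile_output_data(drive, buf, SECTOR_WORDS);
		else
			taskfile_input_data(drive, buf, SECTOR_WORDS);

		bio_kunmap_irq(buf, &flags);	/* drop the temporary mapping */
	}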
@@ -297,8 +297,10 @@ static int e100_dma_setup(ide_drive_t *drive)
}
/* set up the Etrax DMA descriptors */
if (e100_ide_build_dmatable(drive))
if (e100_ide_build_dmatable(drive)) {
ide_map_sg(drive, rq);
return 1;
}
return 0;
}
...
@@ -206,8 +206,6 @@ static void icside_maskproc(ide_drive_t *drive, int mask)
* here, but we rely on the main IDE driver spotting that both
* interfaces use the same IRQ, which should guarantee this.
*/
#define NR_ENTRIES 256
#define TABLE_SIZE (NR_ENTRIES * 8)
static void icside_build_sglist(ide_drive_t *drive, struct request *rq)
{
@@ -527,7 +525,7 @@ static int icside_dma_lostirq(ide_drive_t *drive)
return 1;
}
static int icside_dma_init(ide_hwif_t *hwif)
static void icside_dma_init(ide_hwif_t *hwif)
{
int autodma = 0;
@@ -537,11 +535,6 @@ static int icside_dma_init(ide_hwif_t *hwif)
printk(" %s: SG-DMA", hwif->name);
hwif->sg_table = kmalloc(sizeof(struct scatterlist) * NR_ENTRIES,
GFP_KERNEL);
if (!hwif->sg_table)
goto failed;
hwif->atapi_dma = 1;
hwif->mwdma_mask = 7; /* MW0..2 */
hwif->swdma_mask = 7; /* SW0..2 */
@@ -569,24 +562,9 @@ static int icside_dma_init(ide_hwif_t *hwif)
hwif->drives[1].autodma = hwif->autodma;
printk(" capable%s\n", hwif->autodma ? ", auto-enable" : "");
return 1;
failed:
printk(" disabled, unable to allocate DMA table\n");
return 0;
}
static void icside_dma_exit(ide_hwif_t *hwif)
{
if (hwif->sg_table) {
kfree(hwif->sg_table);
hwif->sg_table = NULL;
}
}
#else
#define icside_dma_init(hwif) (0)
#define icside_dma_exit(hwif) do { } while (0)
#endif
static ide_hwif_t *icside_find_hwif(unsigned long dataport)
@@ -811,9 +789,6 @@ static void __devexit icside_remove(struct expansion_card *ec)
case ICS_TYPE_V6:
/* FIXME: tell IDE to stop using the interface */
icside_dma_exit(state->hwif[1]);
icside_dma_exit(state->hwif[0]);
if (ec->dma != NO_DMA)
free_dma(ec->dma);
...
@@ -122,216 +122,6 @@ static int lba_capacity_is_ok (struct hd_driveid *id)
#ifndef CONFIG_IDE_TASKFILE_IO
/*
* read_intr() is the handler for disk read/multread interrupts
*/
static ide_startstop_t read_intr (ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
u32 i = 0, nsect = 0, msect = drive->mult_count;
struct request *rq;
unsigned long flags;
u8 stat;
char *to;
/* new way for dealing with premature shared PCI interrupts */
if (!OK_STAT(stat=hwif->INB(IDE_STATUS_REG),DATA_READY,BAD_R_STAT)) {
if (stat & (ERR_STAT|DRQ_STAT)) {
return DRIVER(drive)->error(drive, "read_intr", stat);
}
/* no data yet, so wait for another interrupt */
ide_set_handler(drive, &read_intr, WAIT_CMD, NULL);
return ide_started;
}
read_next:
rq = HWGROUP(drive)->rq;
if (msect) {
if ((nsect = rq->current_nr_sectors) > msect)
nsect = msect;
msect -= nsect;
} else
nsect = 1;
to = ide_map_buffer(rq, &flags);
taskfile_input_data(drive, to, nsect * SECTOR_WORDS);
#ifdef DEBUG
printk("%s: read: sectors(%ld-%ld), buffer=0x%08lx, remaining=%ld\n",
drive->name, rq->sector, rq->sector+nsect-1,
(unsigned long) rq->buffer+(nsect<<9), rq->nr_sectors-nsect);
#endif
ide_unmap_buffer(rq, to, &flags);
rq->sector += nsect;
rq->errors = 0;
i = (rq->nr_sectors -= nsect);
if (((long)(rq->current_nr_sectors -= nsect)) <= 0)
ide_end_request(drive, 1, rq->hard_cur_sectors);
/*
* Another BH Page walker and DATA INTEGRITY Questioned on ERROR.
* If passed back up on multimode read, BAD DATA could be ACKED
* to FILE SYSTEMS above ...
*/
if (i > 0) {
if (msect)
goto read_next;
ide_set_handler(drive, &read_intr, WAIT_CMD, NULL);
return ide_started;
}
return ide_stopped;
}
/*
* write_intr() is the handler for disk write interrupts
*/
static ide_startstop_t write_intr (ide_drive_t *drive)
{
ide_hwgroup_t *hwgroup = HWGROUP(drive);
ide_hwif_t *hwif = HWIF(drive);
struct request *rq = hwgroup->rq;
u32 i = 0;
u8 stat;
if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),
DRIVE_READY, drive->bad_wstat)) {
printk("%s: write_intr error1: nr_sectors=%ld, stat=0x%02x\n",
drive->name, rq->nr_sectors, stat);
} else {
#ifdef DEBUG
printk("%s: write: sector %ld, buffer=0x%08lx, remaining=%ld\n",
drive->name, rq->sector, (unsigned long) rq->buffer,
rq->nr_sectors-1);
#endif
if ((rq->nr_sectors == 1) ^ ((stat & DRQ_STAT) != 0)) {
rq->sector++;
rq->errors = 0;
i = --rq->nr_sectors;
--rq->current_nr_sectors;
if (((long)rq->current_nr_sectors) <= 0)
ide_end_request(drive, 1, rq->hard_cur_sectors);
if (i > 0) {
unsigned long flags;
char *to = ide_map_buffer(rq, &flags);
taskfile_output_data(drive, to, SECTOR_WORDS);
ide_unmap_buffer(rq, to, &flags);
ide_set_handler(drive, &write_intr, WAIT_CMD, NULL);
return ide_started;
}
return ide_stopped;
}
/* the original code did this here (?) */
return ide_stopped;
}
return DRIVER(drive)->error(drive, "write_intr", stat);
}
/*
* ide_multwrite() transfers a block of up to mcount sectors of data
* to a drive as part of a disk multiple-sector write operation.
*
* Note that we may be called from two contexts - __ide_do_rw_disk() context
* and IRQ context. The IRQ can happen any time after we've output the
* full "mcount" number of sectors, so we must make sure we update the
* state _before_ we output the final part of the data!
*
* The update and return to BH is a BLOCK Layer Fakey to get more data
* to satisfy the hardware atomic segment. If the hardware atomic segment
* is shorter or smaller than the BH segment then we should be OKAY.
* This is only valid if we can rewind the rq->current_nr_sectors counter.
*/
static void ide_multwrite(ide_drive_t *drive, unsigned int mcount)
{
ide_hwgroup_t *hwgroup = HWGROUP(drive);
struct request *rq = &hwgroup->wrq;
do {
char *buffer;
int nsect = rq->current_nr_sectors;
unsigned long flags;
if (nsect > mcount)
nsect = mcount;
mcount -= nsect;
buffer = ide_map_buffer(rq, &flags);
rq->sector += nsect;
rq->nr_sectors -= nsect;
rq->current_nr_sectors -= nsect;
/* Do we move to the next bh after this? */
if (!rq->current_nr_sectors) {
struct bio *bio = rq->bio;
/*
* only move to next bio, when we have processed
* all bvecs in this one.
*/
if (++bio->bi_idx >= bio->bi_vcnt) {
bio->bi_idx = bio->bi_vcnt - rq->nr_cbio_segments;
bio = bio->bi_next;
}
/* end early early we ran out of requests */
if (!bio) {
mcount = 0;
} else {
rq->bio = bio;
rq->nr_cbio_segments = bio_segments(bio);
rq->current_nr_sectors = bio_cur_sectors(bio);
rq->hard_cur_sectors = rq->current_nr_sectors;
}
}
/*
* Ok, we're all setup for the interrupt
* re-entering us on the last transfer.
*/
taskfile_output_data(drive, buffer, nsect<<7);
ide_unmap_buffer(rq, buffer, &flags);
} while (mcount);
}
/*
* multwrite_intr() is the handler for disk multwrite interrupts
*/
static ide_startstop_t multwrite_intr (ide_drive_t *drive)
{
ide_hwgroup_t *hwgroup = HWGROUP(drive);
ide_hwif_t *hwif = HWIF(drive);
struct request *rq = &hwgroup->wrq;
struct bio *bio = rq->bio;
u8 stat;
stat = hwif->INB(IDE_STATUS_REG);
if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) {
if (stat & DRQ_STAT) {
/*
* The drive wants data. Remember rq is the copy
* of the request
*/
if (rq->nr_sectors) {
ide_multwrite(drive, drive->mult_count);
ide_set_handler(drive, &multwrite_intr, WAIT_CMD, NULL);
return ide_started;
}
} else {
/*
* If the copy has all the blocks completed then
* we can end the original request.
*/
if (!rq->nr_sectors) { /* all done? */
bio->bi_idx = bio->bi_vcnt - rq->nr_cbio_segments;
rq = hwgroup->rq;
ide_end_request(drive, 1, rq->nr_sectors);
return ide_stopped;
}
}
bio->bi_idx = bio->bi_vcnt - rq->nr_cbio_segments;
/* the original code did this here (?) */
return ide_stopped;
}
bio->bi_idx = bio->bi_vcnt - rq->nr_cbio_segments;
return DRIVER(drive)->error(drive, "multwrite_intr", stat);
}
/*
* __ide_do_rw_disk() issues READ and WRITE commands to a disk,
* using LBA if supported, or CHS otherwise, to address sectors.
@@ -352,6 +142,11 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
dma = 0;
}
if (!dma) {
ide_init_sg_cmd(drive, rq);
ide_map_sg(drive, rq);
}
if (IDE_CONTROL_REG)
hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
@@ -435,44 +230,33 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
return ide_started;
}
/* fallback to PIO */
ide_init_sg_cmd(drive, rq);
}
if (rq_data_dir(rq) == READ) {
command = ((drive->mult_count) ?
((lba48) ? WIN_MULTREAD_EXT : WIN_MULTREAD) :
((lba48) ? WIN_READ_EXT : WIN_READ));
ide_execute_command(drive, command, &read_intr, WAIT_CMD, NULL);
return ide_started;
} else {
ide_startstop_t startstop;
command = ((drive->mult_count) ?
((lba48) ? WIN_MULTWRITE_EXT : WIN_MULTWRITE) :
((lba48) ? WIN_WRITE_EXT : WIN_WRITE));
hwif->OUTB(command, IDE_COMMAND_REG);
if (ide_wait_stat(&startstop, drive, DATA_READY,
drive->bad_wstat, WAIT_DRQ)) {
printk(KERN_ERR "%s: no DRQ after issuing %s\n",
drive->name,
drive->mult_count ? "MULTWRITE" : "WRITE");
return startstop;
}
if (!drive->unmask)
local_irq_disable();
if (drive->mult_count) {
ide_hwgroup_t *hwgroup = HWGROUP(drive);
hwif->data_phase = TASKFILE_MULTI_IN;
command = lba48 ? WIN_MULTREAD_EXT : WIN_MULTREAD;
} else {
hwif->data_phase = TASKFILE_IN;
command = lba48 ? WIN_READ_EXT : WIN_READ;
}
hwgroup->wrq = *rq; /* scratchpad */
ide_set_handler(drive, &multwrite_intr, WAIT_CMD, NULL);
ide_multwrite(drive, drive->mult_count);
ide_execute_command(drive, command, &task_in_intr, WAIT_CMD, NULL);
return ide_started;
} else {
if (drive->mult_count) {
hwif->data_phase = TASKFILE_MULTI_OUT;
command = lba48 ? WIN_MULTWRITE_EXT : WIN_MULTWRITE;
hwif->data_phase = TASKFILE_OUT;
command = lba48 ? WIN_WRITE_EXT : WIN_WRITE;
}
} else {
unsigned long flags;
char *to = ide_map_buffer(rq, &flags);
ide_set_handler(drive, &write_intr, WAIT_CMD, NULL);
taskfile_output_data(drive, to, SECTOR_WORDS);
ide_unmap_buffer(rq, to, &flags);
}
hwif->OUTB(command, IDE_COMMAND_REG);
pre_task_out_intr(drive, rq);
return ide_started;
}
}
@@ -516,6 +300,11 @@ static u8 get_command(ide_drive_t *drive, struct request *rq, ide_task_t *task)
dma = 0;
}
if (!dma) {
ide_init_sg_cmd(drive, rq);
ide_map_sg(drive, rq);
}
if (rq_data_dir(rq) == READ) {
task->command_type = IDE_DRIVE_TASK_IN;
if (dma)
@@ -779,10 +568,6 @@ ide_startstop_t idedisk_error (ide_drive_t *drive, const char *msg, u8 stat)
ide_end_drive_cmd(drive, stat, err);
return ide_stopped;
}
#ifdef CONFIG_IDE_TASKFILE_IO
/* make rq completion pointers new submission pointers */
blk_rq_prep_restart(rq);
#endif
if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
/* other bits are useless when BUSY */
...
@@ -610,8 +610,10 @@ int ide_dma_setup(ide_drive_t *drive)
reading = 1 << 3;
/* fall back to pio! */
if (!ide_build_dmatable(drive, rq))
if (!ide_build_dmatable(drive, rq)) {
ide_map_sg(drive, rq);
return 1;
}
/* PRD table */
hwif->OUTL(hwif->dmatable_dma, hwif->dma_prdtable);
@@ -810,10 +812,6 @@ int ide_release_dma_engine (ide_hwif_t *hwif)
hwif->dmatable_dma);
hwif->dmatable_cpu = NULL;
}
if (hwif->sg_table) {
kfree(hwif->sg_table);
hwif->sg_table = NULL;
}
return 1;
}
@@ -846,15 +844,12 @@ int ide_allocate_dma_engine (ide_hwif_t *hwif)
hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
PRD_ENTRIES * PRD_BYTES,
&hwif->dmatable_dma);
hwif->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES,
GFP_KERNEL);
if ((hwif->dmatable_cpu) && (hwif->sg_table))
if (hwif->dmatable_cpu)
return 0;
printk(KERN_ERR "%s: -- Error, unable to allocate%s%s table(s).\n",
printk(KERN_ERR "%s: -- Error, unable to allocate%s DMA table(s).\n",
(hwif->dmatable_cpu == NULL) ? " CPU" : "",
(hwif->sg_table == NULL) ? " SG DMA" : " DMA",
hwif->cds->name);
ide_release_dma_engine(hwif);
...
@@ -47,6 +47,7 @@
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
@@ -674,6 +675,31 @@ ide_startstop_t do_special (ide_drive_t *drive)
EXPORT_SYMBOL(do_special);
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
ide_hwif_t *hwif = drive->hwif;
struct scatterlist *sg = hwif->sg_table;
if ((rq->flags & REQ_DRIVE_TASKFILE) == 0) {
hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
} else {
sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
hwif->sg_nents = 1;
}
}
EXPORT_SYMBOL_GPL(ide_map_sg);
void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
ide_hwif_t *hwif = drive->hwif;
hwif->nsect = hwif->nleft = rq->nr_sectors;
hwif->cursg = hwif->cursg_ofs = 0;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
/**
* execute_drive_command - issue special drive command
* @drive: the drive to issue th command on
@@ -697,6 +723,17 @@ ide_startstop_t execute_drive_cmd (ide_drive_t *drive, struct request *rq)
hwif->data_phase = args->data_phase;
switch (hwif->data_phase) {
case TASKFILE_MULTI_OUT:
case TASKFILE_OUT:
case TASKFILE_MULTI_IN:
case TASKFILE_IN:
ide_init_sg_cmd(drive, rq);
ide_map_sg(drive, rq);
default:
break;
}
if (args->tf_out_flags.all != 0)
return flagged_taskfile(drive, args);
return do_rw_taskfile(drive, args);
...
@@ -1246,6 +1246,16 @@ static int hwif_init(ide_hwif_t *hwif)
if (register_blkdev(hwif->major, hwif->name))
return 0;
if (!hwif->sg_max_nents)
hwif->sg_max_nents = PRD_ENTRIES;
hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
GFP_KERNEL);
if (!hwif->sg_table) {
printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
goto out;
}
if (alloc_disks(hwif) < 0)
goto out;
...
@@ -5,7 +5,7 @@
* Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2001-2002 Klaus Smolin
* IBM Storage Technology Division
* Copyright (C) 2003 Bartlomiej Zolnierkiewicz
* Copyright (C) 2003-2004 Bartlomiej Zolnierkiewicz
*
* The big the bad and the ugly.
*
@@ -253,73 +253,6 @@ ide_startstop_t task_no_data_intr (ide_drive_t *drive)
EXPORT_SYMBOL(task_no_data_intr);
static void task_buffer_sectors(ide_drive_t *drive, struct request *rq,
unsigned nsect, unsigned rw)
{
char *buf = rq->buffer + blk_rq_offset(rq);
rq->sector += nsect;
rq->current_nr_sectors -= nsect;
rq->nr_sectors -= nsect;
__task_sectors(drive, buf, nsect, rw);
}
static inline void task_buffer_multi_sectors(ide_drive_t *drive,
struct request *rq, unsigned rw)
{
unsigned int msect = drive->mult_count, nsect;
nsect = rq->current_nr_sectors;
if (nsect > msect)
nsect = msect;
task_buffer_sectors(drive, rq, nsect, rw);
}
#ifdef CONFIG_IDE_TASKFILE_IO
static void task_sectors(ide_drive_t *drive, struct request *rq,
unsigned nsect, unsigned rw)
{
if (rq->cbio) { /* fs request */
rq->errors = 0;
task_bio_sectors(drive, rq, nsect, rw);
} else /* task request */
task_buffer_sectors(drive, rq, nsect, rw);
}
static inline void task_bio_multi_sectors(ide_drive_t *drive,
struct request *rq, unsigned rw)
{
unsigned int nsect, msect = drive->mult_count;
do {
nsect = rq->current_nr_sectors;
if (nsect > msect)
nsect = msect;
task_bio_sectors(drive, rq, nsect, rw);
if (!rq->nr_sectors)
msect = 0;
else
msect -= nsect;
} while (msect);
}
static void task_multi_sectors(ide_drive_t *drive,
struct request *rq, unsigned rw)
{
if (rq->cbio) { /* fs request */
rq->errors = 0;
task_bio_multi_sectors(drive, rq, rw);
} else /* task request */
task_buffer_multi_sectors(drive, rq, rw);
}
#else
# define task_sectors(d, rq, nsect, rw) task_buffer_sectors(d, rq, nsect, rw)
# define task_multi_sectors(d, rq, rw) task_buffer_multi_sectors(d, rq, rw)
#endif /* CONFIG_IDE_TASKFILE_IO */
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
@@ -340,37 +273,86 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
return stat;
}
static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
{
ide_hwif_t *hwif = drive->hwif;
struct scatterlist *sg = hwif->sg_table;
struct page *page;
#ifdef CONFIG_HIGHMEM
unsigned long flags;
#endif
u8 *buf;
page = sg[hwif->cursg].page;
#ifdef CONFIG_HIGHMEM
local_irq_save(flags);
#endif
buf = kmap_atomic(page, KM_BIO_SRC_IRQ) +
sg[hwif->cursg].offset + (hwif->cursg_ofs * SECTOR_SIZE);
hwif->nleft--;
hwif->cursg_ofs++;
if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) {
hwif->cursg++;
hwif->cursg_ofs = 0;
}
/* do the actual data transfer */
if (write)
taskfile_output_data(drive, buf, SECTOR_WORDS);
else
taskfile_input_data(drive, buf, SECTOR_WORDS);
kunmap_atomic(page, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
local_irq_restore(flags);
#endif
}
static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
{
unsigned int nsect;
nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
while (nsect--)
ide_pio_sector(drive, write);
}
static inline void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
unsigned int write)
{
if (rq->bio) /* fs request */
rq->errors = 0;
switch (drive->hwif->data_phase) {
case TASKFILE_MULTI_IN:
case TASKFILE_MULTI_OUT:
task_multi_sectors(drive, rq, write);
ide_pio_multi(drive, write);
break;
default:
task_sectors(drive, rq, 1, write);
ide_pio_sector(drive, write);
break;
}
}
#ifdef CONFIG_IDE_TASKFILE_IO
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
const char *s, u8 stat)
{
if (rq->bio) {
int sectors = rq->hard_nr_sectors - rq->nr_sectors;
ide_hwif_t *hwif = drive->hwif;
int sectors = hwif->nsect - hwif->nleft;
switch (drive->hwif->data_phase) {
switch (hwif->data_phase) {
case TASKFILE_IN:
if (rq->nr_sectors)
if (hwif->nleft)
break;
/* fall through */
case TASKFILE_OUT:
sectors--;
break;
case TASKFILE_MULTI_IN:
if (rq->nr_sectors)
if (hwif->nleft)
break;
/* fall through */
case TASKFILE_MULTI_OUT:
@@ -384,9 +366,6 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
}
return drive->driver->error(drive, s, stat);
}
#else
# define task_error(d, rq, s, stat) drive->driver->error(d, s, stat)
#endif
static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
@@ -407,9 +386,11 @@ static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
*/
ide_startstop_t task_in_intr (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct request *rq = HWGROUP(drive)->rq;
u8 stat = HWIF(drive)->INB(IDE_STATUS_REG);
u8 stat = hwif->INB(IDE_STATUS_REG);
/* new way for dealing with premature shared PCI interrupts */
if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
if (stat & (ERR_STAT | DRQ_STAT))
return task_error(drive, rq, __FUNCTION__, stat);
@@ -421,7 +402,7 @@ ide_startstop_t task_in_intr (ide_drive_t *drive)
ide_pio_datablock(drive, rq, 0);
/* If it was the last datablock check status and finish transfer. */
if (!rq->nr_sectors) {
if (!hwif->nleft) {
stat = wait_drive_not_busy(drive);
if (!OK_STAT(stat, 0, BAD_R_STAT))
return task_error(drive, rq, __FUNCTION__, stat);
@@ -441,18 +422,18 @@ EXPORT_SYMBOL(task_in_intr);
*/
ide_startstop_t task_out_intr (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct request *rq = HWGROUP(drive)->rq;
u8 stat = hwif->INB(IDE_STATUS_REG);
u8 stat;
stat = HWIF(drive)->INB(IDE_STATUS_REG);
if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
return task_error(drive, rq, __FUNCTION__, stat);
/* Deal with unexpected ATA data phase. */
if (((stat & DRQ_STAT) == 0) ^ !rq->nr_sectors)
if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
return task_error(drive, rq, __FUNCTION__, stat);
if (!rq->nr_sectors) {
if (!hwif->nleft) {
task_end_request(drive, rq, stat);
return ide_stopped;
}
...
@@ -712,6 +712,8 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
hwif->INSW = tmp_hwif->INSW;
hwif->INSL = tmp_hwif->INSL;
hwif->sg_max_nents = tmp_hwif->sg_max_nents;
hwif->mmio = tmp_hwif->mmio;
hwif->rqsize = tmp_hwif->rqsize;
hwif->no_lba48 = tmp_hwif->no_lba48;
@@ -900,6 +902,7 @@ void ide_unregister(unsigned int index)
hwif->drives[i].disk = NULL;
put_disk(disk);
}
kfree(hwif->sg_table);
unregister_blkdev(hwif->major, hwif->name);
spin_lock_irq(&ide_lock);
...
@@ -404,11 +404,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
if (!hwif->dmatable_cpu)
goto dma_alloc_failure;
hwif->sg_max_nents = IOC4_PRD_ENTRIES;
hwif->sg_table =
kmalloc(sizeof (struct scatterlist) * IOC4_PRD_ENTRIES, GFP_KERNEL);
if (!hwif->sg_table)
goto dma_sgalloc_failure;
hwif->dma_base2 = (unsigned long)
pci_alloc_consistent(hwif->pci_dev,
@@ -421,9 +417,6 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
return;
dma_base2alloc_failure:
kfree(hwif->sg_table);
dma_sgalloc_failure:
pci_free_consistent(hwif->pci_dev,
IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
hwif->dmatable_cpu, hwif->dmatable_dma);
@@ -584,6 +577,7 @@ static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
if (!(count = sgiioc4_build_dma_table(drive, rq, ddir))) {
/* try PIO instead of DMA */
ide_map_sg(drive, rq);
return 1;
}
...
@@ -78,10 +78,6 @@ typedef struct pmac_ide_hwif {
*/
volatile struct dbdma_regs __iomem * dma_regs;
struct dbdma_cmd* dma_table_cpu;
dma_addr_t dma_table_dma;
struct scatterlist* sg_table;
int sg_nents;
int sg_dma_direction;
#endif
} pmac_ide_hwif_t;
@@ -1245,6 +1241,8 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
hwif->noprobe = 0;
#endif /* CONFIG_PMAC_PBOOK */
hwif->sg_max_nents = MAX_DCMDS;
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
/* has a DBDMA controller channel */
if (pmif->dma_regs)
@@ -1562,26 +1560,23 @@ pmac_ide_probe(void)
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
/*
* We build & map the sglist for a given request.
* This is very close to the generic ide-dma version of the function except
* that we don't use the fields in the hwif but our own copies for sg_table
* and friends. We build & map the sglist for a given request
*/
static int __pmac
pmac_ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
ide_hwif_t *hwif = HWIF(drive);
pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
struct scatterlist *sg = pmif->sg_table;
struct scatterlist *sg = hwif->sg_table;
int nents;
nents = blk_rq_map_sg(drive->queue, rq, sg);
if (rq_data_dir(rq) == READ)
pmif->sg_dma_direction = PCI_DMA_FROMDEVICE;
hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
else
pmif->sg_dma_direction = PCI_DMA_TODEVICE;
hwif->sg_dma_direction = PCI_DMA_TODEVICE;
return pci_map_sg(hwif->pci_dev, sg, nents, pmif->sg_dma_direction);
return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}
/*
@@ -1591,18 +1586,17 @@ static int __pmac
pmac_ide_raw_build_sglist(ide_drive_t *drive, struct request *rq)
{
ide_hwif_t *hwif = HWIF(drive);
pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
struct scatterlist *sg = pmif->sg_table;
struct scatterlist *sg = hwif->sg_table;
int nents = 0;
ide_task_t *args = rq->special;
unsigned char *virt_addr = rq->buffer;
int sector_count = rq->nr_sectors;
if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
pmif->sg_dma_direction = PCI_DMA_TODEVICE;
hwif->sg_dma_direction = PCI_DMA_TODEVICE;
else
pmif->sg_dma_direction = PCI_DMA_FROMDEVICE;
hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
if (sector_count > 128) {
sg_init_one(&sg[nents], virt_addr, 128 * SECTOR_SIZE);
nents++;
@@ -1611,8 +1605,8 @@ pmac_ide_raw_build_sglist(ide_drive_t *drive, struct request *rq)
}
sg_init_one(&sg[nents], virt_addr, sector_count * SECTOR_SIZE);
nents++;
return pci_map_sg(hwif->pci_dev, sg, nents, pmif->sg_dma_direction);
return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}
/*
@@ -1640,14 +1634,14 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
/* Build sglist */
if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE)
pmif->sg_nents = i = pmac_ide_raw_build_sglist(drive, rq);
hwif->sg_nents = i = pmac_ide_raw_build_sglist(drive, rq);
else
pmif->sg_nents = i = pmac_ide_build_sglist(drive, rq);
hwif->sg_nents = i = pmac_ide_build_sglist(drive, rq);
if (!i)
return 0;
/* Build DBDMA commands list */
sg = pmif->sg_table;
sg = hwif->sg_table;
while (i && sg_dma_len(sg)) {
u32 cur_addr;
u32 cur_len;
@@ -1692,16 +1686,16 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
memset(table, 0, sizeof(struct dbdma_cmd));
st_le16(&table->command, DBDMA_STOP);
mb();
writel(pmif->dma_table_dma, &dma->cmdptr);
writel(hwif->dmatable_dma, &dma->cmdptr);
return 1;
}
printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);
use_pio_instead:
pci_unmap_sg(hwif->pci_dev,
pmif->sg_table,
pmif->sg_nents,
pmif->sg_dma_direction);
hwif->sg_table,
hwif->sg_nents,
hwif->sg_dma_direction);
return 0; /* revert to PIO for this request */
}
@@ -1709,14 +1703,14 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
static void __pmac
pmac_ide_destroy_dmatable (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = HWIF(drive)->pci_dev;
pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
struct scatterlist *sg = pmif->sg_table;
int nents = pmif->sg_nents;
struct scatterlist *sg = hwif->sg_table;
int nents = hwif->sg_nents;
if (nents) {
pci_unmap_sg(dev, sg, nents, pmif->sg_dma_direction);
pci_unmap_sg(dev, sg, nents, hwif->sg_dma_direction);
pmif->sg_nents = 0;
hwif->sg_nents = 0;
}
}
@@ -1891,8 +1885,10 @@ pmac_ide_dma_setup(ide_drive_t *drive)
return 1;
ata4 = (pmif->kind == controller_kl_ata4);
if (!pmac_ide_build_dmatable(drive, rq))
if (!pmac_ide_build_dmatable(drive, rq)) {
ide_map_sg(drive, rq);
return 1;
}
/* Apple adds 60ns to wrDataSetup on reads */
if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
@@ -2065,21 +2061,13 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
pmif->dma_table_cpu = (struct dbdma_cmd*)pci_alloc_consistent(
hwif->pci_dev,
(MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
&pmif->dma_table_dma);
&hwif->dmatable_dma);
if (pmif->dma_table_cpu == NULL) {
printk(KERN_ERR "%s: unable to allocate DMA command list\n",
hwif->name);
return;
}
pmif->sg_table = kmalloc(sizeof(struct scatterlist) * MAX_DCMDS,
GFP_KERNEL);
if (pmif->sg_table == NULL) {
pci_free_consistent( hwif->pci_dev,
(MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
pmif->dma_table_cpu, pmif->dma_table_dma);
return;
}
hwif->ide_dma_off_quietly = &__ide_dma_off_quietly;
hwif->ide_dma_on = &__ide_dma_on;
hwif->ide_dma_check = &pmac_ide_dma_check;
...
@@ -789,27 +789,6 @@ typedef struct ide_drive_s {
struct gendisk *disk;
} ide_drive_t;
/*
* mapping stuff, prepare for highmem...
*
* temporarily mapping a (possible) highmem bio for PIO transfer
*/
#ifndef CONFIG_IDE_TASKFILE_IO
#define ide_rq_offset(rq) \
(((rq)->hard_cur_sectors - (rq)->current_nr_sectors) << 9)
static inline void *ide_map_buffer(struct request *rq, unsigned long *flags)
{
return bio_kmap_irq(rq->bio, flags) + ide_rq_offset(rq);
}
static inline void ide_unmap_buffer(struct request *rq, char *buffer, unsigned long *flags)
{
bio_kunmap_irq(buffer, flags);
}
#endif /* !CONFIG_IDE_TASKFILE_IO */
#define IDE_CHIPSET_PCI_MASK \
((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx))
#define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1)
@@ -920,12 +899,18 @@ typedef struct hwif_s {
dma_addr_t dmatable_dma;
/* Scatter-gather list used to build the above */
struct scatterlist *sg_table;
int sg_max_nents; /* Maximum number of entries in it */
int sg_nents; /* Current number of entries in it */
int sg_dma_direction; /* dma transfer direction */
/* data phase of the active command (currently only valid for PIO/DMA) */
int data_phase;
unsigned int nsect;
unsigned int nleft;
unsigned int cursg;
unsigned int cursg_ofs;
int mmio; /* hosts iomio (0) or custom (2) select */
int rqsize; /* max sectors per request */
int irq; /* our irq number */
@@ -1369,35 +1354,6 @@ extern void atapi_output_bytes(ide_drive_t *, void *, u32);
extern void taskfile_input_data(ide_drive_t *, void *, u32);
extern void taskfile_output_data(ide_drive_t *, void *, u32);
#define IDE_PIO_IN 0
#define IDE_PIO_OUT 1
static inline void __task_sectors(ide_drive_t *drive, char *buf,
unsigned nsect, unsigned rw)
{
/*
* IRQ can happen instantly after reading/writing
* last sector of the datablock.
*/
if (rw == IDE_PIO_OUT)
taskfile_output_data(drive, buf, nsect * SECTOR_WORDS);
else
taskfile_input_data(drive, buf, nsect * SECTOR_WORDS);
}
#ifdef CONFIG_IDE_TASKFILE_IO
static inline void task_bio_sectors(ide_drive_t *drive, struct request *rq,
unsigned nsect, unsigned rw)
{
unsigned long flags;
char *buf = rq_map_buffer(rq, &flags);
process_that_request_first(rq, nsect);
__task_sectors(drive, buf, nsect, rw);
rq_unmap_buffer(buf, &flags);
}
#endif /* CONFIG_IDE_TASKFILE_IO */
extern int drive_is_ready(ide_drive_t *);
extern int wait_for_ready(ide_drive_t *, int /* timeout */);
@@ -1528,6 +1484,9 @@ typedef struct ide_pci_device_s {
extern void ide_setup_pci_device(struct pci_dev *, ide_pci_device_t *);
extern void ide_setup_pci_devices(struct pci_dev *, struct pci_dev *, ide_pci_device_t *);
void ide_map_sg(ide_drive_t *, struct request *);
void ide_init_sg_cmd(ide_drive_t *, struct request *);
#define BAD_DMA_DRIVE		0
#define GOOD_DMA_DRIVE		1
...