Commit 2dbd1502 authored by Martin Dalecki's avatar Martin Dalecki Committed by Linus Torvalds

[PATCH] IDE 98

Synchronize with 2.5.25.

Incorporate IDE-94, as well as 95, 96, 97 and 98-pre as announced by Bartek and
unfortunately still not included in 2.5.25, which admittedly makes things
still fall apart:

Missing changelog for 98-pre by Bartlomiej Zolnierkiewicz (BTW.  Handling
Unicode should be essential at least to make proper crediting of many many
people possible!) follows here:

 - add missing channel->lock unlocking/locking and fix some comments
   in ide_timer_expiry()

 - allow PCI drivers to disable autodma in ->init_dma()
   (bug introduced in IDE 97, affects sl82c105.c only)

   noticed by Russell King

 - alim15x3.c, if revision is <= 0x20 disable autodma

 - remove unneeded checks (drive.dn > 3) from pdc202xx.c and sis5513.c

 - use block layer wrappers

And my additions follow:

 - Fix TCQ code. Patch based on work by Alexander Atanasov.

 - Use the FreeBSD derived request handler return values:

	ATA_OP_FINISHED
	ATA_OP_CONTINUES
	ATA_OP_RELEASED
	ATA_OP_READY	/* for status ready reporting during poll */

 - PMAC compilation fix by Paul Mackerras.

 - Simplify the ata_status_poll function significantly.

 - Fix logic used to prevent drive IRQ assertion from drive on channels sharing
   our interrupt.

NOTE: We will soon move it to the point where a request is really finished.

 - Don't use ata_busy_poll() use ata_status_poll() instead. This increases code
   unification.

NOTE: We should maybe invent some way to prevent the error recovery path from being
taken at all. Especially to prevent ata_error from trying to reissue commands.
parent 5b2a1577
...@@ -703,11 +703,11 @@ static ide_startstop_t etrax_dma_intr(struct ata_device *drive, struct request * ...@@ -703,11 +703,11 @@ static ide_startstop_t etrax_dma_intr(struct ata_device *drive, struct request *
i -= rq->current_nr_sectors; i -= rq->current_nr_sectors;
ide_end_request(drive, rq, 1); ide_end_request(drive, rq, 1);
} }
return ide_stopped; return ATA_OP_FINISHED;
} }
printk("%s: bad DMA status\n", drive->name); printk("%s: bad DMA status\n", drive->name);
} }
return ata_error(drive, __FUNCTION__); return ata_error(drive, rq, __FUNCTION__);
} }
/* /*
......
...@@ -1202,6 +1202,26 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask) ...@@ -1202,6 +1202,26 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
return rq; return rq;
} }
/*
* Non-locking blk_get_request variant, for special requests from drivers.
*/
struct request *__blk_get_request(request_queue_t *q, int rw)
{
struct request *rq;
BUG_ON(rw != READ && rw != WRITE);
rq = get_request(q, rw);
if (rq) {
rq->flags = 0;
rq->buffer = NULL;
rq->bio = rq->biotail = NULL;
rq->waiting = NULL;
}
return rq;
}
void blk_put_request(struct request *rq) void blk_put_request(struct request *rq)
{ {
blkdev_release_request(rq); blkdev_release_request(rq);
...@@ -1381,6 +1401,14 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq) ...@@ -1381,6 +1401,14 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
} }
/*
* Non-locking blk_attempt_remerge variant.
*/
void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
{
attempt_back_merge(q, rq);
}
static int __make_request(request_queue_t *q, struct bio *bio) static int __make_request(request_queue_t *q, struct bio *bio)
{ {
struct request *req, *freereq = NULL; struct request *req, *freereq = NULL;
...@@ -2039,6 +2067,7 @@ EXPORT_SYMBOL(generic_unplug_device); ...@@ -2039,6 +2067,7 @@ EXPORT_SYMBOL(generic_unplug_device);
EXPORT_SYMBOL(blk_plug_device); EXPORT_SYMBOL(blk_plug_device);
EXPORT_SYMBOL(blk_remove_plug); EXPORT_SYMBOL(blk_remove_plug);
EXPORT_SYMBOL(blk_attempt_remerge); EXPORT_SYMBOL(blk_attempt_remerge);
EXPORT_SYMBOL(__blk_attempt_remerge);
EXPORT_SYMBOL(blk_max_low_pfn); EXPORT_SYMBOL(blk_max_low_pfn);
EXPORT_SYMBOL(blk_max_pfn); EXPORT_SYMBOL(blk_max_pfn);
EXPORT_SYMBOL(blk_queue_max_sectors); EXPORT_SYMBOL(blk_queue_max_sectors);
...@@ -2055,6 +2084,7 @@ EXPORT_SYMBOL(blk_queue_assign_lock); ...@@ -2055,6 +2084,7 @@ EXPORT_SYMBOL(blk_queue_assign_lock);
EXPORT_SYMBOL(blk_phys_contig_segment); EXPORT_SYMBOL(blk_phys_contig_segment);
EXPORT_SYMBOL(blk_hw_contig_segment); EXPORT_SYMBOL(blk_hw_contig_segment);
EXPORT_SYMBOL(blk_get_request); EXPORT_SYMBOL(blk_get_request);
EXPORT_SYMBOL(__blk_get_request);
EXPORT_SYMBOL(blk_put_request); EXPORT_SYMBOL(blk_put_request);
EXPORT_SYMBOL(blk_queue_prep_rq); EXPORT_SYMBOL(blk_queue_prep_rq);
......
...@@ -160,16 +160,15 @@ static void aec62xx_tune_drive(struct ata_device *drive, unsigned char pio) ...@@ -160,16 +160,15 @@ static void aec62xx_tune_drive(struct ata_device *drive, unsigned char pio)
} }
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
static int aec62xx_udma_setup(struct ata_device *drive) static int __init aec62xx_modes_map(struct ata_channel *ch)
{ {
u32 bmide = pci_resource_start(drive->channel->pci_dev, 4); u32 bmide = pci_resource_start(ch->pci_dev, 4);
short speed;
int map; int map;
map = XFER_PIO | XFER_EPIO | XFER_MWDMA | XFER_UDMA | XFER_SWDMA | XFER_UDMA; map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
if (drive->channel->udma_four) if (ch->udma_four)
switch (drive->channel->pci_dev->device) { switch (ch->pci_dev->device) {
case PCI_DEVICE_ID_ARTOP_ATP865R: case PCI_DEVICE_ID_ARTOP_ATP865R:
case PCI_DEVICE_ID_ARTOP_ATP865: case PCI_DEVICE_ID_ARTOP_ATP865:
/* Can't use these modes simultaneously, /* Can't use these modes simultaneously,
...@@ -180,11 +179,7 @@ static int aec62xx_udma_setup(struct ata_device *drive) ...@@ -180,11 +179,7 @@ static int aec62xx_udma_setup(struct ata_device *drive)
map |= XFER_UDMA_66; map |= XFER_UDMA_66;
} }
speed = ata_timing_mode(drive, map); return map;
aec_set_drive(drive, speed);
udma_enable(drive, drive->channel->autodma && (speed & XFER_MODE) != XFER_PIO, 0);
return 0;
} }
#endif #endif
...@@ -256,11 +251,12 @@ static void __init aec62xx_init_channel(struct ata_channel *ch) ...@@ -256,11 +251,12 @@ static void __init aec62xx_init_channel(struct ata_channel *ch)
ch->tuneproc = aec62xx_tune_drive; ch->tuneproc = aec62xx_tune_drive;
ch->speedproc = aec_set_drive; ch->speedproc = aec_set_drive;
ch->autodma = 0;
ch->io_32bit = 1; ch->io_32bit = 1;
ch->unmask = 1; ch->unmask = 1;
ch->udma_four = aec62xx_ata66_check(ch);
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
ch->drives[i].autotune = 1; ch->drives[i].autotune = 1;
ch->drives[i].dn = ch->unit * 2 + i; ch->drives[i].dn = ch->unit * 2 + i;
...@@ -269,11 +265,8 @@ static void __init aec62xx_init_channel(struct ata_channel *ch) ...@@ -269,11 +265,8 @@ static void __init aec62xx_init_channel(struct ata_channel *ch)
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
if (ch->dma_base) { if (ch->dma_base) {
ch->highmem = 1; ch->highmem = 1;
ch->udma_setup = aec62xx_udma_setup; ch->modes_map = aec62xx_modes_map(ch);
#ifdef CONFIG_IDEDMA_AUTO ch->udma_setup = udma_generic_setup;
if (!noautodma)
ch->autodma = 1;
#endif
} }
#endif #endif
} }
...@@ -306,17 +299,15 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -306,17 +299,15 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_ARTOP, vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP860, device: PCI_DEVICE_ID_ARTOP_ATP860,
init_chipset: aec62xx_init_chipset, init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel, init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: NEVER_BOARD, bootable: NEVER_BOARD,
flags: ATA_F_IRQ | ATA_F_NOADMA | ATA_F_DMA flags: ATA_F_IRQ | ATA_F_DMA
}, },
{ {
vendor: PCI_VENDOR_ID_ARTOP, vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP860R, device: PCI_DEVICE_ID_ARTOP_ATP860R,
init_chipset: aec62xx_init_chipset, init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel, init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: OFF_BOARD, bootable: OFF_BOARD,
...@@ -326,7 +317,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -326,7 +317,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_ARTOP, vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP865, device: PCI_DEVICE_ID_ARTOP_ATP865,
init_chipset: aec62xx_init_chipset, init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel, init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: NEVER_BOARD, bootable: NEVER_BOARD,
...@@ -336,7 +326,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -336,7 +326,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_ARTOP, vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP865R, device: PCI_DEVICE_ID_ARTOP_ATP865R,
init_chipset: aec62xx_init_chipset, init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel, init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: OFF_BOARD, bootable: OFF_BOARD,
......
...@@ -99,43 +99,6 @@ static void ali15x3_tune_drive(struct ata_device *drive, byte pio) ...@@ -99,43 +99,6 @@ static void ali15x3_tune_drive(struct ata_device *drive, byte pio)
__restore_flags(flags); __restore_flags(flags);
} }
static byte ali15x3_can_ultra(struct ata_device *drive)
{
if (m5229_revision <= 0x20) {
return 0;
} else if ((m5229_revision < 0xC2) &&
#ifndef CONFIG_WDC_ALI15X3
((chip_is_1543c_e && strstr(drive->id->model, "WDC ")) ||
(drive->type != ATA_DISK))) {
#else
(drive->type != ATA_DISK)) {
#endif
return 0;
} else {
return 1;
}
}
static int ali15x3_ratemask(struct ata_device *drive)
{
int map = 0;
if (!ali15x3_can_ultra(drive))
return 0;
map |= XFER_UDMA;
if (!eighty_ninty_three(drive))
return map;
if (m5229_revision >= 0xC4)
map |= XFER_UDMA_100;
if (m5229_revision >= 0xC2)
map |= XFER_UDMA_66;
return map;
}
static int ali15x3_tune_chipset(struct ata_device *drive, byte speed) static int ali15x3_tune_chipset(struct ata_device *drive, byte speed)
{ {
struct pci_dev *dev = drive->channel->pci_dev; struct pci_dev *dev = drive->channel->pci_dev;
...@@ -156,6 +119,7 @@ static int ali15x3_tune_chipset(struct ata_device *drive, byte speed) ...@@ -156,6 +119,7 @@ static int ali15x3_tune_chipset(struct ata_device *drive, byte speed)
if (speed < XFER_SW_DMA_0) if (speed < XFER_SW_DMA_0)
ali15x3_tune_drive(drive, speed); ali15x3_tune_drive(drive, speed);
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
/* FIXME: no support for MWDMA and SWDMA modes --bkz */
else if (speed >= XFER_UDMA_0) { else if (speed >= XFER_UDMA_0) {
pci_read_config_byte(dev, m5229_udma, &tmpbyte); pci_read_config_byte(dev, m5229_udma, &tmpbyte);
tmpbyte &= (0x0f << ((1-unit) << 2)); tmpbyte &= (0x0f << ((1-unit) << 2));
...@@ -176,91 +140,40 @@ static int ali15x3_tune_chipset(struct ata_device *drive, byte speed) ...@@ -176,91 +140,40 @@ static int ali15x3_tune_chipset(struct ata_device *drive, byte speed)
} }
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive, u8 udma) static int ali15x3_udma_setup(struct ata_device *drive, int map)
{ {
int map; #ifndef CONFIG_WDC_ALI15X3
u8 mode; if ((m5229_revision < 0xC2) && chip_is_1543c_e &&
strstr(drive->id->model, "WDC "))
if (udma) map &= ~XFER_UDMA_ALL;
map = ali15x3_ratemask(drive); #endif
else return udma_generic_setup(drive, map);
map = XFER_SWDMA | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
if (mode < XFER_SW_DMA_0)
return 0;
return !ali15x3_tune_chipset(drive, mode);
} }
static int ali15x3_udma_setup(struct ata_device *drive) static int ali15x3_udma_init(struct ata_device *drive, struct request *rq)
{ {
struct hd_driveid *id = drive->id; if ((m5229_revision < 0xC2) && (drive->type != ATA_DISK))
struct ata_channel *hwif = drive->channel; return ATA_OP_FINISHED; /* try PIO instead of DMA */
int on = 1;
int verbose = 1;
byte can_ultra_dma = ali15x3_can_ultra(drive);
if ((m5229_revision<=0x20) && (drive->type != ATA_DISK)) { return udma_pci_init(drive, rq);
udma_enable(drive, 0, 0); }
return 0;
}
if ((id != NULL) && ((id->capability & 1) != 0) && hwif->autodma) { static int __init ali15x3_modes_map(struct ata_channel *ch)
/* Consult the list of known "bad" drives */ {
if (udma_black_list(drive)) { int map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA;
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if ((id->field_valid & 4) && (m5229_revision >= 0xC2)) {
if (id->dma_ultra & 0x003F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, can_ultra_dma);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, can_ultra_dma);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, can_ultra_dma);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
ali15x3_tune_drive(drive, 255);
}
udma_enable(drive, on, verbose); if (m5229_revision <= 0x20)
return map;
return 0; map |= XFER_UDMA;
}
static int ali15x3_udma_init(struct ata_device *drive, struct request *rq) if (m5229_revision >= 0xC2) {
{ map |= XFER_UDMA_66;
if ((m5229_revision < 0xC2) && (drive->type != ATA_DISK)) if (m5229_revision >= 0xC4)
return ide_stopped; /* try PIO instead of DMA */ map |= XFER_UDMA_100;
}
return udma_pci_init(drive, rq); return map;
} }
#endif #endif
...@@ -426,6 +339,8 @@ static void __init ali15x3_init_channel(struct ata_channel *hwif) ...@@ -426,6 +339,8 @@ static void __init ali15x3_init_channel(struct ata_channel *hwif)
} }
#endif /* CONFIG_SPARC64 */ #endif /* CONFIG_SPARC64 */
hwif->udma_four = ali15x3_ata66_check(hwif);
hwif->tuneproc = &ali15x3_tune_drive; hwif->tuneproc = &ali15x3_tune_drive;
hwif->drives[0].autotune = 1; hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1; hwif->drives[1].autotune = 1;
...@@ -436,22 +351,21 @@ static void __init ali15x3_init_channel(struct ata_channel *hwif) ...@@ -436,22 +351,21 @@ static void __init ali15x3_init_channel(struct ata_channel *hwif)
/* /*
* M1543C or newer for DMAing * M1543C or newer for DMAing
*/ */
hwif->udma_init = ali15x3_udma_init; hwif->modes_map = ali15x3_modes_map(hwif);
if (m5229_revision < 0xC2)
hwif->no_atapi_autodma = 1;
hwif->udma_setup = ali15x3_udma_setup; hwif->udma_setup = ali15x3_udma_setup;
hwif->autodma = 1; hwif->udma_init = ali15x3_udma_init;
} }
if (noautodma)
hwif->autodma = 0;
#else
hwif->autodma = 0;
#endif #endif
} }
static void __init ali15x3_init_dma(struct ata_channel *ch, unsigned long dmabase) static void __init ali15x3_init_dma(struct ata_channel *ch, unsigned long dmabase)
{ {
if ((dmabase) && (m5229_revision < 0x20)) if (dmabase && (m5229_revision < 0x20)) {
ch->autodma = 0;
return; return;
}
ata_init_dma(ch, dmabase); ata_init_dma(ch, dmabase);
} }
...@@ -472,7 +386,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -472,7 +386,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AL, vendor: PCI_VENDOR_ID_AL,
device: PCI_DEVICE_ID_AL_M5229, device: PCI_DEVICE_ID_AL_M5229,
init_chipset: ali15x3_init_chipset, init_chipset: ali15x3_init_chipset,
ata66_check: ali15x3_ata66_check,
init_channel: ali15x3_init_channel, init_channel: ali15x3_init_channel,
init_dma: ali15x3_init_dma, init_dma: ali15x3_init_dma,
enablebits: { {0x00,0x00,0x00}, {0x00,0x00,0x00} }, enablebits: { {0x00,0x00,0x00}, {0x00,0x00,0x00} },
......
...@@ -175,21 +175,15 @@ static void amd74xx_tune_drive(struct ata_device *drive, u8 pio) ...@@ -175,21 +175,15 @@ static void amd74xx_tune_drive(struct ata_device *drive, u8 pio)
} }
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
static int amd74xx_udma_setup(struct ata_device *drive) static int __init amd_modes_map(struct ata_channel *ch)
{ {
short w80 = drive->channel->udma_four; short w80 = ch->udma_four;
int map = XFER_EPIO | XFER_MWDMA | XFER_UDMA |
short speed = ata_timing_mode(drive,
XFER_PIO | XFER_EPIO | XFER_MWDMA | XFER_UDMA |
((amd_config->flags & AMD_BAD_SWDMA) ? 0 : XFER_SWDMA) | ((amd_config->flags & AMD_BAD_SWDMA) ? 0 : XFER_SWDMA) |
(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_66 ? XFER_UDMA_66 : 0) | (w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_66 ? XFER_UDMA_66 : 0) |
(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_100 ? XFER_UDMA_100 : 0)); (w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_100 ? XFER_UDMA_100 : 0);
amd_set_drive(drive, speed);
udma_enable(drive, drive->channel->autodma && (speed & XFER_MODE) != XFER_PIO, 0);
return 0; return map;
} }
#endif #endif
...@@ -274,9 +268,10 @@ static void __init amd74xx_init_channel(struct ata_channel *hwif) ...@@ -274,9 +268,10 @@ static void __init amd74xx_init_channel(struct ata_channel *hwif)
{ {
int i; int i;
hwif->udma_four = amd74xx_ata66_check(hwif);
hwif->tuneproc = &amd74xx_tune_drive; hwif->tuneproc = &amd74xx_tune_drive;
hwif->speedproc = &amd_set_drive; hwif->speedproc = &amd_set_drive;
hwif->autodma = 0;
hwif->io_32bit = 1; hwif->io_32bit = 1;
hwif->unmask = 1; hwif->unmask = 1;
...@@ -289,11 +284,8 @@ static void __init amd74xx_init_channel(struct ata_channel *hwif) ...@@ -289,11 +284,8 @@ static void __init amd74xx_init_channel(struct ata_channel *hwif)
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) { if (hwif->dma_base) {
hwif->highmem = 1; hwif->highmem = 1;
hwif->udma_setup = amd74xx_udma_setup; hwif->modes_map = amd_modes_map(hwif);
# ifdef CONFIG_IDEDMA_AUTO hwif->udma_setup = udma_generic_setup;
if (!noautodma)
hwif->autodma = 1;
# endif
} }
#endif #endif
} }
...@@ -314,7 +306,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -314,7 +306,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD, vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_COBRA_7401, device: PCI_DEVICE_ID_AMD_COBRA_7401,
init_chipset: amd74xx_init_chipset, init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel, init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma, init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
...@@ -324,7 +315,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -324,7 +315,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD, vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_VIPER_7409, device: PCI_DEVICE_ID_AMD_VIPER_7409,
init_chipset: amd74xx_init_chipset, init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel, init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma, init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
...@@ -335,7 +325,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -335,7 +325,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD, vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_VIPER_7411, device: PCI_DEVICE_ID_AMD_VIPER_7411,
init_chipset: amd74xx_init_chipset, init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel, init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma, init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
...@@ -345,7 +334,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -345,7 +334,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD, vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_OPUS_7441, device: PCI_DEVICE_ID_AMD_OPUS_7441,
init_chipset: amd74xx_init_chipset, init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel, init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma, init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
...@@ -355,7 +343,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -355,7 +343,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD, vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_8111_IDE, device: PCI_DEVICE_ID_AMD_8111_IDE,
init_chipset: amd74xx_init_chipset, init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel, init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma, init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
...@@ -365,7 +352,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -365,7 +352,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_NVIDIA, vendor: PCI_VENDOR_ID_NVIDIA,
device: PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, device: PCI_DEVICE_ID_NVIDIA_NFORCE_IDE,
init_chipset: amd74xx_init_chipset, init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel, init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma, init_dma: amd74xx_init_dma,
enablebits: {{0x50,0x01,0x01}, {0x50,0x02,0x02}}, enablebits: {{0x50,0x01,0x01}, {0x50,0x02,0x02}},
......
...@@ -86,9 +86,11 @@ short ata_timing_mode(struct ata_device *drive, int map) ...@@ -86,9 +86,11 @@ short ata_timing_mode(struct ata_device *drive, int map)
if ((map & XFER_UDMA_100) == XFER_UDMA_100) if ((map & XFER_UDMA_100) == XFER_UDMA_100)
if ((best = (id->dma_ultra & 0x0020) ? XFER_UDMA_5 : 0)) return best; if ((best = (id->dma_ultra & 0x0020) ? XFER_UDMA_5 : 0)) return best;
if ((map & XFER_UDMA_66) == XFER_UDMA_66) if ((map & XFER_UDMA_66_4) == XFER_UDMA_66_4)
if ((best = (id->dma_ultra & 0x0010) ? XFER_UDMA_4 : if ((best = (id->dma_ultra & 0x0010) ? XFER_UDMA_4 : 0)) return best;
(id->dma_ultra & 0x0008) ? XFER_UDMA_3 : 0)) return best;
if ((map & XFER_UDMA_66_3) == XFER_UDMA_66_3)
if ((best = (id->dma_ultra & 0x0008) ? XFER_UDMA_3 : 0)) return best;
if ((best = (id->dma_ultra & 0x0004) ? XFER_UDMA_2 : if ((best = (id->dma_ultra & 0x0004) ? XFER_UDMA_2 :
(id->dma_ultra & 0x0002) ? XFER_UDMA_1 : (id->dma_ultra & 0x0002) ? XFER_UDMA_1 :
......
...@@ -59,15 +59,22 @@ extern struct ata_timing ata_timing[]; ...@@ -59,15 +59,22 @@ extern struct ata_timing ata_timing[];
#define ENOUGH(v,unit) (((v)-1)/(unit)+1) #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
#define EZ(v,unit) ((v)?ENOUGH(v,unit):0) #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
#define XFER_MODE 0xf0 /* see hpt366.c for details */
#define XFER_UDMA_133 0x48 #define XFER_UDMA_66_3 0x100
#define XFER_UDMA_100 0x44 #define XFER_UDMA_66_4 0x200
#define XFER_UDMA_66 0x42
#define XFER_UDMA 0x40 #define XFER_MODE 0xff0
#define XFER_MWDMA 0x20 #define XFER_UDMA_133 0x800
#define XFER_SWDMA 0x10 #define XFER_UDMA_100 0x400
#define XFER_EPIO 0x01 #define XFER_UDMA_66 0x300
#define XFER_PIO 0x00 #define XFER_UDMA 0x040
#define XFER_MWDMA 0x020
#define XFER_SWDMA 0x010
#define XFER_EPIO 0x001
#define XFER_PIO 0x000
#define XFER_UDMA_ALL 0xf40
#define XFER_UDMA_80W 0xf00
/* External interface to host chips channel timing setup. /* External interface to host chips channel timing setup.
* *
......
...@@ -217,10 +217,10 @@ static void cmd64x_tuneproc(struct ata_device *drive, u8 pio) ...@@ -217,10 +217,10 @@ static void cmd64x_tuneproc(struct ata_device *drive, u8 pio)
ide_config_drive_speed(drive, speed); ide_config_drive_speed(drive, speed);
} }
static int cmd64x_ratemask(struct ata_device *drive) static int __init cmd6xx_modes_map(struct ata_channel *ch)
{ {
struct pci_dev *dev = drive->channel->pci_dev; struct pci_dev *dev = ch->pci_dev;
int map = 0; int map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA;
switch(dev->device) { switch(dev->device) {
case PCI_DEVICE_ID_CMD_680: case PCI_DEVICE_ID_CMD_680:
...@@ -234,10 +234,9 @@ static int cmd64x_ratemask(struct ata_device *drive) ...@@ -234,10 +234,9 @@ static int cmd64x_ratemask(struct ata_device *drive)
break; break;
case PCI_DEVICE_ID_CMD_646: case PCI_DEVICE_ID_CMD_646:
{ {
u32 class_rev; u32 rev;
pci_read_config_dword(dev, pci_read_config_dword(dev, PCI_CLASS_REVISION, &rev);
PCI_CLASS_REVISION, &class_rev); rev &= 0xff;
class_rev &= 0xff;
/* /*
* UltraDMA only supported on PCI646U and PCI646U2, which * UltraDMA only supported on PCI646U and PCI646U2, which
* correspond to revisions 0x03, 0x05 and 0x07 respectively. * correspond to revisions 0x03, 0x05 and 0x07 respectively.
...@@ -250,7 +249,7 @@ static int cmd64x_ratemask(struct ata_device *drive) ...@@ -250,7 +249,7 @@ static int cmd64x_ratemask(struct ata_device *drive)
* *
* So we only do UltraDMA on revision 0x05 and 0x07 chipsets. * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
*/ */
switch(class_rev) { switch(rev) {
case 0x07: case 0x07:
case 0x05: case 0x05:
map |= XFER_UDMA; map |= XFER_UDMA;
...@@ -260,11 +259,6 @@ static int cmd64x_ratemask(struct ata_device *drive) ...@@ -260,11 +259,6 @@ static int cmd64x_ratemask(struct ata_device *drive)
} }
} }
if (!eighty_ninty_three(drive)) {
if (map & XFER_UDMA)
return XFER_UDMA;
return 0;
}
return map; return map;
} }
...@@ -515,80 +509,6 @@ speed_break : ...@@ -515,80 +509,6 @@ speed_break :
} }
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive, u8 udma)
{
int map;
u8 mode;
if (udma)
map = cmd64x_ratemask(drive);
else
map = XFER_SWDMA | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
return !drive->channel->speedproc(drive, mode);
}
static int cmd6xx_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
struct ata_channel *hwif = drive->channel;
int on = 1;
int verbose = 1;
hwif->tuneproc(drive, 255);
if ((id != NULL) && ((id->capability & 1) != 0) &&
hwif->autodma && (drive->type == ATA_DISK)) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if ((id->field_valid & 4)) {
if (id->dma_ultra & 0x007F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, 1);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
hwif->tuneproc(drive, 255);
}
udma_enable(drive, on, verbose);
return 0;
}
static int cmd64x_udma_stop(struct ata_device *drive) static int cmd64x_udma_stop(struct ata_device *drive)
{ {
struct ata_channel *ch = drive->channel; struct ata_channel *ch = drive->channel;
...@@ -822,13 +742,6 @@ static unsigned int cmd64x_ata66(struct ata_channel *hwif) ...@@ -822,13 +742,6 @@ static unsigned int cmd64x_ata66(struct ata_channel *hwif)
return (ata66 & mask) ? 1 : 0; return (ata66 & mask) ? 1 : 0;
} }
static unsigned int __init cmd64x_ata66_check(struct ata_channel *hwif)
{
if (hwif->pci_dev->device == PCI_DEVICE_ID_CMD_680)
return cmd680_ata66(hwif);
return cmd64x_ata66(hwif);
}
static void __init cmd64x_init_channel(struct ata_channel *hwif) static void __init cmd64x_init_channel(struct ata_channel *hwif)
{ {
struct pci_dev *dev = hwif->pci_dev; struct pci_dev *dev = hwif->pci_dev;
...@@ -843,32 +756,28 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif) ...@@ -843,32 +756,28 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif)
switch(dev->device) { switch(dev->device) {
case PCI_DEVICE_ID_CMD_680: case PCI_DEVICE_ID_CMD_680:
hwif->busproc = cmd680_busproc; hwif->busproc = cmd680_busproc;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base)
hwif->udma_setup = cmd6xx_udma_setup;
#endif
hwif->resetproc = cmd680_reset; hwif->resetproc = cmd680_reset;
hwif->speedproc = cmd680_tune_chipset; hwif->speedproc = cmd680_tune_chipset;
hwif->tuneproc = cmd680_tuneproc; hwif->tuneproc = cmd680_tuneproc;
hwif->udma_four = cmd680_ata66(hwif);
break; break;
case PCI_DEVICE_ID_CMD_649: case PCI_DEVICE_ID_CMD_649:
case PCI_DEVICE_ID_CMD_648: case PCI_DEVICE_ID_CMD_648:
case PCI_DEVICE_ID_CMD_643: case PCI_DEVICE_ID_CMD_643:
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) { if (hwif->dma_base) {
hwif->udma_setup = cmd6xx_udma_setup;
hwif->udma_stop = cmd64x_udma_stop; hwif->udma_stop = cmd64x_udma_stop;
hwif->udma_irq_status = cmd64x_udma_irq_status; hwif->udma_irq_status = cmd64x_udma_irq_status;
} }
#endif #endif
hwif->tuneproc = cmd64x_tuneproc; hwif->tuneproc = cmd64x_tuneproc;
hwif->speedproc = cmd64x_tune_chipset; hwif->speedproc = cmd64x_tune_chipset;
hwif->udma_four = cmd64x_ata66(hwif);
break; break;
case PCI_DEVICE_ID_CMD_646: case PCI_DEVICE_ID_CMD_646:
hwif->chipset = ide_cmd646; hwif->chipset = ide_cmd646;
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) { if (hwif->dma_base) {
hwif->udma_setup = cmd6xx_udma_setup;
if (class_rev == 0x01) { if (class_rev == 0x01) {
hwif->udma_stop = cmd646_1_udma_stop; hwif->udma_stop = cmd646_1_udma_stop;
} else { } else {
...@@ -879,6 +788,7 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif) ...@@ -879,6 +788,7 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif)
#endif #endif
hwif->tuneproc = cmd64x_tuneproc; hwif->tuneproc = cmd64x_tuneproc;
hwif->speedproc = cmd64x_tune_chipset; hwif->speedproc = cmd64x_tune_chipset;
hwif->udma_four = cmd64x_ata66(hwif);
break; break;
default: default:
break; break;
...@@ -887,10 +797,9 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif) ...@@ -887,10 +797,9 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif)
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) { if (hwif->dma_base) {
hwif->highmem = 1; hwif->highmem = 1;
# ifdef CONFIG_IDEDMA_AUTO hwif->modes_map = cmd6xx_modes_map(hwif);
if (!noautodma) hwif->no_atapi_autodma = 1;
hwif->autodma = 1; hwif->udma_setup = udma_generic_setup;
# endif
} }
#endif #endif
} }
...@@ -919,7 +828,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -919,7 +828,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_CMD, vendor: PCI_VENDOR_ID_CMD,
device: PCI_DEVICE_ID_CMD_648, device: PCI_DEVICE_ID_CMD_648,
init_chipset: cmd64x_init_chipset, init_chipset: cmd64x_init_chipset,
ata66_check: cmd64x_ata66_check,
init_channel: cmd64x_init_channel, init_channel: cmd64x_init_channel,
bootable: ON_BOARD, bootable: ON_BOARD,
flags: ATA_F_DMA flags: ATA_F_DMA
...@@ -928,7 +836,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -928,7 +836,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_CMD, vendor: PCI_VENDOR_ID_CMD,
device: PCI_DEVICE_ID_CMD_649, device: PCI_DEVICE_ID_CMD_649,
init_chipset: cmd64x_init_chipset, init_chipset: cmd64x_init_chipset,
ata66_check: cmd64x_ata66_check,
init_channel: cmd64x_init_channel, init_channel: cmd64x_init_channel,
bootable: ON_BOARD, bootable: ON_BOARD,
flags: ATA_F_DMA flags: ATA_F_DMA
...@@ -937,7 +844,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -937,7 +844,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_CMD, vendor: PCI_VENDOR_ID_CMD,
device: PCI_DEVICE_ID_CMD_680, device: PCI_DEVICE_ID_CMD_680,
init_chipset: cmd64x_init_chipset, init_chipset: cmd64x_init_chipset,
ata66_check: cmd64x_ata66_check,
init_channel: cmd64x_init_channel, init_channel: cmd64x_init_channel,
bootable: ON_BOARD, bootable: ON_BOARD,
flags: ATA_F_DMA flags: ATA_F_DMA
......
...@@ -191,7 +191,7 @@ static int cs5530_config_dma(struct ata_device *drive) ...@@ -191,7 +191,7 @@ static int cs5530_config_dma(struct ata_device *drive)
return 0; return 0;
} }
static int cs5530_udma_setup(struct ata_device *drive) static int cs5530_udma_setup(struct ata_device *drive, int map)
{ {
return cs5530_config_dma(drive); return cs5530_config_dma(drive);
} }
...@@ -285,17 +285,15 @@ static unsigned int __init pci_init_cs5530(struct pci_dev *dev) ...@@ -285,17 +285,15 @@ static unsigned int __init pci_init_cs5530(struct pci_dev *dev)
*/ */
static void __init ide_init_cs5530(struct ata_channel *hwif) static void __init ide_init_cs5530(struct ata_channel *hwif)
{ {
u32 basereg, d0_timings;
hwif->serialized = 1; hwif->serialized = 1;
if (!hwif->dma_base) {
hwif->autodma = 0;
} else {
unsigned int basereg, d0_timings;
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
hwif->udma_setup = cs5530_udma_setup; if (hwif->dma_base) {
hwif->highmem = 1; hwif->highmem = 1;
#else hwif->udma_setup = cs5530_udma_setup;
hwif->autodma = 0; }
#endif #endif
hwif->tuneproc = &cs5530_tuneproc; hwif->tuneproc = &cs5530_tuneproc;
...@@ -311,7 +309,6 @@ static void __init ide_init_cs5530(struct ata_channel *hwif) ...@@ -311,7 +309,6 @@ static void __init ide_init_cs5530(struct ata_channel *hwif)
if (!hwif->drives[1].autotune) if (!hwif->drives[1].autotune)
hwif->drives[1].autotune = 1; /* needs autotuning later */ hwif->drives[1].autotune = 1; /* needs autotuning later */
} }
}
} }
......
...@@ -237,7 +237,7 @@ static void cy82c693_dma_enable(struct ata_device *drive, int mode, int single) ...@@ -237,7 +237,7 @@ static void cy82c693_dma_enable(struct ata_device *drive, int mode, int single)
/* /*
* used to set DMA mode for CY82C693 (single and multi modes) * used to set DMA mode for CY82C693 (single and multi modes)
*/ */
static int cy82c693_udma_setup(struct ata_device *drive) static int cy82c693_udma_setup(struct ata_device *drive, int map)
{ {
/* /*
* Set dma mode for drive everything else is done by the defaul func. * Set dma mode for drive everything else is done by the defaul func.
...@@ -414,14 +414,11 @@ static void __init ide_init_cy82c693(struct ata_channel *hwif) ...@@ -414,14 +414,11 @@ static void __init ide_init_cy82c693(struct ata_channel *hwif)
hwif->tuneproc = cy82c693_tune_drive; hwif->tuneproc = cy82c693_tune_drive;
hwif->drives[0].autotune = 1; hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1; hwif->drives[1].autotune = 1;
hwif->autodma = 0;
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) { if (hwif->dma_base) {
hwif->highmem = 1; hwif->highmem = 1;
hwif->udma_setup = cy82c693_udma_setup; hwif->udma_setup = cy82c693_udma_setup;
if (!noautodma)
hwif->autodma = 1;
} }
#endif #endif
} }
......
...@@ -79,30 +79,8 @@ void ata_mask(struct ata_device *drive) ...@@ -79,30 +79,8 @@ void ata_mask(struct ata_device *drive)
ch->maskproc(drive); ch->maskproc(drive);
} }
/*
* Spin until the drive is no longer busy.
*
* Not exported, since it's not used within any modules.
*/
int ata_busy_poll(struct ata_device *drive, unsigned long timeout)
{
/* spec allows drive 400ns to assert "BUSY" */
udelay(1);
if (!ata_status(drive, 0, BUSY_STAT)) {
timeout += jiffies;
while (!ata_status(drive, 0, BUSY_STAT)) {
if (time_after(jiffies, timeout))
return 1;
}
}
return 0;
}
/* /*
* Check the state of the status register. * Check the state of the status register.
*
* FIXME: Channel lock should be held.
*/ */
int ata_status(struct ata_device *drive, u8 good, u8 bad) int ata_status(struct ata_device *drive, u8 good, u8 bad)
{ {
...@@ -120,31 +98,33 @@ EXPORT_SYMBOL(ata_status); ...@@ -120,31 +98,33 @@ EXPORT_SYMBOL(ata_status);
* all of the "good" bits and none of the "bad" bits, and if all is okay it * all of the "good" bits and none of the "bad" bits, and if all is okay it
* returns 0. All other cases return 1 after invoking error handler -- caller * returns 0. All other cases return 1 after invoking error handler -- caller
* should just return. * should just return.
*
* This routine should get fixed to not hog the cpu during extra long waits..
* That could be done by busy-waiting for the first jiffy or two, and then
* setting a timer to wake up at half second intervals thereafter, until
* timeout is achieved, before timing out.
*
* Channel lock should be held.
*/ */
int ata_status_poll(struct ata_device *drive, u8 good, u8 bad, int ata_status_poll(struct ata_device *drive, u8 good, u8 bad,
unsigned long timeout, unsigned long timeout, struct request *rq)
struct request *rq, ide_startstop_t *startstop)
{ {
int i; int i;
/* bail early if we've exceeded max_failures */ /* bail early if we've exceeded max_failures */
if (drive->max_failures && (drive->failures > drive->max_failures)) { if (drive->max_failures && (drive->failures > drive->max_failures))
*startstop = ide_stopped; return ATA_OP_FINISHED;
/*
* Spin until the drive is no longer busy.
* Spec allows drive 400ns to assert "BUSY"
*/
udelay(1);
if (!ata_status(drive, 0, BUSY_STAT)) {
unsigned long flags;
return 1; __save_flags(flags);
ide__sti();
timeout += jiffies;
while (!ata_status(drive, 0, BUSY_STAT)) {
if (time_after(jiffies, timeout)) {
__restore_flags(flags);
return ata_error(drive, rq, "status timeout");
} }
}
if (ata_busy_poll(drive, timeout)) { __restore_flags(flags);
*startstop = ata_error(drive, rq, "status timeout");
return 1;
} }
/* /*
...@@ -156,12 +136,10 @@ int ata_status_poll(struct ata_device *drive, u8 good, u8 bad, ...@@ -156,12 +136,10 @@ int ata_status_poll(struct ata_device *drive, u8 good, u8 bad,
for (i = 0; i < 10; i++) { for (i = 0; i < 10; i++) {
udelay(1); udelay(1);
if (ata_status(drive, good, bad)) if (ata_status(drive, good, bad))
return 0; return ATA_OP_READY;
} }
*startstop = ata_error(drive, rq, "status error"); return ata_error(drive, rq, "status error");
return 1;
} }
EXPORT_SYMBOL(ata_status_poll); EXPORT_SYMBOL(ata_status_poll);
......
...@@ -72,83 +72,13 @@ static void hpt34x_tune_drive(struct ata_device *drive, u8 pio) ...@@ -72,83 +72,13 @@ static void hpt34x_tune_drive(struct ata_device *drive, u8 pio)
} }
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive, u8 udma) static int hpt34x_udma_setup(struct ata_device *drive, int map)
{ {
int map; #ifdef CONFIG_HPT34X_AUTODMA
u8 mode; return udma_generic_setup(drive, map);
#else
if (drive->type != ATA_DISK)
return 0;
if (udma)
map = XFER_UDMA;
else
map = XFER_SWDMA | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
if (mode < XFER_SW_DMA_0)
return 0; return 0;
return !hpt34x_tune_chipset(drive, mode);
}
static int hpt34x_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
int on = 1;
int verbose = 1;
if (id && (id->capability & 1) && drive->channel->autodma) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if (id->field_valid & 4) {
if (id->dma_ultra & 0x0007) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, 1);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
hpt34x_tune_chipset(drive, ata_best_pio_mode(drive));
}
#ifndef CONFIG_HPT34X_AUTODMA
if (on)
on = 0;
#endif #endif
udma_enable(drive, on, verbose);
return 0;
} }
static int hpt34x_udma_stop(struct ata_device *drive) static int hpt34x_udma_stop(struct ata_device *drive)
...@@ -173,7 +103,7 @@ static int hpt34x_udma_init(struct ata_device *drive, struct request *rq) ...@@ -173,7 +103,7 @@ static int hpt34x_udma_init(struct ata_device *drive, struct request *rq)
u8 cmd; u8 cmd;
if (!(count = udma_new_table(drive, rq))) if (!(count = udma_new_table(drive, rq)))
return ide_stopped; /* try PIO instead of DMA */ return ATA_OP_FINISHED; /* try PIO instead of DMA */
if (rq_data_dir(rq) == READ) if (rq_data_dir(rq) == READ)
cmd = 0x09; cmd = 0x09;
...@@ -189,7 +119,7 @@ static int hpt34x_udma_init(struct ata_device *drive, struct request *rq) ...@@ -189,7 +119,7 @@ static int hpt34x_udma_init(struct ata_device *drive, struct request *rq)
OUT_BYTE((cmd == 0x09) ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG); OUT_BYTE((cmd == 0x09) ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
} }
return ide_started; return ATA_OP_CONTINUES;
} }
#endif #endif
...@@ -252,24 +182,21 @@ static void __init ide_init_hpt34x(struct ata_channel *hwif) ...@@ -252,24 +182,21 @@ static void __init ide_init_hpt34x(struct ata_channel *hwif)
unsigned short pcicmd = 0; unsigned short pcicmd = 0;
pci_read_config_word(hwif->pci_dev, PCI_COMMAND, &pcicmd); pci_read_config_word(hwif->pci_dev, PCI_COMMAND, &pcicmd);
if (!noautodma) #ifdef CONFIG_IDEDMA_AUTO
hwif->autodma = (pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0; hwif->autodma = (pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0;
else #endif
hwif->autodma = 0;
hwif->udma_stop = hpt34x_udma_stop; hwif->udma_stop = hpt34x_udma_stop;
hwif->udma_init = hpt34x_udma_init; hwif->udma_init = hpt34x_udma_init;
hwif->modes_map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
hwif->no_atapi_autodma = 1;
hwif->udma_setup = hpt34x_udma_setup; hwif->udma_setup = hpt34x_udma_setup;
hwif->highmem = 1; hwif->highmem = 1;
} else { } else
#endif
{
hwif->drives[0].autotune = 1; hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1; hwif->drives[1].autotune = 1;
} }
#else
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
hwif->autodma = 0;
#endif
} }
...@@ -281,7 +208,7 @@ static struct ata_pci_device chipset __initdata = { ...@@ -281,7 +208,7 @@ static struct ata_pci_device chipset __initdata = {
init_channel: ide_init_hpt34x, init_channel: ide_init_hpt34x,
bootable: NEVER_BOARD, bootable: NEVER_BOARD,
extra: 16, extra: 16,
flags: ATA_F_NOADMA | ATA_F_DMA flags: ATA_F_DMA
}; };
int __init init_hpt34x(void) int __init init_hpt34x(void)
......
...@@ -493,37 +493,23 @@ static unsigned int hpt_revision(struct pci_dev *dev) ...@@ -493,37 +493,23 @@ static unsigned int hpt_revision(struct pci_dev *dev)
return class_rev; return class_rev;
} }
static int hpt3xx_ratemask(struct ata_device *drive) static int __init hpt3xx_modes_map(struct ata_channel *ch)
{ {
u32 rev = hpt_revision(drive->channel->pci_dev); u32 rev = hpt_revision(ch->pci_dev);
int map = XFER_UDMA; int map = XFER_EPIO | XFER_MWDMA | XFER_UDMA | XFER_UDMA_66;
if (rev >= 8) { /* HPT374 */ if (rev >= 8) { /* HPT374 */
if (HPT374_ALLOW_ATA133_6) if (HPT374_ALLOW_ATA133_6)
map |= XFER_UDMA_133; map |= XFER_UDMA_133;
map |= (XFER_UDMA_100 | XFER_UDMA_66); map |= XFER_UDMA_100;
} else if (rev >= 5) { /* HPT372 */ } else if (rev >= 5) { /* HPT372 */
if (HPT372_ALLOW_ATA133_6) if (HPT372_ALLOW_ATA133_6)
map |= XFER_UDMA_133; map |= XFER_UDMA_133;
map |= (XFER_UDMA_100 | XFER_UDMA_66);
} else if (rev >= 4) { /* HPT370A */
if (HPT370_ALLOW_ATA100_5)
map |= XFER_UDMA_100; map |= XFER_UDMA_100;
map |= XFER_UDMA_66; } else if (rev >= 3) { /* HPT370A / HPT370 */
} else if (rev >= 3) { /* HPT370 */
if (HPT370_ALLOW_ATA100_5) if (HPT370_ALLOW_ATA100_5)
map |= XFER_UDMA_100; map |= XFER_UDMA_100;
map |= XFER_UDMA_66; } /* HPT366 / HPT368 */
if (check_in_drive_lists(drive, bad_ata33))
return 0;
} else { /* HPT366 and HPT368 */
map |= XFER_UDMA_66;
if (check_in_drive_lists(drive, bad_ata33))
return 0;
}
if (!eighty_ninty_three(drive))
return XFER_UDMA;
return map; return map;
} }
...@@ -662,62 +648,42 @@ static int hpt3xx_tune_chipset(struct ata_device *drive, u8 speed) ...@@ -662,62 +648,42 @@ static int hpt3xx_tune_chipset(struct ata_device *drive, u8 speed)
return ide_config_drive_speed(drive, speed); return ide_config_drive_speed(drive, speed);
} }
/* FIXME: pio == 255 -> ata_best_pio_mode(drive) --bkz */
static void hpt3xx_tune_drive(struct ata_device *drive, u8 pio) static void hpt3xx_tune_drive(struct ata_device *drive, u8 pio)
{ {
(void) hpt3xx_tune_chipset(drive, XFER_PIO_0 + min_t(u8, pio, 4)); (void) hpt3xx_tune_chipset(drive, XFER_PIO_0 + min_t(u8, pio, 4));
} }
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive) static int hpt3xx_udma_setup(struct ata_device *drive, int map)
{ {
int map;
u32 rev; u32 rev;
u8 mode;
if (drive->type != ATA_DISK) if (drive->type != ATA_DISK)
return 0; return 0;
rev = hpt_revision(drive->channel->pci_dev); rev = hpt_revision(drive->channel->pci_dev);
/* FIXME: check SWDMA modes --bkz */ /* FIXME: badlists need futher investigation --bkz */
map = hpt3xx_ratemask(drive) | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
/* FIXME: badlists need futher investigation --bkz /* bad_ata100_5 is for HPT370/370A,
bad_ata100_5 is for HPT370/370A, bad_ata66_4, bad_ata66_3 and bad_ata33 are for HPT366/368 */
bad_ata66_4, bad_ata66_3 and bad_ata33 are for HPT366/368
*/ if (rev < 5 && check_in_drive_lists(drive, bad_ata100_5))
if (mode == XFER_UDMA_5 && rev < 5) {
if (check_in_drive_lists(drive, bad_ata100_5)) {
/* FIXME: make XFER_UDMA_66/100/133
independent of XFER_UDMA --bkz */
map &= ~XFER_UDMA_100; map &= ~XFER_UDMA_100;
map |= XFER_UDMA;
mode = ata_timing_mode(drive, map); if (rev < 3) {
} if (check_in_drive_lists(drive, bad_ata66_4))
} map &= ~XFER_UDMA_66_4;
if (mode == XFER_UDMA_4 && rev < 3) {
if (check_in_drive_lists(drive, bad_ata66_4)) { if (check_in_drive_lists(drive, bad_ata66_3))
if (drive->id->dma_ultra & 0x0008) { map &= ~XFER_UDMA_66_3;
mode = XFER_UDMA_3;
} else { if (check_in_drive_lists(drive, bad_ata33))
map &= ~XFER_UDMA_66; map &= ~XFER_UDMA_ALL;
map |= XFER_UDMA;
mode = ata_timing_mode(drive, map);
}
}
}
if (mode == XFER_UDMA_3 && rev < 3) {
if (check_in_drive_lists(drive, bad_ata66_3)) {
map &= ~XFER_UDMA_66;
map |= XFER_UDMA;
mode = ata_timing_mode(drive, map);
}
} }
if (check_in_drive_lists(drive, bad_ata33) && rev < 3)
mode = ata_timing_mode(drive, XFER_MWDMA);
return !hpt3xx_tune_chipset(drive, mode); return udma_generic_setup(drive, map);
} }
static int hpt3xx_quirkproc(struct ata_device *drive) static int hpt3xx_quirkproc(struct ata_device *drive)
...@@ -754,59 +720,6 @@ static void hpt3xx_maskproc(struct ata_device *drive) ...@@ -754,59 +720,6 @@ static void hpt3xx_maskproc(struct ata_device *drive)
} }
} }
static int hpt3xx_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
int on = 1;
int verbose = 1;
if (id && (id->capability & 1) && drive->channel->autodma) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if (id->field_valid & 4) {
if (id->dma_ultra & 0x007F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if (id->dma_mword & 0x0007) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
hpt3xx_tune_chipset(drive, ata_best_pio_mode(drive));
}
udma_enable(drive, on, verbose);
return 0;
}
static void hpt366_udma_irq_lost(struct ata_device *drive) static void hpt366_udma_irq_lost(struct ata_device *drive)
{ {
struct pci_dev *dev = drive->channel->pci_dev; struct pci_dev *dev = drive->channel->pci_dev;
...@@ -1232,6 +1145,8 @@ static void __init hpt366_init_channel(struct ata_channel *ch) ...@@ -1232,6 +1145,8 @@ static void __init hpt366_init_channel(struct ata_channel *ch)
struct pci_dev *dev = ch->pci_dev; struct pci_dev *dev = ch->pci_dev;
u32 rev = hpt_revision(dev); u32 rev = hpt_revision(dev);
ch->udma_four = hpt366_ata66_check(ch);
ch->tuneproc = hpt3xx_tune_drive; ch->tuneproc = hpt3xx_tune_drive;
ch->speedproc = hpt3xx_tune_chipset; ch->speedproc = hpt3xx_tune_chipset;
ch->quirkproc = hpt3xx_quirkproc; ch->quirkproc = hpt3xx_quirkproc;
...@@ -1272,17 +1187,12 @@ static void __init hpt366_init_channel(struct ata_channel *ch) ...@@ -1272,17 +1187,12 @@ static void __init hpt366_init_channel(struct ata_channel *ch)
// ch->resetproc = hpt3xx_reset; // ch->resetproc = hpt3xx_reset;
// ch->busproc = hpt3xx_tristate; // ch->busproc = hpt3xx_tristate;
} }
ch->modes_map = hpt3xx_modes_map(ch);
ch->udma_setup = hpt3xx_udma_setup; ch->udma_setup = hpt3xx_udma_setup;
if (!noautodma)
ch->autodma = 1;
else
ch->autodma = 0;
ch->highmem = 1; ch->highmem = 1;
} else } else
#endif #endif
{ {
ch->autodma = 0;
ch->drives[0].autotune = 1; ch->drives[0].autotune = 1;
ch->drives[1].autotune = 1; ch->drives[1].autotune = 1;
} }
...@@ -1315,7 +1225,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -1315,7 +1225,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_TTI, vendor: PCI_VENDOR_ID_TTI,
device: PCI_DEVICE_ID_TTI_HPT366, device: PCI_DEVICE_ID_TTI_HPT366,
init_chipset: hpt366_init_chipset, init_chipset: hpt366_init_chipset,
ata66_check: hpt366_ata66_check,
init_channel: hpt366_init_channel, init_channel: hpt366_init_channel,
init_dma: hpt366_init_dma, init_dma: hpt366_init_dma,
bootable: OFF_BOARD, bootable: OFF_BOARD,
...@@ -1326,7 +1235,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -1326,7 +1235,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_TTI, vendor: PCI_VENDOR_ID_TTI,
device: PCI_DEVICE_ID_TTI_HPT372, device: PCI_DEVICE_ID_TTI_HPT372,
init_chipset: hpt366_init_chipset, init_chipset: hpt366_init_chipset,
ata66_check: hpt366_ata66_check,
init_channel: hpt366_init_channel, init_channel: hpt366_init_channel,
init_dma: hpt366_init_dma, init_dma: hpt366_init_dma,
bootable: OFF_BOARD, bootable: OFF_BOARD,
...@@ -1337,7 +1245,6 @@ static struct ata_pci_device chipsets[] __initdata = { ...@@ -1337,7 +1245,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_TTI, vendor: PCI_VENDOR_ID_TTI,
device: PCI_DEVICE_ID_TTI_HPT374, device: PCI_DEVICE_ID_TTI_HPT374,
init_chipset: hpt366_init_chipset, init_chipset: hpt366_init_chipset,
ata66_check: hpt366_ata66_check,
init_channel: hpt366_init_channel, init_channel: hpt366_init_channel,
init_dma: hpt366_init_dma, init_dma: hpt366_init_dma,
bootable: OFF_BOARD, bootable: OFF_BOARD,
......
...@@ -405,7 +405,7 @@ static void icside_dma_enable(struct ata_device *drive, int on, int verbose) ...@@ -405,7 +405,7 @@ static void icside_dma_enable(struct ata_device *drive, int on, int verbose)
#endif #endif
} }
static int icside_dma_check(struct ata_device *drive) static int icside_dma_check(struct ata_device *drive, int map)
{ {
struct hd_driveid *id = drive->id; struct hd_driveid *id = drive->id;
struct ata_channel *ch = drive->channel; struct ata_channel *ch = drive->channel;
...@@ -466,7 +466,7 @@ static ide_startstop_t icside_dmaintr(struct ata_device *drive, struct request * ...@@ -466,7 +466,7 @@ static ide_startstop_t icside_dmaintr(struct ata_device *drive, struct request *
if (ata_status(drive, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) { if (ata_status(drive, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
if (!dma_stat) { if (!dma_stat) {
__ide_end_request(drive, rq, 1, rq->nr_sectors); __ide_end_request(drive, rq, 1, rq->nr_sectors);
return ide_stopped; return ATA_OP_FINISHED;
} }
printk("%s: dma_intr: bad DMA status (dma_stat=%x)\n", printk("%s: dma_intr: bad DMA status (dma_stat=%x)\n",
drive->name, dma_stat); drive->name, dma_stat);
...@@ -516,10 +516,10 @@ static int icside_dma_init(struct ata_device *drive, struct request *rq) ...@@ -516,10 +516,10 @@ static int icside_dma_init(struct ata_device *drive, struct request *rq)
u8 int cmd; u8 int cmd;
if (icside_dma_common(drive, rq, DMA_MODE_WRITE)) if (icside_dma_common(drive, rq, DMA_MODE_WRITE))
return ide_stopped; return ATA_OP_FINISHED;
if (drive->type != ATA_DISK) if (drive->type != ATA_DISK)
return ide_started; return ATA_OP_CONTINUES;
ata_set_handler(drive, icside_dmaintr, WAIT_CMD, NULL); ata_set_handler(drive, icside_dmaintr, WAIT_CMD, NULL);
...@@ -535,7 +535,7 @@ static int icside_dma_init(struct ata_device *drive, struct request *rq) ...@@ -535,7 +535,7 @@ static int icside_dma_init(struct ata_device *drive, struct request *rq)
enable_dma(ch->hw.dma); enable_dma(ch->hw.dma);
return ide_started; return ATA_OP_CONTINUES;
} }
static int icside_irq_status(struct ata_device *drive) static int icside_irq_status(struct ata_device *drive)
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -257,24 +257,13 @@ static int __init setup_host_channel(struct pci_dev *dev, ...@@ -257,24 +257,13 @@ static int __init setup_host_channel(struct pci_dev *dev,
if (d->flags & ATA_F_NODMA) if (d->flags & ATA_F_NODMA)
goto no_dma; goto no_dma;
/* Check whatever this interface is UDMA4 mode capable. */ if (ch->udma_four)
if (ch->udma_four) {
printk("%s: warning: ATA-66/100 forced bit set!\n", dev->name); printk("%s: warning: ATA-66/100 forced bit set!\n", dev->name);
} else {
if (d->ata66_check)
ch->udma_four = d->ata66_check(ch);
}
#ifdef CONFIG_BLK_DEV_IDEDMA #ifdef CONFIG_BLK_DEV_IDEDMA
/* /*
* Setup DMA transfers on the channel. * Setup DMA transfers on the channel.
*/ */
if (d->flags & ATA_F_NOADMA)
autodma = 0;
if (autodma)
ch->autodma = 1;
if (!((d->flags & ATA_F_DMA) || ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 0x80)))) if (!((d->flags & ATA_F_DMA) || ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 0x80))))
goto no_dma; goto no_dma;
/* /*
...@@ -324,6 +313,10 @@ static int __init setup_host_channel(struct pci_dev *dev, ...@@ -324,6 +313,10 @@ static int __init setup_host_channel(struct pci_dev *dev,
* already enabled by the primary channel run. * already enabled by the primary channel run.
*/ */
pci_set_master(dev); pci_set_master(dev);
if (autodma)
ch->autodma = 1;
if (d->init_dma) if (d->init_dma)
d->init_dma(ch, dma_base); d->init_dma(ch, dma_base);
else else
...@@ -335,6 +328,11 @@ static int __init setup_host_channel(struct pci_dev *dev, ...@@ -335,6 +328,11 @@ static int __init setup_host_channel(struct pci_dev *dev,
if (d->init_channel) if (d->init_channel)
d->init_channel(ch); d->init_channel(ch);
#ifdef CONFIG_BLK_DEV_IDEDMA
if ((d->flags & ATA_F_NOADMA) || noautodma)
ch->autodma = 0;
#endif
return 0; return 0;
} }
......
...@@ -256,11 +256,11 @@ struct { ...@@ -256,11 +256,11 @@ struct {
static void pmac_ide_setup_dma(struct device_node *np, int ix); static void pmac_ide_setup_dma(struct device_node *np, int ix);
static void pmac_udma_enable(struct ata_device *drive, int on, int verbose); static void pmac_udma_enable(struct ata_device *drive, int on, int verbose);
static int pmac_udma_start(struct ata_device *drive, struct request *rq); static void pmac_udma_start(struct ata_device *drive, struct request *rq);
static int pmac_udma_stop(struct ata_device *drive); static int pmac_udma_stop(struct ata_device *drive);
static int pmac_udma_init(struct ata_device *drive, struct request *rq); static int pmac_udma_init(struct ata_device *drive, struct request *rq);
static int pmac_udma_irq_status(struct ata_device *drive); static int pmac_udma_irq_status(struct ata_device *drive);
static int pmac_udma_setup(struct ata_device *drive); static int pmac_udma_setup(struct ata_device *drive, int map);
static int pmac_ide_build_dmatable(struct ata_device *drive, struct request *rq, int ix, int wr); static int pmac_ide_build_dmatable(struct ata_device *drive, struct request *rq, int ix, int wr);
static int pmac_ide_tune_chipset(struct ata_device *drive, byte speed); static int pmac_ide_tune_chipset(struct ata_device *drive, byte speed);
static void pmac_ide_tuneproc(struct ata_device *drive, byte pio); static void pmac_ide_tuneproc(struct ata_device *drive, byte pio);
...@@ -1340,7 +1340,7 @@ static void pmac_udma_enable(struct ata_device *drive, int on, int verbose) ...@@ -1340,7 +1340,7 @@ static void pmac_udma_enable(struct ata_device *drive, int on, int verbose)
ide_toggle_bounce(drive, 0); ide_toggle_bounce(drive, 0);
} }
static int pmac_udma_start(struct ata_device *drive, struct request *rq) static void pmac_udma_start(struct ata_device *drive, struct request *rq)
{ {
int ix, ata4; int ix, ata4;
volatile struct dbdma_regs *dma; volatile struct dbdma_regs *dma;
...@@ -1350,7 +1350,7 @@ static int pmac_udma_start(struct ata_device *drive, struct request *rq) ...@@ -1350,7 +1350,7 @@ static int pmac_udma_start(struct ata_device *drive, struct request *rq)
*/ */
ix = pmac_ide_find(drive); ix = pmac_ide_find(drive);
if (ix < 0) if (ix < 0)
return ide_stopped; return;
dma = pmac_ide[ix].dma_regs; dma = pmac_ide[ix].dma_regs;
ata4 = (pmac_ide[ix].kind == controller_kl_ata4 || ata4 = (pmac_ide[ix].kind == controller_kl_ata4 ||
...@@ -1360,7 +1360,7 @@ static int pmac_udma_start(struct ata_device *drive, struct request *rq) ...@@ -1360,7 +1360,7 @@ static int pmac_udma_start(struct ata_device *drive, struct request *rq)
/* Make sure it gets to the controller right now */ /* Make sure it gets to the controller right now */
(void)in_le32(&dma->control); (void)in_le32(&dma->control);
return ide_started; return;
} }
static int pmac_udma_stop(struct ata_device *drive) static int pmac_udma_stop(struct ata_device *drive)
...@@ -1397,7 +1397,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq) ...@@ -1397,7 +1397,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
*/ */
ix = pmac_ide_find(drive); ix = pmac_ide_find(drive);
if (ix < 0) if (ix < 0)
return ide_stopped; return ATA_OP_FINISHED;
if (rq_data_dir(rq) == READ) if (rq_data_dir(rq) == READ)
reading = 1; reading = 1;
...@@ -1409,7 +1409,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq) ...@@ -1409,7 +1409,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
pmac_ide[ix].kind == controller_kl_ata4_80); pmac_ide[ix].kind == controller_kl_ata4_80);
if (!pmac_ide_build_dmatable(drive, rq, ix, !reading)) if (!pmac_ide_build_dmatable(drive, rq, ix, !reading))
return ide_stopped; return ATA_OP_FINISHED;
/* Apple adds 60ns to wrDataSetup on reads */ /* Apple adds 60ns to wrDataSetup on reads */
if (ata4 && (pmac_ide[ix].timings[unit] & TR_66_UDMA_EN)) { if (ata4 && (pmac_ide[ix].timings[unit] & TR_66_UDMA_EN)) {
out_le32((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG + _IO_BASE), out_le32((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG + _IO_BASE),
...@@ -1419,7 +1419,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq) ...@@ -1419,7 +1419,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
} }
if (drive->type != ATA_DISK) if (drive->type != ATA_DISK)
return ide_started; return ATA_OP_CONTINUES;
ata_set_handler(drive, ide_dma_intr, WAIT_CMD, NULL); ata_set_handler(drive, ide_dma_intr, WAIT_CMD, NULL);
if ((rq->flags & REQ_SPECIAL) && if ((rq->flags & REQ_SPECIAL) &&
...@@ -1435,7 +1435,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq) ...@@ -1435,7 +1435,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
udma_start(drive, rq); udma_start(drive, rq);
return ide_started; return ATA_OP_CONTINUES;
} }
/* /*
...@@ -1491,14 +1491,14 @@ static int pmac_udma_irq_status(struct ata_device *drive) ...@@ -1491,14 +1491,14 @@ static int pmac_udma_irq_status(struct ata_device *drive)
set_bit(IDE_DMA, drive->channel->active); set_bit(IDE_DMA, drive->channel->active);
// if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) { // if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
// printk(KERN_WARNING "ide%d, timeout waiting \ // printk(KERN_WARNING "ide%d, timeout waiting \
for dbdma command stop\n", ix); // for dbdma command stop\n", ix);
return 1; // return 1;
} // }
udelay(1); udelay(1);
return 0; return 0;
} }
static int pmac_udma_setup(struct ata_device *drive) static int pmac_udma_setup(struct ata_device *drive, int map)
{ {
/* Change this to better match ide-dma.c */ /* Change this to better match ide-dma.c */
pmac_ide_check_dma(drive); pmac_ide_check_dma(drive);
......
This diff is collapsed.
...@@ -197,13 +197,14 @@ int drive_is_ready(struct ata_device *drive) ...@@ -197,13 +197,14 @@ int drive_is_ready(struct ata_device *drive)
int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t action) int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t action)
{ {
unsigned long flags; unsigned long flags;
unsigned int major = drive->channel->major; struct ata_channel *ch = drive->channel;
unsigned int major = ch->major;
request_queue_t *q = &drive->queue; request_queue_t *q = &drive->queue;
struct list_head *queue_head = &q->queue_head; struct list_head *queue_head = &q->queue_head;
DECLARE_COMPLETION(wait); DECLARE_COMPLETION(wait);
#ifdef CONFIG_BLK_DEV_PDC4030 #ifdef CONFIG_BLK_DEV_PDC4030
if (drive->channel->chipset == ide_pdc4030 && rq->buffer != NULL) if (ch->chipset == ide_pdc4030 && rq->buffer)
return -ENOSYS; /* special drive cmds not supported */ return -ENOSYS; /* special drive cmds not supported */
#endif #endif
rq->errors = 0; rq->errors = 0;
...@@ -212,22 +213,18 @@ int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t ...@@ -212,22 +213,18 @@ int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t
if (action == ide_wait) if (action == ide_wait)
rq->waiting = &wait; rq->waiting = &wait;
spin_lock_irqsave(drive->channel->lock, flags); spin_lock_irqsave(ch->lock, flags);
if (blk_queue_empty(&drive->queue) || action == ide_preempt) {
if (action == ide_preempt) if (action == ide_preempt)
drive->rq = NULL; drive->rq = NULL;
} else { else if (!blk_queue_empty(&drive->queue))
if (action == ide_wait) queue_head = queue_head->prev; /* ide_end and ide_wait */
queue_head = queue_head->prev;
else __elv_add_request(q, rq, queue_head);
queue_head = queue_head->next;
}
q->elevator.elevator_add_req_fn(q, rq, queue_head);
do_ide_request(q); do_ide_request(q);
spin_unlock_irqrestore(drive->channel->lock, flags); spin_unlock_irqrestore(ch->lock, flags);
if (action == ide_wait) { if (action == ide_wait) {
wait_for_completion(&wait); /* wait for it to be serviced */ wait_for_completion(&wait); /* wait for it to be serviced */
...@@ -235,23 +232,20 @@ int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t ...@@ -235,23 +232,20 @@ int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t
} }
return 0; return 0;
} }
/* /*
* Invoked on completion of a special REQ_SPECIAL command. * Invoked on completion of a special REQ_SPECIAL command.
*/ */
ide_startstop_t ata_special_intr(struct ata_device *drive, struct static ide_startstop_t special_intr(struct ata_device *drive, struct
request *rq) { request *rq) {
struct ata_taskfile *ar = rq->special;
ide_startstop_t ret = ide_stopped;
unsigned long flags; unsigned long flags;
struct ata_channel *ch =drive->channel;
struct ata_taskfile *ar = rq->special;
ide_startstop_t ret = ATA_OP_FINISHED;
ide__sti(); /* local CPU only */ ide__sti();
spin_lock_irqsave(drive->channel->lock, flags);
if (rq->buffer && ar->taskfile.sector_number) { if (rq->buffer && ar->taskfile.sector_number) {
if (!ata_status(drive, 0, DRQ_STAT) && ar->taskfile.sector_number) { if (!ata_status(drive, 0, DRQ_STAT) && ar->taskfile.sector_number) {
...@@ -283,24 +277,27 @@ ide_startstop_t ata_special_intr(struct ata_device *drive, struct ...@@ -283,24 +277,27 @@ ide_startstop_t ata_special_intr(struct ata_device *drive, struct
ata_in_regfile(drive, &ar->hobfile); ata_in_regfile(drive, &ar->hobfile);
} }
spin_lock_irqsave(ch->lock, flags);
blkdev_dequeue_request(rq); blkdev_dequeue_request(rq);
drive->rq = NULL; drive->rq = NULL;
end_that_request_last(rq); end_that_request_last(rq);
spin_unlock_irqrestore(drive->channel->lock, flags); spin_unlock_irqrestore(ch->lock, flags);
return ret; return ret;
} }
int ide_raw_taskfile(struct ata_device *drive, struct ata_taskfile *ar) int ide_raw_taskfile(struct ata_device *drive, struct ata_taskfile *ar, char *buf)
{ {
struct request req; struct request req;
ar->command_type = IDE_DRIVE_TASK_NO_DATA; ar->command_type = IDE_DRIVE_TASK_NO_DATA;
ar->XXX_handler = ata_special_intr; ar->XXX_handler = special_intr;
memset(&req, 0, sizeof(req)); memset(&req, 0, sizeof(req));
req.flags = REQ_SPECIAL; req.flags = REQ_SPECIAL;
req.buffer = buf;
req.special = ar; req.special = ar;
return ide_do_drive_cmd(drive, &req, ide_wait); return ide_do_drive_cmd(drive, &req, ide_wait);
...@@ -310,5 +307,4 @@ EXPORT_SYMBOL(drive_is_ready); ...@@ -310,5 +307,4 @@ EXPORT_SYMBOL(drive_is_ready);
EXPORT_SYMBOL(ide_do_drive_cmd); EXPORT_SYMBOL(ide_do_drive_cmd);
EXPORT_SYMBOL(ata_read); EXPORT_SYMBOL(ata_read);
EXPORT_SYMBOL(ata_write); EXPORT_SYMBOL(ata_write);
EXPORT_SYMBOL(ata_special_intr);
EXPORT_SYMBOL(ide_raw_taskfile); EXPORT_SYMBOL(ide_raw_taskfile);
This diff is collapsed.
...@@ -47,7 +47,6 @@ static int do_cmd_ioctl(struct ata_device *drive, unsigned long arg) ...@@ -47,7 +47,6 @@ static int do_cmd_ioctl(struct ata_device *drive, unsigned long arg)
u8 *argbuf = vals; u8 *argbuf = vals;
int argsize = 4; int argsize = 4;
struct ata_taskfile args; struct ata_taskfile args;
struct request req;
/* Second phase. /* Second phase.
*/ */
...@@ -80,15 +79,7 @@ static int do_cmd_ioctl(struct ata_device *drive, unsigned long arg) ...@@ -80,15 +79,7 @@ static int do_cmd_ioctl(struct ata_device *drive, unsigned long arg)
/* Issue ATA command and wait for completion. /* Issue ATA command and wait for completion.
*/ */
args.command_type = IDE_DRIVE_TASK_NO_DATA; err = ide_raw_taskfile(drive, &args, argbuf + 4);
args.XXX_handler = ata_special_intr;
memset(&req, 0, sizeof(req));
req.flags = REQ_SPECIAL;
req.special = &args;
req.buffer = argbuf + 4;
err = ide_do_drive_cmd(drive, &req, ide_wait);
argbuf[0] = drive->status; argbuf[0] = drive->status;
argbuf[1] = args.taskfile.feature; argbuf[1] = args.taskfile.feature;
...@@ -131,9 +122,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned ...@@ -131,9 +122,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GET_32BIT: { case HDIO_GET_32BIT: {
unsigned long val = drive->channel->io_32bit; unsigned long val = drive->channel->io_32bit;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(val, (unsigned long *) arg)) if (put_user(val, (unsigned long *) arg))
return -EFAULT; return -EFAULT;
return 0; return 0;
...@@ -181,9 +169,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned ...@@ -181,9 +169,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GET_UNMASKINTR: { case HDIO_GET_UNMASKINTR: {
unsigned long val = drive->channel->unmask; unsigned long val = drive->channel->unmask;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(val, (unsigned long *) arg)) if (put_user(val, (unsigned long *) arg))
return -EFAULT; return -EFAULT;
...@@ -211,9 +196,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned ...@@ -211,9 +196,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GET_DMA: { case HDIO_GET_DMA: {
unsigned long val = drive->using_dma; unsigned long val = drive->using_dma;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(val, (unsigned long *) arg)) if (put_user(val, (unsigned long *) arg))
return -EFAULT; return -EFAULT;
...@@ -245,9 +227,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned ...@@ -245,9 +227,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
struct hd_geometry *loc = (struct hd_geometry *) arg; struct hd_geometry *loc = (struct hd_geometry *) arg;
unsigned short bios_cyl = drive->bios_cyl; /* truncate */ unsigned short bios_cyl = drive->bios_cyl; /* truncate */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!loc || (drive->type != ATA_DISK && drive->type != ATA_FLOPPY)) if (!loc || (drive->type != ATA_DISK && drive->type != ATA_FLOPPY))
return -EINVAL; return -EINVAL;
...@@ -270,9 +249,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned ...@@ -270,9 +249,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GETGEO_BIG_RAW: { case HDIO_GETGEO_BIG_RAW: {
struct hd_big_geometry *loc = (struct hd_big_geometry *) arg; struct hd_big_geometry *loc = (struct hd_big_geometry *) arg;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!loc || (drive->type != ATA_DISK && drive->type != ATA_FLOPPY)) if (!loc || (drive->type != ATA_DISK && drive->type != ATA_FLOPPY))
return -EINVAL; return -EINVAL;
...@@ -293,8 +269,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned ...@@ -293,8 +269,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
} }
case HDIO_GET_IDENTITY: case HDIO_GET_IDENTITY:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (minor(inode->i_rdev) & PARTN_MASK) if (minor(inode->i_rdev) & PARTN_MASK)
return -EINVAL; return -EINVAL;
...@@ -308,8 +282,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned ...@@ -308,8 +282,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
return 0; return 0;
case HDIO_GET_NICE: case HDIO_GET_NICE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return put_user(drive->dsc_overlap << IDE_NICE_DSC_OVERLAP | return put_user(drive->dsc_overlap << IDE_NICE_DSC_OVERLAP |
drive->atapi_overlap << IDE_NICE_ATAPI_OVERLAP, drive->atapi_overlap << IDE_NICE_ATAPI_OVERLAP,
...@@ -332,8 +304,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned ...@@ -332,8 +304,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
return 0; return 0;
case HDIO_GET_BUSSTATE: case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(drive->channel->bus_state, (long *)arg)) if (put_user(drive->channel->bus_state, (long *)arg))
return -EFAULT; return -EFAULT;
......
...@@ -179,14 +179,6 @@ static int it8172_tune_chipset(struct ata_device *drive, u8 speed) ...@@ -179,14 +179,6 @@ static int it8172_tune_chipset(struct ata_device *drive, u8 speed)
return ide_config_drive_speed(drive, speed); return ide_config_drive_speed(drive, speed);
} }
static int it8172_udma_setup(struct ata_device *drive)
{
u8 speed = ata_timing_mode(drive, XFER_PIO | XFER_EPIO |
XFER_SWDMA | XFER_MWDMA | XFER_UDMA);
return !it8172_tune_chipset(drive, speed);
}
#endif /* defined(CONFIG_BLK_DEV_IDEDMA) && (CONFIG_IT8172_TUNING) */ #endif /* defined(CONFIG_BLK_DEV_IDEDMA) && (CONFIG_IT8172_TUNING) */
...@@ -216,15 +208,11 @@ static void __init ide_init_it8172(struct ata_channel *hwif) ...@@ -216,15 +208,11 @@ static void __init ide_init_it8172(struct ata_channel *hwif)
if (!hwif->dma_base) if (!hwif->dma_base)
return; return;
#ifndef CONFIG_BLK_DEV_IDEDMA
hwif->autodma = 0;
#else /* CONFIG_BLK_DEV_IDEDMA */
# ifdef CONFIG_IT8172_TUNING # ifdef CONFIG_IT8172_TUNING
hwif->autodma = 1; hwif->modes_map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
hwif->dmaproc = &it8172_dmaproc; hwif->udma_setup = udma_generic_setup;
hwif->speedproc = &it8172_tune_chipset; hwif->speedproc = &it8172_tune_chipset;
# endif # endif
#endif
cmdBase = dev->resource[0].start; cmdBase = dev->resource[0].start;
ctrlBase = dev->resource[1].start; ctrlBase = dev->resource[1].start;
......
...@@ -1074,7 +1074,8 @@ int ide_register_subdriver(struct ata_device *drive, struct ata_operations *driv ...@@ -1074,7 +1074,8 @@ int ide_register_subdriver(struct ata_device *drive, struct ata_operations *driv
spin_unlock_irqrestore(&ide_lock, flags); spin_unlock_irqrestore(&ide_lock, flags);
/* Default autotune or requested autotune */ /* Default autotune or requested autotune */
if (drive->autotune != 2) { if (drive->autotune != 2) {
if (drive->channel->udma_setup) { struct ata_channel *ch = drive->channel;
if (ch->udma_setup) {
/* /*
* Force DMAing for the beginning of the check. Some * Force DMAing for the beginning of the check. Some
...@@ -1085,7 +1086,7 @@ int ide_register_subdriver(struct ata_device *drive, struct ata_operations *driv ...@@ -1085,7 +1086,7 @@ int ide_register_subdriver(struct ata_device *drive, struct ata_operations *driv
*/ */
udma_enable(drive, 0, 0); udma_enable(drive, 0, 0);
drive->channel->udma_setup(drive); ch->udma_setup(drive, ch->modes_map);
#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT #ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
udma_tcq_enable(drive, 1); udma_tcq_enable(drive, 1);
#endif #endif
......
...@@ -105,21 +105,21 @@ static int ns87415_udma_init(struct ata_device *drive, struct request *rq) ...@@ -105,21 +105,21 @@ static int ns87415_udma_init(struct ata_device *drive, struct request *rq)
ns87415_prepare_drive(drive, 1); /* select DMA xfer */ ns87415_prepare_drive(drive, 1); /* select DMA xfer */
if (udma_pci_init(drive, rq)) /* use standard DMA stuff */ if (udma_pci_init(drive, rq)) /* use standard DMA stuff */
return ide_started; return ATA_OP_CONTINUES;
ns87415_prepare_drive(drive, 0); /* DMA failed: select PIO xfer */ ns87415_prepare_drive(drive, 0); /* DMA failed: select PIO xfer */
return ide_stopped; return ATA_OP_FINISHED;
} }
static int ns87415_udma_setup(struct ata_device *drive) static int ns87415_udma_setup(struct ata_device *drive, int map)
{ {
if (drive->type != ATA_DISK) { if (drive->type != ATA_DISK) {
udma_enable(drive, 0, 0); udma_enable(drive, 0, 0);
return 0; return 0;
} }
return udma_pci_setup(drive); return udma_pci_setup(drive, map);
} }
#endif #endif
......
...@@ -27,6 +27,8 @@ ...@@ -27,6 +27,8 @@
#include <linux/ide.h> #include <linux/ide.h>
#include <linux/delay.h> #include <linux/delay.h>
#include "ata-timing.h"
#include <asm/io.h> #include <asm/io.h>
#include <asm/irq.h> #include <asm/irq.h>
...@@ -44,18 +46,9 @@ ide_startstop_t ide_dma_intr(struct ata_device *drive, struct request *rq) ...@@ -44,18 +46,9 @@ ide_startstop_t ide_dma_intr(struct ata_device *drive, struct request *rq)
if (ata_status(drive, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) { if (ata_status(drive, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
if (!dma_stat) { if (!dma_stat) {
unsigned long flags;
struct ata_channel *ch = drive->channel;
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
__ata_end_request(drive, rq, 1, rq->nr_sectors); __ata_end_request(drive, rq, 1, rq->nr_sectors);
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped; return ATA_OP_FINISHED;
} }
printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n", printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
drive->name, dma_stat); drive->name, dma_stat);
...@@ -128,7 +121,7 @@ static int build_sglist(struct ata_device *drive, struct request *rq) ...@@ -128,7 +121,7 @@ static int build_sglist(struct ata_device *drive, struct request *rq)
/* /*
* 1 dma-ing, 2 error, 4 intr * 1 dma-ing, 2 error, 4 intr
*/ */
static int dma_timer_expiry(struct ata_device *drive, struct request *rq) static ide_startstop_t dma_timer_expiry(struct ata_device *drive, struct request *rq, unsigned long *wait)
{ {
/* FIXME: What's that? */ /* FIXME: What's that? */
u8 dma_stat = inb(drive->channel->dma_base + 2); u8 dma_stat = inb(drive->channel->dma_base + 2);
...@@ -140,15 +133,17 @@ static int dma_timer_expiry(struct ata_device *drive, struct request *rq) ...@@ -140,15 +133,17 @@ static int dma_timer_expiry(struct ata_device *drive, struct request *rq)
#if 0 #if 0
drive->expiry = NULL; /* one free ride for now */ drive->expiry = NULL; /* one free ride for now */
#endif #endif
*wait = 0;
if (dma_stat & 2) { /* ERROR */ if (dma_stat & 2) { /* ERROR */
ata_status(drive, 0, 0); ata_status(drive, 0, 0);
return ata_error(drive, rq, __FUNCTION__); return ata_error(drive, rq, __FUNCTION__);
} }
if (dma_stat & 1) /* DMAing */ if (dma_stat & 1) { /* DMAing */
return WAIT_CMD; *wait = WAIT_CMD;
return ATA_OP_CONTINUES;
}
return 0; return ATA_OP_FINISHED;
} }
int ata_start_dma(struct ata_device *drive, struct request *rq) int ata_start_dma(struct ata_device *drive, struct request *rq)
...@@ -171,10 +166,73 @@ int ata_start_dma(struct ata_device *drive, struct request *rq) ...@@ -171,10 +166,73 @@ int ata_start_dma(struct ata_device *drive, struct request *rq)
return 0; return 0;
} }
/* generic udma_setup() function for drivers having ->speedproc/tuneproc */
int udma_generic_setup(struct ata_device *drive, int map)
{
struct hd_driveid *id = drive->id;
struct ata_channel *ch = drive->channel;
int on = 0;
u8 mode;
if (!id || (drive->type != ATA_DISK && ch->no_atapi_autodma))
return 0;
if ((map & XFER_UDMA_80W) && !eighty_ninty_three(drive))
map &= ~XFER_UDMA_80W;
if ((id->capability & 1) && ch->autodma && ch->speedproc) {
/* Consult the list of known "bad" devices. */
if (udma_black_list(drive))
goto set_dma;
mode = ata_timing_mode(drive, map);
/* Device is UltraDMA capable. */
if (mode & XFER_UDMA) {
if((on = !ch->speedproc(drive, mode)))
goto set_dma;
printk(KERN_WARNING "%s: UDMA auto-tune failed.\n", drive->name);
map &= ~XFER_UDMA_ALL;
mode = ata_timing_mode(drive, map);
}
/* Device is regular DMA capable. */
if (mode & (XFER_SWDMA | XFER_MWDMA)) {
if((on = !ch->speedproc(drive, mode)))
goto set_dma;
printk(KERN_WARNING "%s: DMA auto-tune failed.\n", drive->name);
}
/* FIXME: this seems non-functional --bkz */
/* Consult the list of known "good" devices. */
if (udma_white_list(drive)) {
if (id->eide_dma_time > 150)
goto set_dma;
printk(KERN_INFO "%s: device is on DMA whitelist.\n", drive->name);
// on = 1;
}
/* Revert to PIO. */
if (!on && ch->tuneproc)
ch->tuneproc(drive, 255);
}
set_dma:
udma_enable(drive, on, !on);
return 0;
}
/* /*
* Configure a device for DMA operation. * Configure a device for DMA operation.
*/ */
int udma_pci_setup(struct ata_device *drive) int udma_pci_setup(struct ata_device *drive, int map)
{ {
int config_allows_dma = 1; int config_allows_dma = 1;
struct hd_driveid *id = drive->id; struct hd_driveid *id = drive->id;
...@@ -399,8 +457,6 @@ int udma_new_table(struct ata_device *drive, struct request *rq) ...@@ -399,8 +457,6 @@ int udma_new_table(struct ata_device *drive, struct request *rq)
/* /*
* Teardown mappings after DMA has completed. * Teardown mappings after DMA has completed.
*
* Channel lock should be held.
*/ */
void udma_destroy_table(struct ata_channel *ch) void udma_destroy_table(struct ata_channel *ch)
{ {
...@@ -411,8 +467,6 @@ void udma_destroy_table(struct ata_channel *ch) ...@@ -411,8 +467,6 @@ void udma_destroy_table(struct ata_channel *ch)
 * Prepare the channel for a DMA transfer. Please note that only the broken * Prepare the channel for a DMA transfer. Please note that only the broken
 * Pacific Digital host chip needs the request to be passed there to decide * Pacific Digital host chip needs the request to be passed there to decide
* about addressing modes. * about addressing modes.
*
* Channel lock should be held.
*/ */
void udma_pci_start(struct ata_device *drive, struct request *rq) void udma_pci_start(struct ata_device *drive, struct request *rq)
{ {
...@@ -426,9 +480,6 @@ void udma_pci_start(struct ata_device *drive, struct request *rq) ...@@ -426,9 +480,6 @@ void udma_pci_start(struct ata_device *drive, struct request *rq)
outb(inb(dma_base) | 1, dma_base); /* start DMA */ outb(inb(dma_base) | 1, dma_base); /* start DMA */
} }
/*
* Channel lock should be held.
*/
int udma_pci_stop(struct ata_device *drive) int udma_pci_stop(struct ata_device *drive)
{ {
struct ata_channel *ch = drive->channel; struct ata_channel *ch = drive->channel;
...@@ -445,8 +496,6 @@ int udma_pci_stop(struct ata_device *drive) ...@@ -445,8 +496,6 @@ int udma_pci_stop(struct ata_device *drive)
/* /*
* FIXME: This should be attached to a channel as we can see now! * FIXME: This should be attached to a channel as we can see now!
*
* Channel lock should be held.
*/ */
int udma_pci_irq_status(struct ata_device *drive) int udma_pci_irq_status(struct ata_device *drive)
{ {
...@@ -533,19 +582,17 @@ void ata_init_dma(struct ata_channel *ch, unsigned long dma_base) ...@@ -533,19 +582,17 @@ void ata_init_dma(struct ata_channel *ch, unsigned long dma_base)
* *
* It's exported only for host chips which use it for fallback or (too) late * It's exported only for host chips which use it for fallback or (too) late
* capability checking. * capability checking.
*
* Channel lock should be held.
*/ */
int udma_pci_init(struct ata_device *drive, struct request *rq) int udma_pci_init(struct ata_device *drive, struct request *rq)
{ {
u8 cmd; u8 cmd;
if (ata_start_dma(drive, rq)) if (ata_start_dma(drive, rq))
return ide_stopped; return ATA_OP_FINISHED;
/* No DMA transfers on ATAPI devices. */ /* No DMA transfers on ATAPI devices. */
if (drive->type != ATA_DISK) if (drive->type != ATA_DISK)
return ide_started; return ATA_OP_CONTINUES;
if (rq_data_dir(rq) == READ) if (rq_data_dir(rq) == READ)
cmd = 0x08; cmd = 0x08;
...@@ -560,7 +607,7 @@ int udma_pci_init(struct ata_device *drive, struct request *rq) ...@@ -560,7 +607,7 @@ int udma_pci_init(struct ata_device *drive, struct request *rq)
udma_start(drive, rq); udma_start(drive, rq);
return ide_started; return ATA_OP_CONTINUES;
} }
EXPORT_SYMBOL(ide_dma_intr); EXPORT_SYMBOL(ide_dma_intr);
......
...@@ -117,7 +117,6 @@ struct ata_pci_device { ...@@ -117,7 +117,6 @@ struct ata_pci_device {
unsigned short vendor; unsigned short vendor;
unsigned short device; unsigned short device;
unsigned int (*init_chipset)(struct pci_dev *); unsigned int (*init_chipset)(struct pci_dev *);
unsigned int (*ata66_check)(struct ata_channel *);
void (*init_channel)(struct ata_channel *); void (*init_channel)(struct ata_channel *);
void (*init_dma)(struct ata_channel *, unsigned long); void (*init_dma)(struct ata_channel *, unsigned long);
ide_pci_enablebit_t enablebits[2]; ide_pci_enablebit_t enablebits[2];
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -312,8 +312,7 @@ byte eighty_ninty_three(struct ata_device *drive) ...@@ -312,8 +312,7 @@ byte eighty_ninty_three(struct ata_device *drive)
int ide_config_drive_speed(struct ata_device *drive, byte speed) int ide_config_drive_speed(struct ata_device *drive, byte speed)
{ {
struct ata_channel *ch = drive->channel; struct ata_channel *ch = drive->channel;
int i; int ret;
int error = 1;
#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(__CRIS__) #if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(__CRIS__)
u8 unit = (drive->select.b.unit & 0x01); u8 unit = (drive->select.b.unit & 0x01);
...@@ -338,33 +337,14 @@ int ide_config_drive_speed(struct ata_device *drive, byte speed) ...@@ -338,33 +337,14 @@ int ide_config_drive_speed(struct ata_device *drive, byte speed)
if (drive->quirk_list == 2) if (drive->quirk_list == 2)
ata_irq_enable(drive, 1); ata_irq_enable(drive, 1);
udelay(1); udelay(1);
ret = ata_status_poll(drive, 0, BUSY_STAT, WAIT_CMD, NULL);
/* FIXME: use ata_status_poll() --bkz */
ata_busy_poll(drive, WAIT_CMD);
/*
* Allow status to settle, then read it again.
* A few rare drives vastly violate the 400ns spec here,
* so we'll wait up to 10usec for a "good" status
* rather than expensively fail things immediately.
* This fix courtesy of Matthew Faupel & Niccolo Rigacci.
*/
for (i = 0; i < 10; i++) {
udelay(1);
if (ata_status(drive, DRIVE_READY, BUSY_STAT | DRQ_STAT | ERR_STAT)) {
error = 0;
break;
}
}
ata_mask(drive); ata_mask(drive);
enable_irq(ch->irq); enable_irq(ch->irq);
if (error) { if (ret != ATA_OP_READY) {
ata_dump(drive, NULL, "set drive speed"); ata_dump(drive, NULL, "set drive speed");
return error; return 1;
} }
drive->id->dma_ultra &= ~0xFF00; drive->id->dma_ultra &= ~0xFF00;
...@@ -399,7 +379,7 @@ int ide_config_drive_speed(struct ata_device *drive, byte speed) ...@@ -399,7 +379,7 @@ int ide_config_drive_speed(struct ata_device *drive, byte speed)
drive->current_speed = speed; drive->current_speed = speed;
return error; return 0;
} }
static inline void do_identify(struct ata_device *drive, u8 cmd) static inline void do_identify(struct ata_device *drive, u8 cmd)
......
This diff is collapsed.
This diff is collapsed.
...@@ -130,7 +130,7 @@ static int config_for_dma(struct ata_device *drive) ...@@ -130,7 +130,7 @@ static int config_for_dma(struct ata_device *drive)
* Check to see if the drive and * Check to see if the drive and
* chipset is capable of DMA mode * chipset is capable of DMA mode
*/ */
static int sl82c105_dma_setup(struct ata_device *drive) static int sl82c105_dma_setup(struct ata_device *drive, int map)
{ {
int on = 0; int on = 0;
...@@ -333,7 +333,6 @@ static void __init sl82c105_init_dma(struct ata_channel *ch, unsigned long dma_b ...@@ -333,7 +333,6 @@ static void __init sl82c105_init_dma(struct ata_channel *ch, unsigned long dma_b
dma_state &= ~0x60; dma_state &= ~0x60;
} else { } else {
dma_state |= 0x60; dma_state |= 0x60;
ch->autodma = 1;
} }
outb(dma_state, dma_base + 2); outb(dma_state, dma_base + 2);
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -283,7 +283,9 @@ extern void generic_make_request(struct bio *bio); ...@@ -283,7 +283,9 @@ extern void generic_make_request(struct bio *bio);
extern inline request_queue_t *bdev_get_queue(struct block_device *bdev); extern inline request_queue_t *bdev_get_queue(struct block_device *bdev);
extern void blkdev_release_request(struct request *); extern void blkdev_release_request(struct request *);
extern void blk_attempt_remerge(request_queue_t *, struct request *); extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern void __blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, int); extern struct request *blk_get_request(request_queue_t *, int, int);
extern struct request *__blk_get_request(request_queue_t *, int);
extern void blk_put_request(struct request *); extern void blk_put_request(struct request *);
extern void blk_plug_device(request_queue_t *); extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *); extern int blk_remove_plug(request_queue_t *);
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment