Commit 2dbd1502 authored by Martin Dalecki's avatar Martin Dalecki Committed by Linus Torvalds

[PATCH] IDE 98

Synchronize with 2.5.25.

Incorporate IDE-94, as well as 95, 96, 97 and 98-pre as announced by Bartek and
unfortunately still not included in 2.5.25, which admittedly makes things
still fall apart:

Missing changelog for 98-pre by Bartlomiej Zolnierkiewicz (BTW, handling
Unicode should be essential, at least to make proper crediting of many, many
people possible!) follows here:

 - add missing channel->lock unlocking/locking and fix some comments
   in ide_timer_expiry()

 - allow PCI drivers to disable autodma in ->init_dma()
   (bug introduced in IDE 97, affects sl82c105.c only)

   noticed by Russell King

 - alim15x3.c, if revision is <= 0x20 disable autodma

 - remove unneeded checks (drive.dn > 3) from pdc202xx.c and sis5513.c

 - use block layer wrappers

And my additions follow:

 - Fix TCQ code. Patch based on work by Alexander Atanasov.

 - Use the FreeBSD derived request handler return values:

	ATA_OP_FINISHED
	ATA_OP_CONTINUES
	ATA_OP_RELEASED
	ATA_OP_READY	/* for status ready reporting during poll */

 - PMAC compilation fix by Paul Mackerras.

 - Simplify the ata_status_poll function significantly.

 - Fix logic used to prevent drive IRQ assertion from drive on channels sharing
   our interrupt.

NOTE: We will soon move this to the point where a request is actually
finished.

 - Don't use ata_busy_poll() use ata_status_poll() instead. This increases code
   unification.

NOTE: We should maybe invent some way to prevent the error recovery path from
being taken at all — especially to prevent ata_error from trying to reissue commands.
parent 5b2a1577
......@@ -703,11 +703,11 @@ static ide_startstop_t etrax_dma_intr(struct ata_device *drive, struct request *
i -= rq->current_nr_sectors;
ide_end_request(drive, rq, 1);
}
return ide_stopped;
return ATA_OP_FINISHED;
}
printk("%s: bad DMA status\n", drive->name);
}
return ata_error(drive, __FUNCTION__);
return ata_error(drive, rq, __FUNCTION__);
}
/*
......
......@@ -1202,6 +1202,26 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
return rq;
}
/*
* Non-locking blk_get_request variant, for special requests from drivers.
*/
struct request *__blk_get_request(request_queue_t *q, int rw)
{
struct request *rq;
/* only plain READ/WRITE requests are supported by this path */
BUG_ON(rw != READ && rw != WRITE);
/* NOTE(review): unlike blk_get_request, no queue locking is done here --
 * presumably the caller already holds q->queue_lock; confirm at call sites. */
rq = get_request(q, rw);
/* get_request may fail and return NULL; only initialize on success */
if (rq) {
rq->flags = 0;
rq->buffer = NULL;
rq->bio = rq->biotail = NULL;
rq->waiting = NULL;
}
return rq;
}
void blk_put_request(struct request *rq)
{
blkdev_release_request(rq);
......@@ -1381,6 +1401,14 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
* Non-locking blk_attempt_remerge variant.
*/
void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
{
/* Non-locking variant: blk_attempt_remerge takes q->queue_lock itself,
 * while here the caller is presumably expected to hold it -- verify. */
attempt_back_merge(q, rq);
}
static int __make_request(request_queue_t *q, struct bio *bio)
{
struct request *req, *freereq = NULL;
......@@ -2039,6 +2067,7 @@ EXPORT_SYMBOL(generic_unplug_device);
EXPORT_SYMBOL(blk_plug_device);
EXPORT_SYMBOL(blk_remove_plug);
EXPORT_SYMBOL(blk_attempt_remerge);
EXPORT_SYMBOL(__blk_attempt_remerge);
EXPORT_SYMBOL(blk_max_low_pfn);
EXPORT_SYMBOL(blk_max_pfn);
EXPORT_SYMBOL(blk_queue_max_sectors);
......@@ -2055,6 +2084,7 @@ EXPORT_SYMBOL(blk_queue_assign_lock);
EXPORT_SYMBOL(blk_phys_contig_segment);
EXPORT_SYMBOL(blk_hw_contig_segment);
EXPORT_SYMBOL(blk_get_request);
EXPORT_SYMBOL(__blk_get_request);
EXPORT_SYMBOL(blk_put_request);
EXPORT_SYMBOL(blk_queue_prep_rq);
......
......@@ -160,16 +160,15 @@ static void aec62xx_tune_drive(struct ata_device *drive, unsigned char pio)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int aec62xx_udma_setup(struct ata_device *drive)
static int __init aec62xx_modes_map(struct ata_channel *ch)
{
u32 bmide = pci_resource_start(drive->channel->pci_dev, 4);
short speed;
u32 bmide = pci_resource_start(ch->pci_dev, 4);
int map;
map = XFER_PIO | XFER_EPIO | XFER_MWDMA | XFER_UDMA | XFER_SWDMA | XFER_UDMA;
map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
if (drive->channel->udma_four)
switch (drive->channel->pci_dev->device) {
if (ch->udma_four)
switch (ch->pci_dev->device) {
case PCI_DEVICE_ID_ARTOP_ATP865R:
case PCI_DEVICE_ID_ARTOP_ATP865:
/* Can't use these modes simultaneously,
......@@ -180,11 +179,7 @@ static int aec62xx_udma_setup(struct ata_device *drive)
map |= XFER_UDMA_66;
}
speed = ata_timing_mode(drive, map);
aec_set_drive(drive, speed);
udma_enable(drive, drive->channel->autodma && (speed & XFER_MODE) != XFER_PIO, 0);
return 0;
return map;
}
#endif
......@@ -256,11 +251,12 @@ static void __init aec62xx_init_channel(struct ata_channel *ch)
ch->tuneproc = aec62xx_tune_drive;
ch->speedproc = aec_set_drive;
ch->autodma = 0;
ch->io_32bit = 1;
ch->unmask = 1;
ch->udma_four = aec62xx_ata66_check(ch);
for (i = 0; i < 2; i++) {
ch->drives[i].autotune = 1;
ch->drives[i].dn = ch->unit * 2 + i;
......@@ -269,11 +265,8 @@ static void __init aec62xx_init_channel(struct ata_channel *ch)
#ifdef CONFIG_BLK_DEV_IDEDMA
if (ch->dma_base) {
ch->highmem = 1;
ch->udma_setup = aec62xx_udma_setup;
#ifdef CONFIG_IDEDMA_AUTO
if (!noautodma)
ch->autodma = 1;
#endif
ch->modes_map = aec62xx_modes_map(ch);
ch->udma_setup = udma_generic_setup;
}
#endif
}
......@@ -306,17 +299,15 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP860,
init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: NEVER_BOARD,
flags: ATA_F_IRQ | ATA_F_NOADMA | ATA_F_DMA
flags: ATA_F_IRQ | ATA_F_DMA
},
{
vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP860R,
init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: OFF_BOARD,
......@@ -326,7 +317,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP865,
init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: NEVER_BOARD,
......@@ -336,7 +326,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP865R,
init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: OFF_BOARD,
......
......@@ -99,43 +99,6 @@ static void ali15x3_tune_drive(struct ata_device *drive, byte pio)
__restore_flags(flags);
}
static byte ali15x3_can_ultra(struct ata_device *drive)
{
if (m5229_revision <= 0x20) {
return 0;
} else if ((m5229_revision < 0xC2) &&
#ifndef CONFIG_WDC_ALI15X3
((chip_is_1543c_e && strstr(drive->id->model, "WDC ")) ||
(drive->type != ATA_DISK))) {
#else
(drive->type != ATA_DISK)) {
#endif
return 0;
} else {
return 1;
}
}
static int ali15x3_ratemask(struct ata_device *drive)
{
int map = 0;
if (!ali15x3_can_ultra(drive))
return 0;
map |= XFER_UDMA;
if (!eighty_ninty_three(drive))
return map;
if (m5229_revision >= 0xC4)
map |= XFER_UDMA_100;
if (m5229_revision >= 0xC2)
map |= XFER_UDMA_66;
return map;
}
static int ali15x3_tune_chipset(struct ata_device *drive, byte speed)
{
struct pci_dev *dev = drive->channel->pci_dev;
......@@ -156,6 +119,7 @@ static int ali15x3_tune_chipset(struct ata_device *drive, byte speed)
if (speed < XFER_SW_DMA_0)
ali15x3_tune_drive(drive, speed);
#ifdef CONFIG_BLK_DEV_IDEDMA
/* FIXME: no support for MWDMA and SWDMA modes --bkz */
else if (speed >= XFER_UDMA_0) {
pci_read_config_byte(dev, m5229_udma, &tmpbyte);
tmpbyte &= (0x0f << ((1-unit) << 2));
......@@ -176,91 +140,40 @@ static int ali15x3_tune_chipset(struct ata_device *drive, byte speed)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive, u8 udma)
static int ali15x3_udma_setup(struct ata_device *drive, int map)
{
int map;
u8 mode;
if (udma)
map = ali15x3_ratemask(drive);
else
map = XFER_SWDMA | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
if (mode < XFER_SW_DMA_0)
return 0;
return !ali15x3_tune_chipset(drive, mode);
#ifndef CONFIG_WDC_ALI15X3
if ((m5229_revision < 0xC2) && chip_is_1543c_e &&
strstr(drive->id->model, "WDC "))
map &= ~XFER_UDMA_ALL;
#endif
return udma_generic_setup(drive, map);
}
static int ali15x3_udma_setup(struct ata_device *drive)
static int ali15x3_udma_init(struct ata_device *drive, struct request *rq)
{
struct hd_driveid *id = drive->id;
struct ata_channel *hwif = drive->channel;
int on = 1;
int verbose = 1;
byte can_ultra_dma = ali15x3_can_ultra(drive);
if ((m5229_revision < 0xC2) && (drive->type != ATA_DISK))
return ATA_OP_FINISHED; /* try PIO instead of DMA */
if ((m5229_revision<=0x20) && (drive->type != ATA_DISK)) {
udma_enable(drive, 0, 0);
return 0;
}
return udma_pci_init(drive, rq);
}
if ((id != NULL) && ((id->capability & 1) != 0) && hwif->autodma) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if ((id->field_valid & 4) && (m5229_revision >= 0xC2)) {
if (id->dma_ultra & 0x003F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, can_ultra_dma);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, can_ultra_dma);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, can_ultra_dma);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
ali15x3_tune_drive(drive, 255);
}
static int __init ali15x3_modes_map(struct ata_channel *ch)
{
int map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA;
udma_enable(drive, on, verbose);
if (m5229_revision <= 0x20)
return map;
return 0;
}
map |= XFER_UDMA;
static int ali15x3_udma_init(struct ata_device *drive, struct request *rq)
{
if ((m5229_revision < 0xC2) && (drive->type != ATA_DISK))
return ide_stopped; /* try PIO instead of DMA */
if (m5229_revision >= 0xC2) {
map |= XFER_UDMA_66;
if (m5229_revision >= 0xC4)
map |= XFER_UDMA_100;
}
return udma_pci_init(drive, rq);
return map;
}
#endif
......@@ -426,6 +339,8 @@ static void __init ali15x3_init_channel(struct ata_channel *hwif)
}
#endif /* CONFIG_SPARC64 */
hwif->udma_four = ali15x3_ata66_check(hwif);
hwif->tuneproc = &ali15x3_tune_drive;
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
......@@ -436,22 +351,21 @@ static void __init ali15x3_init_channel(struct ata_channel *hwif)
/*
* M1543C or newer for DMAing
*/
hwif->udma_init = ali15x3_udma_init;
hwif->modes_map = ali15x3_modes_map(hwif);
if (m5229_revision < 0xC2)
hwif->no_atapi_autodma = 1;
hwif->udma_setup = ali15x3_udma_setup;
hwif->autodma = 1;
hwif->udma_init = ali15x3_udma_init;
}
if (noautodma)
hwif->autodma = 0;
#else
hwif->autodma = 0;
#endif
}
static void __init ali15x3_init_dma(struct ata_channel *ch, unsigned long dmabase)
{
if ((dmabase) && (m5229_revision < 0x20))
if (dmabase && (m5229_revision < 0x20)) {
ch->autodma = 0;
return;
}
ata_init_dma(ch, dmabase);
}
......@@ -472,7 +386,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AL,
device: PCI_DEVICE_ID_AL_M5229,
init_chipset: ali15x3_init_chipset,
ata66_check: ali15x3_ata66_check,
init_channel: ali15x3_init_channel,
init_dma: ali15x3_init_dma,
enablebits: { {0x00,0x00,0x00}, {0x00,0x00,0x00} },
......
......@@ -175,21 +175,15 @@ static void amd74xx_tune_drive(struct ata_device *drive, u8 pio)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int amd74xx_udma_setup(struct ata_device *drive)
static int __init amd_modes_map(struct ata_channel *ch)
{
short w80 = drive->channel->udma_four;
short w80 = ch->udma_four;
int map = XFER_EPIO | XFER_MWDMA | XFER_UDMA |
((amd_config->flags & AMD_BAD_SWDMA) ? 0 : XFER_SWDMA) |
(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_66 ? XFER_UDMA_66 : 0) |
(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_100 ? XFER_UDMA_100 : 0);
short speed = ata_timing_mode(drive,
XFER_PIO | XFER_EPIO | XFER_MWDMA | XFER_UDMA |
((amd_config->flags & AMD_BAD_SWDMA) ? 0 : XFER_SWDMA) |
(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_66 ? XFER_UDMA_66 : 0) |
(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_100 ? XFER_UDMA_100 : 0));
amd_set_drive(drive, speed);
udma_enable(drive, drive->channel->autodma && (speed & XFER_MODE) != XFER_PIO, 0);
return 0;
return map;
}
#endif
......@@ -274,9 +268,10 @@ static void __init amd74xx_init_channel(struct ata_channel *hwif)
{
int i;
hwif->udma_four = amd74xx_ata66_check(hwif);
hwif->tuneproc = &amd74xx_tune_drive;
hwif->speedproc = &amd_set_drive;
hwif->autodma = 0;
hwif->io_32bit = 1;
hwif->unmask = 1;
......@@ -289,11 +284,8 @@ static void __init amd74xx_init_channel(struct ata_channel *hwif)
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->highmem = 1;
hwif->udma_setup = amd74xx_udma_setup;
# ifdef CONFIG_IDEDMA_AUTO
if (!noautodma)
hwif->autodma = 1;
# endif
hwif->modes_map = amd_modes_map(hwif);
hwif->udma_setup = udma_generic_setup;
}
#endif
}
......@@ -314,7 +306,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_COBRA_7401,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
......@@ -324,7 +315,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_VIPER_7409,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
......@@ -335,7 +325,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_VIPER_7411,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
......@@ -345,7 +334,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_OPUS_7441,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
......@@ -355,7 +343,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_8111_IDE,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
......@@ -365,7 +352,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_NVIDIA,
device: PCI_DEVICE_ID_NVIDIA_NFORCE_IDE,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x50,0x01,0x01}, {0x50,0x02,0x02}},
......
......@@ -86,9 +86,11 @@ short ata_timing_mode(struct ata_device *drive, int map)
if ((map & XFER_UDMA_100) == XFER_UDMA_100)
if ((best = (id->dma_ultra & 0x0020) ? XFER_UDMA_5 : 0)) return best;
if ((map & XFER_UDMA_66) == XFER_UDMA_66)
if ((best = (id->dma_ultra & 0x0010) ? XFER_UDMA_4 :
(id->dma_ultra & 0x0008) ? XFER_UDMA_3 : 0)) return best;
if ((map & XFER_UDMA_66_4) == XFER_UDMA_66_4)
if ((best = (id->dma_ultra & 0x0010) ? XFER_UDMA_4 : 0)) return best;
if ((map & XFER_UDMA_66_3) == XFER_UDMA_66_3)
if ((best = (id->dma_ultra & 0x0008) ? XFER_UDMA_3 : 0)) return best;
if ((best = (id->dma_ultra & 0x0004) ? XFER_UDMA_2 :
(id->dma_ultra & 0x0002) ? XFER_UDMA_1 :
......
......@@ -59,15 +59,22 @@ extern struct ata_timing ata_timing[];
#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
#define XFER_MODE 0xf0
#define XFER_UDMA_133 0x48
#define XFER_UDMA_100 0x44
#define XFER_UDMA_66 0x42
#define XFER_UDMA 0x40
#define XFER_MWDMA 0x20
#define XFER_SWDMA 0x10
#define XFER_EPIO 0x01
#define XFER_PIO 0x00
/* see hpt366.c for details */
#define XFER_UDMA_66_3 0x100
#define XFER_UDMA_66_4 0x200
#define XFER_MODE 0xff0
#define XFER_UDMA_133 0x800
#define XFER_UDMA_100 0x400
#define XFER_UDMA_66 0x300
#define XFER_UDMA 0x040
#define XFER_MWDMA 0x020
#define XFER_SWDMA 0x010
#define XFER_EPIO 0x001
#define XFER_PIO 0x000
#define XFER_UDMA_ALL 0xf40
#define XFER_UDMA_80W 0xf00
/* External interface to host chips channel timing setup.
*
......
......@@ -217,10 +217,10 @@ static void cmd64x_tuneproc(struct ata_device *drive, u8 pio)
ide_config_drive_speed(drive, speed);
}
static int cmd64x_ratemask(struct ata_device *drive)
static int __init cmd6xx_modes_map(struct ata_channel *ch)
{
struct pci_dev *dev = drive->channel->pci_dev;
int map = 0;
struct pci_dev *dev = ch->pci_dev;
int map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA;
switch(dev->device) {
case PCI_DEVICE_ID_CMD_680:
......@@ -234,10 +234,9 @@ static int cmd64x_ratemask(struct ata_device *drive)
break;
case PCI_DEVICE_ID_CMD_646:
{
u32 class_rev;
pci_read_config_dword(dev,
PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xff;
u32 rev;
pci_read_config_dword(dev, PCI_CLASS_REVISION, &rev);
rev &= 0xff;
/*
* UltraDMA only supported on PCI646U and PCI646U2, which
* correspond to revisions 0x03, 0x05 and 0x07 respectively.
......@@ -250,7 +249,7 @@ static int cmd64x_ratemask(struct ata_device *drive)
*
* So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
*/
switch(class_rev) {
switch(rev) {
case 0x07:
case 0x05:
map |= XFER_UDMA;
......@@ -260,11 +259,6 @@ static int cmd64x_ratemask(struct ata_device *drive)
}
}
if (!eighty_ninty_three(drive)) {
if (map & XFER_UDMA)
return XFER_UDMA;
return 0;
}
return map;
}
......@@ -515,80 +509,6 @@ speed_break :
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive, u8 udma)
{
int map;
u8 mode;
if (udma)
map = cmd64x_ratemask(drive);
else
map = XFER_SWDMA | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
return !drive->channel->speedproc(drive, mode);
}
static int cmd6xx_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
struct ata_channel *hwif = drive->channel;
int on = 1;
int verbose = 1;
hwif->tuneproc(drive, 255);
if ((id != NULL) && ((id->capability & 1) != 0) &&
hwif->autodma && (drive->type == ATA_DISK)) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if ((id->field_valid & 4)) {
if (id->dma_ultra & 0x007F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, 1);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
hwif->tuneproc(drive, 255);
}
udma_enable(drive, on, verbose);
return 0;
}
static int cmd64x_udma_stop(struct ata_device *drive)
{
struct ata_channel *ch = drive->channel;
......@@ -822,13 +742,6 @@ static unsigned int cmd64x_ata66(struct ata_channel *hwif)
return (ata66 & mask) ? 1 : 0;
}
static unsigned int __init cmd64x_ata66_check(struct ata_channel *hwif)
{
if (hwif->pci_dev->device == PCI_DEVICE_ID_CMD_680)
return cmd680_ata66(hwif);
return cmd64x_ata66(hwif);
}
static void __init cmd64x_init_channel(struct ata_channel *hwif)
{
struct pci_dev *dev = hwif->pci_dev;
......@@ -843,32 +756,28 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif)
switch(dev->device) {
case PCI_DEVICE_ID_CMD_680:
hwif->busproc = cmd680_busproc;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base)
hwif->udma_setup = cmd6xx_udma_setup;
#endif
hwif->resetproc = cmd680_reset;
hwif->speedproc = cmd680_tune_chipset;
hwif->tuneproc = cmd680_tuneproc;
hwif->udma_four = cmd680_ata66(hwif);
break;
case PCI_DEVICE_ID_CMD_649:
case PCI_DEVICE_ID_CMD_648:
case PCI_DEVICE_ID_CMD_643:
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->udma_setup = cmd6xx_udma_setup;
hwif->udma_stop = cmd64x_udma_stop;
hwif->udma_irq_status = cmd64x_udma_irq_status;
}
#endif
hwif->tuneproc = cmd64x_tuneproc;
hwif->speedproc = cmd64x_tune_chipset;
hwif->udma_four = cmd64x_ata66(hwif);
break;
case PCI_DEVICE_ID_CMD_646:
hwif->chipset = ide_cmd646;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->udma_setup = cmd6xx_udma_setup;
if (class_rev == 0x01) {
hwif->udma_stop = cmd646_1_udma_stop;
} else {
......@@ -879,6 +788,7 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif)
#endif
hwif->tuneproc = cmd64x_tuneproc;
hwif->speedproc = cmd64x_tune_chipset;
hwif->udma_four = cmd64x_ata66(hwif);
break;
default:
break;
......@@ -887,10 +797,9 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif)
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->highmem = 1;
# ifdef CONFIG_IDEDMA_AUTO
if (!noautodma)
hwif->autodma = 1;
# endif
hwif->modes_map = cmd6xx_modes_map(hwif);
hwif->no_atapi_autodma = 1;
hwif->udma_setup = udma_generic_setup;
}
#endif
}
......@@ -919,7 +828,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_CMD,
device: PCI_DEVICE_ID_CMD_648,
init_chipset: cmd64x_init_chipset,
ata66_check: cmd64x_ata66_check,
init_channel: cmd64x_init_channel,
bootable: ON_BOARD,
flags: ATA_F_DMA
......@@ -928,7 +836,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_CMD,
device: PCI_DEVICE_ID_CMD_649,
init_chipset: cmd64x_init_chipset,
ata66_check: cmd64x_ata66_check,
init_channel: cmd64x_init_channel,
bootable: ON_BOARD,
flags: ATA_F_DMA
......@@ -937,7 +844,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_CMD,
device: PCI_DEVICE_ID_CMD_680,
init_chipset: cmd64x_init_chipset,
ata66_check: cmd64x_ata66_check,
init_channel: cmd64x_init_channel,
bootable: ON_BOARD,
flags: ATA_F_DMA
......
......@@ -191,7 +191,7 @@ static int cs5530_config_dma(struct ata_device *drive)
return 0;
}
static int cs5530_udma_setup(struct ata_device *drive)
static int cs5530_udma_setup(struct ata_device *drive, int map)
{
return cs5530_config_dma(drive);
}
......@@ -285,17 +285,15 @@ static unsigned int __init pci_init_cs5530(struct pci_dev *dev)
*/
static void __init ide_init_cs5530(struct ata_channel *hwif)
{
u32 basereg, d0_timings;
hwif->serialized = 1;
if (!hwif->dma_base) {
hwif->autodma = 0;
} else {
unsigned int basereg, d0_timings;
#ifdef CONFIG_BLK_DEV_IDEDMA
hwif->udma_setup = cs5530_udma_setup;
hwif->highmem = 1;
#else
hwif->autodma = 0;
if (hwif->dma_base) {
hwif->highmem = 1;
hwif->udma_setup = cs5530_udma_setup;
}
#endif
hwif->tuneproc = &cs5530_tuneproc;
......@@ -311,7 +309,6 @@ static void __init ide_init_cs5530(struct ata_channel *hwif)
if (!hwif->drives[1].autotune)
hwif->drives[1].autotune = 1; /* needs autotuning later */
}
}
}
......
......@@ -237,7 +237,7 @@ static void cy82c693_dma_enable(struct ata_device *drive, int mode, int single)
/*
* used to set DMA mode for CY82C693 (single and multi modes)
*/
static int cy82c693_udma_setup(struct ata_device *drive)
static int cy82c693_udma_setup(struct ata_device *drive, int map)
{
/*
* Set dma mode for drive everything else is done by the defaul func.
......@@ -414,14 +414,11 @@ static void __init ide_init_cy82c693(struct ata_channel *hwif)
hwif->tuneproc = cy82c693_tune_drive;
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
hwif->autodma = 0;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->highmem = 1;
hwif->udma_setup = cy82c693_udma_setup;
if (!noautodma)
hwif->autodma = 1;
}
#endif
}
......
......@@ -79,30 +79,8 @@ void ata_mask(struct ata_device *drive)
ch->maskproc(drive);
}
/*
* Spin until the drive is no longer busy.
*
* Not exported, since it's not used within any modules.
*/
int ata_busy_poll(struct ata_device *drive, unsigned long timeout)
{
/* spec allows drive 400ns to assert "BUSY" */
udelay(1);
if (!ata_status(drive, 0, BUSY_STAT)) {
timeout += jiffies;
while (!ata_status(drive, 0, BUSY_STAT)) {
if (time_after(jiffies, timeout))
return 1;
}
}
return 0;
}
/*
* Check the state of the status register.
*
* FIXME: Channel lock should be held.
*/
int ata_status(struct ata_device *drive, u8 good, u8 bad)
{
......@@ -120,31 +98,33 @@ EXPORT_SYMBOL(ata_status);
* all of the "good" bits and none of the "bad" bits, and if all is okay it
* returns 0. All other cases return 1 after invoking error handler -- caller
* should just return.
*
* This routine should get fixed to not hog the cpu during extra long waits..
* That could be done by busy-waiting for the first jiffy or two, and then
* setting a timer to wake up at half second intervals thereafter, until
* timeout is achieved, before timing out.
*
* Channel lock should be held.
*/
int ata_status_poll(struct ata_device *drive, u8 good, u8 bad,
unsigned long timeout,
struct request *rq, ide_startstop_t *startstop)
unsigned long timeout, struct request *rq)
{
int i;
/* bail early if we've exceeded max_failures */
if (drive->max_failures && (drive->failures > drive->max_failures)) {
*startstop = ide_stopped;
return 1;
}
if (ata_busy_poll(drive, timeout)) {
*startstop = ata_error(drive, rq, "status timeout");
if (drive->max_failures && (drive->failures > drive->max_failures))
return ATA_OP_FINISHED;
/*
* Spin until the drive is no longer busy.
* Spec allows drive 400ns to assert "BUSY"
*/
udelay(1);
if (!ata_status(drive, 0, BUSY_STAT)) {
unsigned long flags;
return 1;
__save_flags(flags);
ide__sti();
timeout += jiffies;
while (!ata_status(drive, 0, BUSY_STAT)) {
if (time_after(jiffies, timeout)) {
__restore_flags(flags);
return ata_error(drive, rq, "status timeout");
}
}
__restore_flags(flags);
}
/*
......@@ -156,12 +136,10 @@ int ata_status_poll(struct ata_device *drive, u8 good, u8 bad,
for (i = 0; i < 10; i++) {
udelay(1);
if (ata_status(drive, good, bad))
return 0;
return ATA_OP_READY;
}
*startstop = ata_error(drive, rq, "status error");
return 1;
return ata_error(drive, rq, "status error");
}
EXPORT_SYMBOL(ata_status_poll);
......
......@@ -72,83 +72,13 @@ static void hpt34x_tune_drive(struct ata_device *drive, u8 pio)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive, u8 udma)
static int hpt34x_udma_setup(struct ata_device *drive, int map)
{
int map;
u8 mode;
if (drive->type != ATA_DISK)
return 0;
if (udma)
map = XFER_UDMA;
else
map = XFER_SWDMA | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
if (mode < XFER_SW_DMA_0)
return 0;
return !hpt34x_tune_chipset(drive, mode);
}
static int hpt34x_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
int on = 1;
int verbose = 1;
if (id && (id->capability & 1) && drive->channel->autodma) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if (id->field_valid & 4) {
if (id->dma_ultra & 0x0007) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, 1);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
hpt34x_tune_chipset(drive, ata_best_pio_mode(drive));
}
#ifndef CONFIG_HPT34X_AUTODMA
if (on)
on = 0;
#endif
udma_enable(drive, on, verbose);
#ifdef CONFIG_HPT34X_AUTODMA
return udma_generic_setup(drive, map);
#else
return 0;
#endif
}
static int hpt34x_udma_stop(struct ata_device *drive)
......@@ -173,7 +103,7 @@ static int hpt34x_udma_init(struct ata_device *drive, struct request *rq)
u8 cmd;
if (!(count = udma_new_table(drive, rq)))
return ide_stopped; /* try PIO instead of DMA */
return ATA_OP_FINISHED; /* try PIO instead of DMA */
if (rq_data_dir(rq) == READ)
cmd = 0x09;
......@@ -189,7 +119,7 @@ static int hpt34x_udma_init(struct ata_device *drive, struct request *rq)
OUT_BYTE((cmd == 0x09) ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
}
return ide_started;
return ATA_OP_CONTINUES;
}
#endif
......@@ -252,24 +182,21 @@ static void __init ide_init_hpt34x(struct ata_channel *hwif)
unsigned short pcicmd = 0;
pci_read_config_word(hwif->pci_dev, PCI_COMMAND, &pcicmd);
if (!noautodma)
hwif->autodma = (pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0;
else
hwif->autodma = 0;
#ifdef CONFIG_IDEDMA_AUTO
hwif->autodma = (pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0;
#endif
hwif->udma_stop = hpt34x_udma_stop;
hwif->udma_init = hpt34x_udma_init;
hwif->modes_map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
hwif->no_atapi_autodma = 1;
hwif->udma_setup = hpt34x_udma_setup;
hwif->highmem = 1;
} else {
} else
#endif
{
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
}
#else
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
hwif->autodma = 0;
#endif
}
......@@ -281,7 +208,7 @@ static struct ata_pci_device chipset __initdata = {
init_channel: ide_init_hpt34x,
bootable: NEVER_BOARD,
extra: 16,
flags: ATA_F_NOADMA | ATA_F_DMA
flags: ATA_F_DMA
};
int __init init_hpt34x(void)
......
......@@ -493,37 +493,23 @@ static unsigned int hpt_revision(struct pci_dev *dev)
return class_rev;
}
static int hpt3xx_ratemask(struct ata_device *drive)
static int __init hpt3xx_modes_map(struct ata_channel *ch)
{
u32 rev = hpt_revision(drive->channel->pci_dev);
int map = XFER_UDMA;
u32 rev = hpt_revision(ch->pci_dev);
int map = XFER_EPIO | XFER_MWDMA | XFER_UDMA | XFER_UDMA_66;
if (rev >= 8) { /* HPT374 */
if (HPT374_ALLOW_ATA133_6)
map |= XFER_UDMA_133;
map |= (XFER_UDMA_100 | XFER_UDMA_66);
map |= XFER_UDMA_100;
} else if (rev >= 5) { /* HPT372 */
if (HPT372_ALLOW_ATA133_6)
map |= XFER_UDMA_133;
map |= (XFER_UDMA_100 | XFER_UDMA_66);
} else if (rev >= 4) { /* HPT370A */
map |= XFER_UDMA_100;
} else if (rev >= 3) { /* HPT370A / HPT370 */
if (HPT370_ALLOW_ATA100_5)
map |= XFER_UDMA_100;
map |= XFER_UDMA_66;
} else if (rev >= 3) { /* HPT370 */
if (HPT370_ALLOW_ATA100_5)
map |= XFER_UDMA_100;
map |= XFER_UDMA_66;
if (check_in_drive_lists(drive, bad_ata33))
return 0;
} else { /* HPT366 and HPT368 */
map |= XFER_UDMA_66;
if (check_in_drive_lists(drive, bad_ata33))
return 0;
}
if (!eighty_ninty_three(drive))
return XFER_UDMA;
} /* HPT366 / HPT368 */
return map;
}
......@@ -662,62 +648,42 @@ static int hpt3xx_tune_chipset(struct ata_device *drive, u8 speed)
return ide_config_drive_speed(drive, speed);
}
/* FIXME: pio == 255 -> ata_best_pio_mode(drive) --bkz */
static void hpt3xx_tune_drive(struct ata_device *drive, u8 pio)
{
(void) hpt3xx_tune_chipset(drive, XFER_PIO_0 + min_t(u8, pio, 4));
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive)
static int hpt3xx_udma_setup(struct ata_device *drive, int map)
{
int map;
u32 rev;
u8 mode;
if (drive->type != ATA_DISK)
return 0;
rev = hpt_revision(drive->channel->pci_dev);
/* FIXME: check SWDMA modes --bkz */
map = hpt3xx_ratemask(drive) | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
/* FIXME: badlists need futher investigation --bkz */
/* FIXME: badlists need futher investigation --bkz
bad_ata100_5 is for HPT370/370A,
bad_ata66_4, bad_ata66_3 and bad_ata33 are for HPT366/368
*/
if (mode == XFER_UDMA_5 && rev < 5) {
if (check_in_drive_lists(drive, bad_ata100_5)) {
/* FIXME: make XFER_UDMA_66/100/133
independent of XFER_UDMA --bkz */
map &= ~XFER_UDMA_100;
map |= XFER_UDMA;
mode = ata_timing_mode(drive, map);
}
}
if (mode == XFER_UDMA_4 && rev < 3) {
if (check_in_drive_lists(drive, bad_ata66_4)) {
if (drive->id->dma_ultra & 0x0008) {
mode = XFER_UDMA_3;
} else {
map &= ~XFER_UDMA_66;
map |= XFER_UDMA;
mode = ata_timing_mode(drive, map);
}
}
}
if (mode == XFER_UDMA_3 && rev < 3) {
if (check_in_drive_lists(drive, bad_ata66_3)) {
map &= ~XFER_UDMA_66;
map |= XFER_UDMA;
mode = ata_timing_mode(drive, map);
}
/* bad_ata100_5 is for HPT370/370A,
bad_ata66_4, bad_ata66_3 and bad_ata33 are for HPT366/368 */
if (rev < 5 && check_in_drive_lists(drive, bad_ata100_5))
map &= ~XFER_UDMA_100;
if (rev < 3) {
if (check_in_drive_lists(drive, bad_ata66_4))
map &= ~XFER_UDMA_66_4;
if (check_in_drive_lists(drive, bad_ata66_3))
map &= ~XFER_UDMA_66_3;
if (check_in_drive_lists(drive, bad_ata33))
map &= ~XFER_UDMA_ALL;
}
if (check_in_drive_lists(drive, bad_ata33) && rev < 3)
mode = ata_timing_mode(drive, XFER_MWDMA);
return !hpt3xx_tune_chipset(drive, mode);
return udma_generic_setup(drive, map);
}
static int hpt3xx_quirkproc(struct ata_device *drive)
......@@ -754,59 +720,6 @@ static void hpt3xx_maskproc(struct ata_device *drive)
}
}
static int hpt3xx_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
int on = 1;
int verbose = 1;
if (id && (id->capability & 1) && drive->channel->autodma) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if (id->field_valid & 4) {
if (id->dma_ultra & 0x007F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if (id->dma_mword & 0x0007) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
hpt3xx_tune_chipset(drive, ata_best_pio_mode(drive));
}
udma_enable(drive, on, verbose);
return 0;
}
static void hpt366_udma_irq_lost(struct ata_device *drive)
{
struct pci_dev *dev = drive->channel->pci_dev;
......@@ -1232,6 +1145,8 @@ static void __init hpt366_init_channel(struct ata_channel *ch)
struct pci_dev *dev = ch->pci_dev;
u32 rev = hpt_revision(dev);
ch->udma_four = hpt366_ata66_check(ch);
ch->tuneproc = hpt3xx_tune_drive;
ch->speedproc = hpt3xx_tune_chipset;
ch->quirkproc = hpt3xx_quirkproc;
......@@ -1272,17 +1187,12 @@ static void __init hpt366_init_channel(struct ata_channel *ch)
// ch->resetproc = hpt3xx_reset;
// ch->busproc = hpt3xx_tristate;
}
ch->modes_map = hpt3xx_modes_map(ch);
ch->udma_setup = hpt3xx_udma_setup;
if (!noautodma)
ch->autodma = 1;
else
ch->autodma = 0;
ch->highmem = 1;
} else
#endif
{
ch->autodma = 0;
ch->drives[0].autotune = 1;
ch->drives[1].autotune = 1;
}
......@@ -1315,7 +1225,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_TTI,
device: PCI_DEVICE_ID_TTI_HPT366,
init_chipset: hpt366_init_chipset,
ata66_check: hpt366_ata66_check,
init_channel: hpt366_init_channel,
init_dma: hpt366_init_dma,
bootable: OFF_BOARD,
......@@ -1326,7 +1235,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_TTI,
device: PCI_DEVICE_ID_TTI_HPT372,
init_chipset: hpt366_init_chipset,
ata66_check: hpt366_ata66_check,
init_channel: hpt366_init_channel,
init_dma: hpt366_init_dma,
bootable: OFF_BOARD,
......@@ -1337,7 +1245,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_TTI,
device: PCI_DEVICE_ID_TTI_HPT374,
init_chipset: hpt366_init_chipset,
ata66_check: hpt366_ata66_check,
init_channel: hpt366_init_channel,
init_dma: hpt366_init_dma,
bootable: OFF_BOARD,
......
......@@ -405,7 +405,7 @@ static void icside_dma_enable(struct ata_device *drive, int on, int verbose)
#endif
}
static int icside_dma_check(struct ata_device *drive)
static int icside_dma_check(struct ata_device *drive, int map)
{
struct hd_driveid *id = drive->id;
struct ata_channel *ch = drive->channel;
......@@ -466,7 +466,7 @@ static ide_startstop_t icside_dmaintr(struct ata_device *drive, struct request *
if (ata_status(drive, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
if (!dma_stat) {
__ide_end_request(drive, rq, 1, rq->nr_sectors);
return ide_stopped;
return ATA_OP_FINISHED;
}
printk("%s: dma_intr: bad DMA status (dma_stat=%x)\n",
drive->name, dma_stat);
......@@ -516,10 +516,10 @@ static int icside_dma_init(struct ata_device *drive, struct request *rq)
u8 int cmd;
if (icside_dma_common(drive, rq, DMA_MODE_WRITE))
return ide_stopped;
return ATA_OP_FINISHED;
if (drive->type != ATA_DISK)
return ide_started;
return ATA_OP_CONTINUES;
ata_set_handler(drive, icside_dmaintr, WAIT_CMD, NULL);
......@@ -535,7 +535,7 @@ static int icside_dma_init(struct ata_device *drive, struct request *rq)
enable_dma(ch->hw.dma);
return ide_started;
return ATA_OP_CONTINUES;
}
static int icside_irq_status(struct ata_device *drive)
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -257,24 +257,13 @@ static int __init setup_host_channel(struct pci_dev *dev,
if (d->flags & ATA_F_NODMA)
goto no_dma;
/* Check whatever this interface is UDMA4 mode capable. */
if (ch->udma_four) {
if (ch->udma_four)
printk("%s: warning: ATA-66/100 forced bit set!\n", dev->name);
} else {
if (d->ata66_check)
ch->udma_four = d->ata66_check(ch);
}
#ifdef CONFIG_BLK_DEV_IDEDMA
/*
* Setup DMA transfers on the channel.
*/
if (d->flags & ATA_F_NOADMA)
autodma = 0;
if (autodma)
ch->autodma = 1;
if (!((d->flags & ATA_F_DMA) || ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 0x80))))
goto no_dma;
/*
......@@ -324,6 +313,10 @@ static int __init setup_host_channel(struct pci_dev *dev,
* already enabled by the primary channel run.
*/
pci_set_master(dev);
if (autodma)
ch->autodma = 1;
if (d->init_dma)
d->init_dma(ch, dma_base);
else
......@@ -335,6 +328,11 @@ static int __init setup_host_channel(struct pci_dev *dev,
if (d->init_channel)
d->init_channel(ch);
#ifdef CONFIG_BLK_DEV_IDEDMA
if ((d->flags & ATA_F_NOADMA) || noautodma)
ch->autodma = 0;
#endif
return 0;
}
......
......@@ -256,11 +256,11 @@ struct {
static void pmac_ide_setup_dma(struct device_node *np, int ix);
static void pmac_udma_enable(struct ata_device *drive, int on, int verbose);
static int pmac_udma_start(struct ata_device *drive, struct request *rq);
static void pmac_udma_start(struct ata_device *drive, struct request *rq);
static int pmac_udma_stop(struct ata_device *drive);
static int pmac_udma_init(struct ata_device *drive, struct request *rq);
static int pmac_udma_irq_status(struct ata_device *drive);
static int pmac_udma_setup(struct ata_device *drive);
static int pmac_udma_setup(struct ata_device *drive, int map);
static int pmac_ide_build_dmatable(struct ata_device *drive, struct request *rq, int ix, int wr);
static int pmac_ide_tune_chipset(struct ata_device *drive, byte speed);
static void pmac_ide_tuneproc(struct ata_device *drive, byte pio);
......@@ -1340,7 +1340,7 @@ static void pmac_udma_enable(struct ata_device *drive, int on, int verbose)
ide_toggle_bounce(drive, 0);
}
static int pmac_udma_start(struct ata_device *drive, struct request *rq)
static void pmac_udma_start(struct ata_device *drive, struct request *rq)
{
int ix, ata4;
volatile struct dbdma_regs *dma;
......@@ -1350,7 +1350,7 @@ static int pmac_udma_start(struct ata_device *drive, struct request *rq)
*/
ix = pmac_ide_find(drive);
if (ix < 0)
return ide_stopped;
return;
dma = pmac_ide[ix].dma_regs;
ata4 = (pmac_ide[ix].kind == controller_kl_ata4 ||
......@@ -1360,7 +1360,7 @@ static int pmac_udma_start(struct ata_device *drive, struct request *rq)
/* Make sure it gets to the controller right now */
(void)in_le32(&dma->control);
return ide_started;
return;
}
static int pmac_udma_stop(struct ata_device *drive)
......@@ -1397,7 +1397,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
*/
ix = pmac_ide_find(drive);
if (ix < 0)
return ide_stopped;
return ATA_OP_FINISHED;
if (rq_data_dir(rq) == READ)
reading = 1;
......@@ -1409,7 +1409,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
pmac_ide[ix].kind == controller_kl_ata4_80);
if (!pmac_ide_build_dmatable(drive, rq, ix, !reading))
return ide_stopped;
return ATA_OP_FINISHED;
/* Apple adds 60ns to wrDataSetup on reads */
if (ata4 && (pmac_ide[ix].timings[unit] & TR_66_UDMA_EN)) {
out_le32((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG + _IO_BASE),
......@@ -1419,7 +1419,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
}
if (drive->type != ATA_DISK)
return ide_started;
return ATA_OP_CONTINUES;
ata_set_handler(drive, ide_dma_intr, WAIT_CMD, NULL);
if ((rq->flags & REQ_SPECIAL) &&
......@@ -1435,7 +1435,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
udma_start(drive, rq);
return ide_started;
return ATA_OP_CONTINUES;
}
/*
......@@ -1491,14 +1491,14 @@ static int pmac_udma_irq_status(struct ata_device *drive)
set_bit(IDE_DMA, drive->channel->active);
// if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
// printk(KERN_WARNING "ide%d, timeout waiting \
for dbdma command stop\n", ix);
return 1;
}
// for dbdma command stop\n", ix);
// return 1;
// }
udelay(1);
return 0;
}
static int pmac_udma_setup(struct ata_device *drive)
static int pmac_udma_setup(struct ata_device *drive, int map)
{
/* Change this to better match ide-dma.c */
pmac_ide_check_dma(drive);
......
This diff is collapsed.
......@@ -197,13 +197,14 @@ int drive_is_ready(struct ata_device *drive)
int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t action)
{
unsigned long flags;
unsigned int major = drive->channel->major;
struct ata_channel *ch = drive->channel;
unsigned int major = ch->major;
request_queue_t *q = &drive->queue;
struct list_head *queue_head = &q->queue_head;
DECLARE_COMPLETION(wait);
#ifdef CONFIG_BLK_DEV_PDC4030
if (drive->channel->chipset == ide_pdc4030 && rq->buffer != NULL)
if (ch->chipset == ide_pdc4030 && rq->buffer)
return -ENOSYS; /* special drive cmds not supported */
#endif
rq->errors = 0;
......@@ -212,22 +213,18 @@ int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t
if (action == ide_wait)
rq->waiting = &wait;
spin_lock_irqsave(drive->channel->lock, flags);
spin_lock_irqsave(ch->lock, flags);
if (blk_queue_empty(&drive->queue) || action == ide_preempt) {
if (action == ide_preempt)
drive->rq = NULL;
} else {
if (action == ide_wait)
queue_head = queue_head->prev;
else
queue_head = queue_head->next;
}
q->elevator.elevator_add_req_fn(q, rq, queue_head);
if (action == ide_preempt)
drive->rq = NULL;
else if (!blk_queue_empty(&drive->queue))
queue_head = queue_head->prev; /* ide_end and ide_wait */
__elv_add_request(q, rq, queue_head);
do_ide_request(q);
spin_unlock_irqrestore(drive->channel->lock, flags);
spin_unlock_irqrestore(ch->lock, flags);
if (action == ide_wait) {
wait_for_completion(&wait); /* wait for it to be serviced */
......@@ -235,23 +232,20 @@ int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t
}
return 0;
}
/*
* Invoked on completion of a special REQ_SPECIAL command.
*/
ide_startstop_t ata_special_intr(struct ata_device *drive, struct
static ide_startstop_t special_intr(struct ata_device *drive, struct
request *rq) {
struct ata_taskfile *ar = rq->special;
ide_startstop_t ret = ide_stopped;
unsigned long flags;
struct ata_channel *ch =drive->channel;
struct ata_taskfile *ar = rq->special;
ide_startstop_t ret = ATA_OP_FINISHED;
ide__sti(); /* local CPU only */
spin_lock_irqsave(drive->channel->lock, flags);
ide__sti();
if (rq->buffer && ar->taskfile.sector_number) {
if (!ata_status(drive, 0, DRQ_STAT) && ar->taskfile.sector_number) {
......@@ -283,24 +277,27 @@ ide_startstop_t ata_special_intr(struct ata_device *drive, struct
ata_in_regfile(drive, &ar->hobfile);
}
spin_lock_irqsave(ch->lock, flags);
blkdev_dequeue_request(rq);
drive->rq = NULL;
end_that_request_last(rq);
spin_unlock_irqrestore(drive->channel->lock, flags);
spin_unlock_irqrestore(ch->lock, flags);
return ret;
}
int ide_raw_taskfile(struct ata_device *drive, struct ata_taskfile *ar)
int ide_raw_taskfile(struct ata_device *drive, struct ata_taskfile *ar, char *buf)
{
struct request req;
ar->command_type = IDE_DRIVE_TASK_NO_DATA;
ar->XXX_handler = ata_special_intr;
ar->XXX_handler = special_intr;
memset(&req, 0, sizeof(req));
req.flags = REQ_SPECIAL;
req.buffer = buf;
req.special = ar;
return ide_do_drive_cmd(drive, &req, ide_wait);
......@@ -310,5 +307,4 @@ EXPORT_SYMBOL(drive_is_ready);
EXPORT_SYMBOL(ide_do_drive_cmd);
EXPORT_SYMBOL(ata_read);
EXPORT_SYMBOL(ata_write);
EXPORT_SYMBOL(ata_special_intr);
EXPORT_SYMBOL(ide_raw_taskfile);
This diff is collapsed.
......@@ -47,7 +47,6 @@ static int do_cmd_ioctl(struct ata_device *drive, unsigned long arg)
u8 *argbuf = vals;
int argsize = 4;
struct ata_taskfile args;
struct request req;
/* Second phase.
*/
......@@ -80,15 +79,7 @@ static int do_cmd_ioctl(struct ata_device *drive, unsigned long arg)
/* Issue ATA command and wait for completion.
*/
args.command_type = IDE_DRIVE_TASK_NO_DATA;
args.XXX_handler = ata_special_intr;
memset(&req, 0, sizeof(req));
req.flags = REQ_SPECIAL;
req.special = &args;
req.buffer = argbuf + 4;
err = ide_do_drive_cmd(drive, &req, ide_wait);
err = ide_raw_taskfile(drive, &args, argbuf + 4);
argbuf[0] = drive->status;
argbuf[1] = args.taskfile.feature;
......@@ -131,9 +122,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GET_32BIT: {
unsigned long val = drive->channel->io_32bit;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(val, (unsigned long *) arg))
return -EFAULT;
return 0;
......@@ -181,9 +169,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GET_UNMASKINTR: {
unsigned long val = drive->channel->unmask;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(val, (unsigned long *) arg))
return -EFAULT;
......@@ -211,9 +196,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GET_DMA: {
unsigned long val = drive->using_dma;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(val, (unsigned long *) arg))
return -EFAULT;
......@@ -245,9 +227,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
struct hd_geometry *loc = (struct hd_geometry *) arg;
unsigned short bios_cyl = drive->bios_cyl; /* truncate */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!loc || (drive->type != ATA_DISK && drive->type != ATA_FLOPPY))
return -EINVAL;
......@@ -270,9 +249,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GETGEO_BIG_RAW: {
struct hd_big_geometry *loc = (struct hd_big_geometry *) arg;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!loc || (drive->type != ATA_DISK && drive->type != ATA_FLOPPY))
return -EINVAL;
......@@ -293,8 +269,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
}
case HDIO_GET_IDENTITY:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (minor(inode->i_rdev) & PARTN_MASK)
return -EINVAL;
......@@ -308,8 +282,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
return 0;
case HDIO_GET_NICE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return put_user(drive->dsc_overlap << IDE_NICE_DSC_OVERLAP |
drive->atapi_overlap << IDE_NICE_ATAPI_OVERLAP,
......@@ -332,8 +304,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
return 0;
case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(drive->channel->bus_state, (long *)arg))
return -EFAULT;
......
......@@ -179,14 +179,6 @@ static int it8172_tune_chipset(struct ata_device *drive, u8 speed)
return ide_config_drive_speed(drive, speed);
}
static int it8172_udma_setup(struct ata_device *drive)
{
u8 speed = ata_timing_mode(drive, XFER_PIO | XFER_EPIO |
XFER_SWDMA | XFER_MWDMA | XFER_UDMA);
return !it8172_tune_chipset(drive, speed);
}
#endif /* defined(CONFIG_BLK_DEV_IDEDMA) && (CONFIG_IT8172_TUNING) */
......@@ -216,15 +208,11 @@ static void __init ide_init_it8172(struct ata_channel *hwif)
if (!hwif->dma_base)
return;
#ifndef CONFIG_BLK_DEV_IDEDMA
hwif->autodma = 0;
#else /* CONFIG_BLK_DEV_IDEDMA */
# ifdef CONFIG_IT8172_TUNING
hwif->autodma = 1;
hwif->dmaproc = &it8172_dmaproc;
hwif->modes_map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
hwif->udma_setup = udma_generic_setup;
hwif->speedproc = &it8172_tune_chipset;
# endif
#endif
cmdBase = dev->resource[0].start;
ctrlBase = dev->resource[1].start;
......
......@@ -1074,7 +1074,8 @@ int ide_register_subdriver(struct ata_device *drive, struct ata_operations *driv
spin_unlock_irqrestore(&ide_lock, flags);
/* Default autotune or requested autotune */
if (drive->autotune != 2) {
if (drive->channel->udma_setup) {
struct ata_channel *ch = drive->channel;
if (ch->udma_setup) {
/*
* Force DMAing for the beginning of the check. Some
......@@ -1085,7 +1086,7 @@ int ide_register_subdriver(struct ata_device *drive, struct ata_operations *driv
*/
udma_enable(drive, 0, 0);
drive->channel->udma_setup(drive);
ch->udma_setup(drive, ch->modes_map);
#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
udma_tcq_enable(drive, 1);
#endif
......
......@@ -105,21 +105,21 @@ static int ns87415_udma_init(struct ata_device *drive, struct request *rq)
ns87415_prepare_drive(drive, 1); /* select DMA xfer */
if (udma_pci_init(drive, rq)) /* use standard DMA stuff */
return ide_started;
return ATA_OP_CONTINUES;
ns87415_prepare_drive(drive, 0); /* DMA failed: select PIO xfer */
return ide_stopped;
return ATA_OP_FINISHED;
}
static int ns87415_udma_setup(struct ata_device *drive)
static int ns87415_udma_setup(struct ata_device *drive, int map)
{
if (drive->type != ATA_DISK) {
udma_enable(drive, 0, 0);
return 0;
}
return udma_pci_setup(drive);
return udma_pci_setup(drive, map);
}
#endif
......
This diff is collapsed.
......@@ -117,7 +117,6 @@ struct ata_pci_device {
unsigned short vendor;
unsigned short device;
unsigned int (*init_chipset)(struct pci_dev *);
unsigned int (*ata66_check)(struct ata_channel *);
void (*init_channel)(struct ata_channel *);
void (*init_dma)(struct ata_channel *, unsigned long);
ide_pci_enablebit_t enablebits[2];
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -130,7 +130,7 @@ static int config_for_dma(struct ata_device *drive)
* Check to see if the drive and
* chipset is capable of DMA mode
*/
static int sl82c105_dma_setup(struct ata_device *drive)
static int sl82c105_dma_setup(struct ata_device *drive, int map)
{
int on = 0;
......@@ -333,7 +333,6 @@ static void __init sl82c105_init_dma(struct ata_channel *ch, unsigned long dma_b
dma_state &= ~0x60;
} else {
dma_state |= 0x60;
ch->autodma = 1;
}
outb(dma_state, dma_base + 2);
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -283,7 +283,9 @@ extern void generic_make_request(struct bio *bio);
extern inline request_queue_t *bdev_get_queue(struct block_device *bdev);
extern void blkdev_release_request(struct request *);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern void __blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, int);
extern struct request *__blk_get_request(request_queue_t *, int);
extern void blk_put_request(struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment