Commit ffd630eb authored by Jens Axboe, committed by Linus Torvalds

[PATCH] kill drivers/ide TCQ support

Let's just remove it. It was disabled some time ago, and there's no
chance of it ever getting resurrected. PATA TCQ has so many technical
shortcomings that it was never really interesting, I'm afraid.
Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Bartlomiej Zolnierkiewicz <B.Zolnierkiewicz@elka.pw.edu.pl>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent bb9cdaf2
@@ -424,48 +424,6 @@ config BLK_DEV_IDEDMA_PCI
if BLK_DEV_IDEDMA_PCI
# TCQ is disabled for now
config BLK_DEV_IDE_TCQ
bool "ATA tagged command queueing (EXPERIMENTAL)"
depends on EXPERIMENTAL && n
help
Support for tagged command queueing on ATA disk drives. This enables
the IDE layer to have multiple in-flight requests on hardware that
supports it. For now this includes the IBM Deskstar series drives,
such as the 22GXP, 75GXP, 40GV, 60GXP, and 120GXP (ie any Deskstar made
in the last couple of years), and at least some of the Western
Digital drives in the Expert series (by nature of really being IBM
drives).
If you have such a drive, say Y here.
config BLK_DEV_IDE_TCQ_DEFAULT
bool "TCQ on by default"
depends on BLK_DEV_IDE_TCQ
---help---
Enable tagged command queueing unconditionally on drives that report
support for it. Regardless of the chosen value here, tagging can be
controlled at run time:
echo "using_tcq:32" > /proc/ide/hdX/settings
where any value between 1-32 selects chosen queue depth and enables
TCQ, and 0 disables it. hdparm version 4.7 and above also support
TCQ manipulations.
Generally say Y here.
config BLK_DEV_IDE_TCQ_DEPTH
int "Default queue depth"
depends on BLK_DEV_IDE_TCQ
default "8"
help
Maximum size of commands to enable per-drive. Any value between 1
and 32 is valid, with 32 being the maximum that the hardware supports.
You probably just want the default of 32 here. If you enter an invalid
number, the default value will be used.
config BLK_DEV_IDEDMA_FORCED
bool "Force enable legacy 2.0.X HOSTS to use DMA"
help
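The removed help text above documents the run-time control path: writing "using_tcq:<depth>" to /proc/ide/hdX/settings enabled TCQ at that depth (1-32), and "using_tcq:0" disabled it. A minimal user-space sketch of that write, assuming drive hda and a depth of 8 as example values (illustrative only, not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hda and the depth of 8 are example choices; any hdX and 0-32 worked */
	const char *path = "/proc/ide/hda/settings";
	const char *cmd  = "using_tcq:8\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		perror("write");
	close(fd);
	return 0;
}

hdparm 4.7 and above exposed the same knob, as the removed text notes; after this patch the setting is gone.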
@@ -120,20 +120,6 @@ static int lba_capacity_is_ok (struct hd_driveid *id)
return 0; /* lba_capacity value may be bad */
}
static int idedisk_start_tag(ide_drive_t *drive, struct request *rq)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&ide_lock, flags);
if (ata_pending_commands(drive) < drive->queue_depth)
ret = blk_queue_start_tag(drive->queue, rq);
spin_unlock_irqrestore(&ide_lock, flags);
return ret;
}
#ifndef CONFIG_IDE_TASKFILE_IO
/*
@@ -369,18 +355,10 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
pr_debug("%s: LBA=0x%012llx\n", drive->name, block);
if (blk_rq_tagged(rq)) {
tasklets[0] = nsectors.b.low;
tasklets[1] = nsectors.b.high;
tasklets[2] = rq->tag << 3;
tasklets[3] = 0;
} else {
tasklets[0] = 0;
tasklets[1] = 0;
tasklets[2] = nsectors.b.low;
tasklets[3] = nsectors.b.high;
}
tasklets[0] = 0;
tasklets[1] = 0;
tasklets[2] = nsectors.b.low;
tasklets[3] = nsectors.b.high;
tasklets[4] = (task_ioreg_t) block;
tasklets[5] = (task_ioreg_t) (block>>8);
tasklets[6] = (task_ioreg_t) (block>>16);
@@ -411,14 +389,8 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
hwif->OUTB(tasklets[6], IDE_HCYL_REG);
hwif->OUTB(0x00|drive->select.all,IDE_SELECT_REG);
} else {
if (blk_rq_tagged(rq)) {
hwif->OUTB(nsectors.b.low, IDE_FEATURE_REG);
hwif->OUTB(rq->tag << 3, IDE_NSECTOR_REG);
} else {
hwif->OUTB(0x00, IDE_FEATURE_REG);
hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
}
hwif->OUTB(0x00, IDE_FEATURE_REG);
hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
hwif->OUTB(block, IDE_SECTOR_REG);
hwif->OUTB(block>>=8, IDE_LCYL_REG);
hwif->OUTB(block>>=8, IDE_HCYL_REG);
@@ -434,23 +406,14 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);
if (blk_rq_tagged(rq)) {
hwif->OUTB(nsectors.b.low, IDE_FEATURE_REG);
hwif->OUTB(rq->tag << 3, IDE_NSECTOR_REG);
} else {
hwif->OUTB(0x00, IDE_FEATURE_REG);
hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
}
hwif->OUTB(0x00, IDE_FEATURE_REG);
hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
hwif->OUTB(cyl, IDE_LCYL_REG);
hwif->OUTB(cyl>>8, IDE_HCYL_REG);
hwif->OUTB(head|drive->select.all,IDE_SELECT_REG);
}
if (rq_data_dir(rq) == READ) {
#ifdef CONFIG_BLK_DEV_IDE_TCQ
if (blk_rq_tagged(rq))
return __ide_dma_queued_read(drive);
#endif
if (drive->using_dma && !hwif->ide_dma_read(drive))
return ide_started;
@@ -461,10 +424,7 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
return ide_started;
} else {
ide_startstop_t startstop;
#ifdef CONFIG_BLK_DEV_IDE_TCQ
if (blk_rq_tagged(rq))
return __ide_dma_queued_write(drive);
#endif
if (drive->using_dma && !(HWIF(drive)->ide_dma_write(drive)))
return ide_started;
@@ -534,8 +494,6 @@ static u8 get_command(ide_drive_t *drive, int cmd, ide_task_t *task)
if (cmd == READ) {
task->command_type = IDE_DRIVE_TASK_IN;
if (drive->using_tcq)
return lba48 ? WIN_READDMA_QUEUED_EXT : WIN_READDMA_QUEUED;
if (drive->using_dma)
return lba48 ? WIN_READDMA_EXT : WIN_READDMA;
if (drive->mult_count) {
@@ -546,8 +504,6 @@ static u8 get_command(ide_drive_t *drive, int cmd, ide_task_t *task)
return lba48 ? WIN_READ_EXT : WIN_READ;
} else {
task->command_type = IDE_DRIVE_TASK_RAW_WRITE;
if (drive->using_tcq)
return lba48 ? WIN_WRITEDMA_QUEUED_EXT : WIN_WRITEDMA_QUEUED;
if (drive->using_dma)
return lba48 ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;
if (drive->mult_count) {
@@ -579,12 +535,7 @@ static ide_startstop_t chs_rw_disk (ide_drive_t *drive, struct request *rq, unsi
sectors = (rq->nr_sectors == 256) ? 0x00 : rq->nr_sectors;
if (blk_rq_tagged(rq)) {
args.tfRegister[IDE_FEATURE_OFFSET] = sectors;
args.tfRegister[IDE_NSECTOR_OFFSET] = rq->tag << 3;
} else
args.tfRegister[IDE_NSECTOR_OFFSET] = sectors;
args.tfRegister[IDE_NSECTOR_OFFSET] = sectors;
args.tfRegister[IDE_SECTOR_OFFSET] = sect;
args.tfRegister[IDE_LCYL_OFFSET] = cyl;
args.tfRegister[IDE_HCYL_OFFSET] = (cyl>>8);
@@ -608,12 +559,7 @@ static ide_startstop_t lba_28_rw_disk (ide_drive_t *drive, struct request *rq, u
sectors = (rq->nr_sectors == 256) ? 0x00 : rq->nr_sectors;
if (blk_rq_tagged(rq)) {
args.tfRegister[IDE_FEATURE_OFFSET] = sectors;
args.tfRegister[IDE_NSECTOR_OFFSET] = rq->tag << 3;
} else
args.tfRegister[IDE_NSECTOR_OFFSET] = sectors;
args.tfRegister[IDE_NSECTOR_OFFSET] = sectors;
args.tfRegister[IDE_SECTOR_OFFSET] = block;
args.tfRegister[IDE_LCYL_OFFSET] = (block>>=8);
args.tfRegister[IDE_HCYL_OFFSET] = (block>>=8);
@@ -643,16 +589,8 @@ static ide_startstop_t lba_48_rw_disk (ide_drive_t *drive, struct request *rq, u
sectors = (rq->nr_sectors == 65536) ? 0 : rq->nr_sectors;
if (blk_rq_tagged(rq)) {
args.tfRegister[IDE_FEATURE_OFFSET] = sectors;
args.tfRegister[IDE_NSECTOR_OFFSET] = rq->tag << 3;
args.hobRegister[IDE_FEATURE_OFFSET] = sectors >> 8;
args.hobRegister[IDE_NSECTOR_OFFSET] = 0;
} else {
args.tfRegister[IDE_NSECTOR_OFFSET] = sectors;
args.hobRegister[IDE_NSECTOR_OFFSET] = sectors >> 8;
}
args.tfRegister[IDE_NSECTOR_OFFSET] = sectors;
args.hobRegister[IDE_NSECTOR_OFFSET] = sectors >> 8;
args.tfRegister[IDE_SECTOR_OFFSET] = block; /* low lba */
args.tfRegister[IDE_LCYL_OFFSET] = (block>>=8); /* mid lba */
args.tfRegister[IDE_HCYL_OFFSET] = (block>>=8); /* hi lba */
@@ -682,13 +620,6 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s
return ide_stopped;
}
if (drive->using_tcq && idedisk_start_tag(drive, rq)) {
if (!ata_pending_commands(drive))
BUG();
return ide_started;
}
pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
block, rq->nr_sectors, (unsigned long)rq->buffer);
@@ -1356,34 +1287,6 @@ static int set_acoustic (ide_drive_t *drive, int arg)
return 0;
}
#ifdef CONFIG_BLK_DEV_IDE_TCQ
static int set_using_tcq(ide_drive_t *drive, int arg)
{
int ret;
if (!drive->driver)
return -EPERM;
if (arg == drive->queue_depth && drive->using_tcq)
return 0;
/*
* set depth, but check also id for max supported depth
*/
drive->queue_depth = arg ? arg : 1;
if (drive->id) {
if (drive->queue_depth > drive->id->queue_depth + 1)
drive->queue_depth = drive->id->queue_depth + 1;
}
if (arg)
ret = __ide_dma_queued_on(drive);
else
ret = __ide_dma_queued_off(drive);
return ret ? -EIO : 0;
}
#endif
/*
* drive->addressing:
* 0: 28-bit
@@ -1419,9 +1322,6 @@ static void idedisk_add_settings(ide_drive_t *drive)
ide_add_setting(drive, "acoustic", SETTING_RW, HDIO_GET_ACOUSTIC, HDIO_SET_ACOUSTIC, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic);
ide_add_setting(drive, "failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL);
ide_add_setting(drive, "max_failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL);
#ifdef CONFIG_BLK_DEV_IDE_TCQ
ide_add_setting(drive, "using_tcq", SETTING_RW, HDIO_GET_QDMA, HDIO_SET_QDMA, TYPE_BYTE, 0, IDE_MAX_TAG, 1, 1, &drive->using_tcq, set_using_tcq);
#endif
}
/*
@@ -1632,11 +1532,6 @@ static void idedisk_setup (ide_drive_t *drive)
drive->wcache = 1;
write_cache(drive, 1);
#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
if (drive->using_dma)
__ide_dma_queued_on(drive);
#endif
}
static void ide_cacheflush_p(ide_drive_t *drive)
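The pattern repeated through the ide-disk.c hunks above is the taskfile layout for queued commands: for READ/WRITE DMA QUEUED the sector count goes into the Features register and the tag into bits 7:3 of the Sector Count register, which is where "rq->tag << 3" comes from. A condensed sketch of that register setup, reusing the names from the removed code (the helper itself is hypothetical, not part of this patch):

#include <linux/blkdev.h>
#include <linux/ide.h>

static void load_count_and_tag(ide_hwif_t *hwif, struct request *rq, u8 nsect_low)
{
	if (blk_rq_tagged(rq)) {
		/* queued DMA: count in FEATURE, tag in NSECTOR bits 7:3 */
		hwif->OUTB(nsect_low, IDE_FEATURE_REG);
		hwif->OUTB(rq->tag << 3, IDE_NSECTOR_REG);
	} else {
		/* plain command: count in NSECTOR, FEATURE cleared */
		hwif->OUTB(0x00, IDE_FEATURE_REG);
		hwif->OUTB(nsect_low, IDE_NSECTOR_REG);
	}
}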
@@ -513,9 +513,7 @@ int __ide_dma_off_quietly (ide_drive_t *drive)
if (HWIF(drive)->ide_dma_host_off(drive))
return 1;
#ifdef CONFIG_BLK_DEV_IDE_TCQ
__ide_dma_queued_off(drive);
#endif
return 0;
}
@@ -97,10 +97,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
if (!end_that_request_first(rq, uptodate, nr_sectors)) {
add_disk_randomness(rq->rq_disk);
if (!blk_rq_tagged(rq))
blkdev_dequeue_request(rq);
else
blk_queue_end_tag(drive->queue, rq);
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
end_that_request_last(rq);
ret = 0;
@@ -855,18 +852,7 @@ void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
drive->sleep = 0;
drive->service_start = jiffies;
queue_next:
if (!ata_can_queue(drive)) {
if (!ata_pending_commands(drive))
hwgroup->busy = 0;
break;
}
if (blk_queue_plugged(drive->queue)) {
if (drive->using_tcq)
break;
printk(KERN_ERR "ide: huh? queue was plugged!\n");
break;
}
@@ -877,7 +863,7 @@ void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
*/
rq = elv_next_request(drive->queue);
if (!rq) {
hwgroup->busy = !!ata_pending_commands(drive);
hwgroup->busy = 0;
break;
}
@@ -900,9 +886,6 @@ void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
break;
}
if (!rq->bio && ata_pending_commands(drive))
break;
hwgroup->rq = rq;
/*
@@ -922,8 +905,6 @@ void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
spin_lock_irq(&ide_lock);
if (hwif->irq != masked_irq)
enable_irq(hwif->irq);
if (startstop == ide_released)
goto queue_next;
if (startstop == ide_stopped)
hwgroup->busy = 0;
}
@@ -241,17 +241,6 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
drive->media = ide_disk;
printk("%s DISK drive\n", (drive->is_flash) ? "CFA" : "ATA" );
QUIRK_LIST(drive);
/* Initialize queue depth settings */
drive->queue_depth = 1;
#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEPTH
drive->queue_depth = CONFIG_BLK_DEV_IDE_TCQ_DEPTH;
#else
drive->queue_depth = drive->id->queue_depth + 1;
#endif
if (drive->queue_depth < 1 || drive->queue_depth > IDE_MAX_TAG)
drive->queue_depth = IDE_MAX_TAG;
return;
err_misc:
@@ -201,14 +201,6 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
if (!hwif->ide_dma_read(drive))
return ide_started;
break;
#ifdef CONFIG_BLK_DEV_IDE_TCQ
case WIN_READDMA_QUEUED:
case WIN_READDMA_QUEUED_EXT:
return __ide_dma_queued_read(drive);
case WIN_WRITEDMA_QUEUED:
case WIN_WRITEDMA_QUEUED_EXT:
return __ide_dma_queued_write(drive);
#endif
default:
if (task->handler == NULL)
return ide_stopped;
(A further file diff in this commit is collapsed and not shown.)
@@ -690,7 +690,6 @@ typedef union {
typedef enum {
ide_stopped, /* no drive operation was started */
ide_started, /* a drive operation was started, handler was set */
ide_released, /* as ide_started, but bus also released */
} ide_startstop_t;
struct ide_driver_s;
@@ -724,7 +723,6 @@ typedef struct ide_drive_s {
u8 keep_settings; /* restore settings after drive reset */
u8 autodma; /* device can safely use dma on host */
u8 using_dma; /* disk is using dma for read/write */
u8 using_tcq; /* disk is using queueing */
u8 retry_pio; /* retrying dma capable host in pio */
u8 state; /* retry state */
u8 waiting_for_dma; /* dma currently in progress */
@@ -782,7 +780,6 @@ typedef struct ide_drive_s {
u8 sect; /* "real" sectors per track */
u8 bios_head; /* BIOS/fdisk/LILO number of heads */
u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
u8 queue_depth; /* max queue depth */
unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
unsigned int cyl; /* "real" number of cyls */
@@ -1623,14 +1620,6 @@ extern int __ide_dma_lostirq(ide_drive_t *);
extern int __ide_dma_timeout(ide_drive_t *);
#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
#ifdef CONFIG_BLK_DEV_IDE_TCQ
extern int __ide_dma_queued_on(ide_drive_t *drive);
extern int __ide_dma_queued_off(ide_drive_t *drive);
extern ide_startstop_t __ide_dma_queued_read(ide_drive_t *drive);
extern ide_startstop_t __ide_dma_queued_write(ide_drive_t *drive);
extern ide_startstop_t __ide_dma_queued_start(ide_drive_t *drive);
#endif
#else
static inline int __ide_dma_off(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA */
@@ -1699,28 +1688,6 @@ extern struct semaphore ide_cfg_sem;
#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable(); } while (0)
#define IDE_MAX_TAG 32
#ifdef CONFIG_BLK_DEV_IDE_TCQ
static inline int ata_pending_commands(ide_drive_t *drive)
{
if (drive->using_tcq)
return blk_queue_tag_depth(drive->queue);
return 0;
}
static inline int ata_can_queue(ide_drive_t *drive)
{
if (drive->using_tcq)
return blk_queue_tag_queue(drive->queue);
return 1;
}
#else
#define ata_pending_commands(drive) (0)
#define ata_can_queue(drive) (1)
#endif
extern struct bus_type ide_bus_type;
#endif /* _IDE_H */
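For reference, the "+ 1" in the removed depth handling (set_using_tcq() and the do_identify() hunk above) reflects that id->queue_depth mirrors IDENTIFY DEVICE word 75, whose low bits report the maximum queue depth minus one. A condensed sketch of the clamping the removed code performed, with a made-up helper name (illustrative only, not part of this patch):

#include <linux/hdreg.h>
#include <linux/ide.h>

static int tcq_clamp_depth(ide_drive_t *drive, int requested)
{
	int depth = requested ? requested : 1;	/* 0 meant "depth 1" in set_using_tcq() */

	/* never exceed what the drive advertises: word 75 value + 1 */
	if (drive->id && depth > drive->id->queue_depth + 1)
		depth = drive->id->queue_depth + 1;

	/* driver-side ceiling, as in the removed do_identify() check */
	if (depth < 1 || depth > IDE_MAX_TAG)
		depth = IDE_MAX_TAG;

	return depth;
}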