Commit 045c90fe authored by Martin Dalecki, committed by Kai Germaschewski

[PATCH] 2.5.10 IDE 44

Reduce the "rain forest" of pointers a bit.

- Use the channel info from the drive instead of from the hwgroup where possible.
   Since there is now a direct association between a channel and its irq, we can
   fix the handler for unexpected interrupts so that it no longer has to iterate
   over multiple possibilities (a condensed sketch follows after this list).

- Finally it became possible to remove the circular list of channels
   associated with a hwgroup. The hwif member of struct hwgroup_s as well as
   the next member of struct ata_channel are gone.

- Unify ide_build_sglist() and raw_build_sglist() into one function. In the
   case of a CMD request, use the queue associated with the request instead of
   referencing the device minor number (also sketched below). This will be
   pushed even further later.
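
As an illustration of the first point, here is a condensed sketch of the lookup
pattern the patch adopts (compare unexpected_irq() and hwif_unregister() in the
diff below): each channel now carries its irq, so a handler simply scans the
global ide_hwifs[] table for channels wired to that irq instead of walking a
per-hwgroup circular list. This is a simplified restatement of the diff, not
the literal patch text; the message rate limiting is omitted here.

	static void unexpected_irq(int irq)
	{
		int i;

		for (i = 0; i < MAX_HWIFS; ++i) {
			struct ata_channel *ch = &ide_hwifs[i];
			u8 stat;

			if (!ch->present)	/* skip uninitialized slots */
				continue;
			if (ch->irq != irq)	/* only channels on this irq */
				continue;

			/* Read the status register and complain if the device
			 * does not look idle and ready. */
			stat = IN_BYTE(ch->io_ports[IDE_STATUS_OFFSET]);
			if (!OK_STAT(stat, READY_STAT, BAD_STAT))
				printk("%s: unexpected interrupt, status=0x%02x\n",
				       ch->name, stat);
		}
	}

And a rough sketch of the unified scatter/gather builder from the last point:
REQ_DRIVE_ACB (taskfile) requests are mapped from their raw virtual buffer,
everything else goes through blk_rq_map_sg() on the queue the request already
carries (rq->q) instead of a queue derived from the device minor number. The
taskfile branch below is reduced to a single segment and assumes the sector
count can be taken from rq->nr_sectors; the real patch in the diff splits
transfers larger than 128 sectors.

	static int build_sglist(struct ata_channel *ch, struct request *rq)
	{
		struct scatterlist *sg = ch->sg_table;
		int nents = 0;

		if (rq->flags & REQ_DRIVE_ACB) {
			/* Taskfile request: map the raw virtual buffer directly.
			 * Assumes rq->nr_sectors holds the transfer size; the
			 * real patch also splits transfers above 128 sectors. */
			unsigned char *virt_addr = rq->buffer;

			ch->sg_dma_direction = (rq_data_dir(rq) == WRITE)
				? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

			memset(&sg[0], 0, sizeof(*sg));
			sg[0].page = virt_to_page(virt_addr);
			sg[0].offset = (unsigned long) virt_addr & ~PAGE_MASK;
			sg[0].length = rq->nr_sectors * SECTOR_SIZE;
			nents = 1;
		} else {
			/* Normal request: let the block layer build the list
			 * from the queue the request actually belongs to. */
			nents = blk_rq_map_sg(rq->q, rq, sg);
			ch->sg_dma_direction = (rq_data_dir(rq) == READ)
				? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
		}

		return pci_map_sg(ch->pci_dev, sg, nents, ch->sg_dma_direction);
	}
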
parent 5ef2759c
......@@ -27,7 +27,9 @@ volatile struct baboon *baboon;
void baboon_irq(int, void *, struct pt_regs *);
#if 0
extern int macide_ack_intr(struct ata_channel *);
#endif
/*
* Baboon initialization.
......
......@@ -5,9 +5,7 @@
* May be copied or modified under the terms of the GNU General Public License
*
* Special Thanks to Mark for his Six years of work.
*/
/*
*
* This module provides support for the bus-master IDE DMA functions
* of various PCI chipsets, including the Intel PIIX (i82371FB for
* the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and
......@@ -215,33 +213,16 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
return ide_error(drive, "dma_intr", stat);
}
static int ide_build_sglist(struct ata_channel *hwif, struct request *rq)
{
request_queue_t *q = &hwif->drives[DEVICE_NR(rq->rq_dev) & 1].queue;
struct scatterlist *sg = hwif->sg_table;
int nents;
nents = blk_rq_map_sg(q, rq, hwif->sg_table);
if (rq->q && nents > rq->nr_phys_segments)
printk("ide-dma: received %d phys segments, build %d\n", rq->nr_phys_segments, nents);
if (rq_data_dir(rq) == READ)
hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
else
hwif->sg_dma_direction = PCI_DMA_TODEVICE;
return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}
/*
* FIXME: taskfiles should be a map of pages, not a long virt address... /jens
* FIXME: I agree with Jens --mdcki!
*/
static int raw_build_sglist(struct ata_channel *ch, struct request *rq)
static int build_sglist(struct ata_channel *ch, struct request *rq)
{
struct scatterlist *sg = ch->sg_table;
int nents = 0;
if (rq->flags & REQ_DRIVE_ACB) {
struct ata_taskfile *args = rq->special;
#if 1
unsigned char *virt_addr = rq->buffer;
......@@ -258,13 +239,16 @@ static int raw_build_sglist(struct ata_channel *ch, struct request *rq)
else
ch->sg_dma_direction = PCI_DMA_FROMDEVICE;
/*
* FIXME: This depends upon a hard coded page size!
*/
if (sector_count > 128) {
memset(&sg[nents], 0, sizeof(*sg));
sg[nents].page = virt_to_page(virt_addr);
sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
sg[nents].length = 128 * SECTOR_SIZE;
nents++;
++nents;
virt_addr = virt_addr + (128 * SECTOR_SIZE);
sector_count -= 128;
}
......@@ -272,8 +256,19 @@ static int raw_build_sglist(struct ata_channel *ch, struct request *rq)
sg[nents].page = virt_to_page(virt_addr);
sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
sg[nents].length = sector_count * SECTOR_SIZE;
nents++;
++nents;
} else {
nents = blk_rq_map_sg(rq->q, rq, ch->sg_table);
if (rq->q && nents > rq->nr_phys_segments)
printk("ide-dma: received %d phys segments, build %d\n", rq->nr_phys_segments, nents);
if (rq_data_dir(rq) == READ)
ch->sg_dma_direction = PCI_DMA_FROMDEVICE;
else
ch->sg_dma_direction = PCI_DMA_TODEVICE;
}
return pci_map_sg(ch->pci_dev, sg, nents, ch->sg_dma_direction);
}
......@@ -295,11 +290,7 @@ int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
int i;
struct scatterlist *sg;
if (HWGROUP(drive)->rq->flags & REQ_DRIVE_ACB) {
hwif->sg_nents = i = raw_build_sglist(hwif, HWGROUP(drive)->rq);
} else {
hwif->sg_nents = i = ide_build_sglist(hwif, HWGROUP(drive)->rq);
}
hwif->sg_nents = i = build_sglist(hwif, HWGROUP(drive)->rq);
if (!i)
return 0;
......
......@@ -633,7 +633,7 @@ static void save_match(struct ata_channel *hwif, struct ata_channel *new,
static int init_irq(struct ata_channel *ch)
{
unsigned long flags;
unsigned int index;
int i;
ide_hwgroup_t *hwgroup;
ide_hwgroup_t *new_hwgroup;
struct ata_channel *match = NULL;
......@@ -650,8 +650,9 @@ static int init_irq(struct ata_channel *ch)
/*
* Group up with any other channels that share our irq(s).
*/
for (index = 0; index < MAX_HWIFS; index++) {
struct ata_channel *h = &ide_hwifs[index];
for (i = 0; i < MAX_HWIFS; ++i) {
struct ata_channel *h = &ide_hwifs[i];
if (h->hwgroup) { /* scan only initialized channels */
if (ch->irq == h->irq) {
ch->sharing_irq = h->sharing_irq = 1;
......@@ -682,7 +683,6 @@ static int init_irq(struct ata_channel *ch)
return 1;
}
memset(hwgroup, 0, sizeof(ide_hwgroup_t));
hwgroup->hwif = ch->next = ch;
hwgroup->rq = NULL;
hwgroup->handler = NULL;
hwgroup->drive = NULL;
......@@ -716,14 +716,12 @@ static int init_irq(struct ata_channel *ch)
}
/*
* Everything is okay, so link us into the hwgroup.
* Everything is okay.
*/
ch->hwgroup = hwgroup;
ch->next = hwgroup->hwif->next;
hwgroup->hwif->next = ch;
for (index = 0; index < MAX_DRIVES; ++index) {
struct ata_device *drive = &ch->drives[index];
for (i = 0; i < MAX_DRIVES; ++i) {
struct ata_device *drive = &ch->drives[i];
if (!drive->present)
continue;
......@@ -736,12 +734,6 @@ static int init_irq(struct ata_channel *ch)
init_device_queue(drive);
}
if (!hwgroup->hwif) {
hwgroup->hwif = hwgroup->drive->channel;
#ifdef DEBUG
printk("%s : Adding missed channel to hwgroup!!\n", ch->name);
#endif
}
spin_unlock_irqrestore(&ide_lock, flags);
#if !defined(__mc68000__) && !defined(CONFIG_APUS) && !defined(__sparc__)
......@@ -762,6 +754,7 @@ static int init_irq(struct ata_channel *ch)
printk(" (%sed with %s)",
ch->sharing_irq ? "shar" : "serializ", match->name);
printk("\n");
return 0;
}
......
......@@ -369,7 +369,7 @@ int drive_is_flashcard (ide_drive_t *drive)
return 0; /* no, it is not a flash memory card */
}
int __ide_end_request(ide_drive_t *drive, int uptodate, int nr_secs)
int __ide_end_request(struct ata_device *drive, int uptodate, int nr_secs)
{
struct request *rq;
unsigned long flags;
......@@ -388,12 +388,13 @@ int __ide_end_request(ide_drive_t *drive, int uptodate, int nr_secs)
nr_secs = rq->hard_cur_sectors;
/*
* decide whether to reenable DMA -- 3 is a random magic for now,
* if we DMA timeout more than 3 times, just stay in PIO
* Decide whether to reenable DMA -- 3 is a random magic for now,
* if we DMA timeout more than 3 times, just stay in PIO.
*/
if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
drive->state = 0;
HWGROUP(drive)->hwif->dmaproc(ide_dma_on, drive);
drive->channel->dmaproc(ide_dma_on, drive);
}
if (!end_that_request_first(rq, uptodate, nr_secs)) {
......@@ -614,20 +615,21 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
}
/*
* do_reset1() attempts to recover a confused drive by resetting it.
* Unfortunately, resetting a disk drive actually resets all devices on
* the same interface, so it can really be thought of as resetting the
* interface rather than resetting the drive.
* Attempt to recover a confused drive by resetting it. Unfortunately,
* resetting a disk drive actually resets all devices on the same interface, so
* it can really be thought of as resetting the interface rather than resetting
* the drive.
*
* ATAPI devices have their own reset mechanism which allows them to be
* individually reset without clobbering other devices on the same interface.
*
* Unfortunately, the IDE interface does not generate an interrupt to let
* us know when the reset operation has finished, so we must poll for this.
* Equally poor, though, is the fact that this may a very long time to complete,
* (up to 30 seconds worst case). So, instead of busy-waiting here for it,
* we set a timer to poll at 50ms intervals.
* Unfortunately, the IDE interface does not generate an interrupt to let us
* know when the reset operation has finished, so we must poll for this.
* Equally poor, though, is the fact that this may a very long time to
* complete, (up to 30 seconds worst case). So, instead of busy-waiting here
* for it, we set a timer to poll at 50ms intervals.
*/
static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
{
unsigned int unit;
......@@ -694,6 +696,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
#endif
__restore_flags (flags); /* local CPU only */
return ide_started;
}
......@@ -1195,12 +1198,11 @@ void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
/*
* Select the next drive which will be serviced.
*/
static inline ide_drive_t *choose_drive(ide_hwgroup_t *hwgroup)
static struct ata_device *choose_drive(struct ata_device *cur)
{
ide_drive_t *drive, *best;
struct ata_device *drive = cur;
struct ata_device *best = NULL;
best = NULL;
drive = hwgroup->drive;
do {
if (!list_empty(&drive->queue.queue_head)
&& (!drive->PADAM_sleep || time_after_eq(drive->PADAM_sleep, jiffies))) {
......@@ -1212,12 +1214,13 @@ static inline ide_drive_t *choose_drive(ide_hwgroup_t *hwgroup)
best = drive;
}
}
} while ((drive = drive->next) != hwgroup->drive);
drive = drive->next;
} while (drive != cur);
return best;
}
/*
* Issue a new request to a drive from hwgroup
* Issue a new request to a drive from hwgroup.
* Caller must have already done spin_lock_irqsave(&ide_lock, ...)
*
* A hwgroup is a serialized group of IDE interfaces. Usually there is
......@@ -1258,16 +1261,21 @@ static void ide_do_request(struct ata_channel *ch, int masked_irq)
__cli(); /* necessary paranoia: ensure IRQs are masked on local CPU */
while (!test_and_set_bit(IDE_BUSY, &hwgroup->flags)) {
drive = choose_drive(hwgroup);
drive = choose_drive(hwgroup->drive);
if (drive == NULL) {
unsigned long sleep = 0;
hwgroup->rq = NULL;
drive = hwgroup->drive;
do {
if (drive->PADAM_sleep && (!sleep || time_after(sleep, drive->PADAM_sleep)))
sleep = drive->PADAM_sleep;
} while ((drive = drive->next) != hwgroup->drive);
drive = drive->next;
} while (drive!= hwgroup->drive);
if (sleep) {
/*
* Take a short snooze, and then wake up this
......@@ -1295,7 +1303,8 @@ static void ide_do_request(struct ata_channel *ch, int masked_irq)
return; /* no more work for this hwgroup (for now) */
}
ch = drive->channel;
if (hwgroup->hwif->sharing_irq && ch != hwgroup->hwif && ch->io_ports[IDE_CONTROL_OFFSET]) {
if (hwgroup->drive->channel->sharing_irq && ch != hwgroup->drive->channel && ch->io_ports[IDE_CONTROL_OFFSET]) {
/* set nIEN for previous channel */
if (ch->intrproc)
......@@ -1303,7 +1312,6 @@ static void ide_do_request(struct ata_channel *ch, int masked_irq)
else
OUT_BYTE((drive)->ctl|2, ch->io_ports[IDE_CONTROL_OFFSET]);
}
hwgroup->hwif = ch;
hwgroup->drive = drive;
drive->PADAM_sleep = 0;
drive->PADAM_service_start = jiffies;
......@@ -1414,29 +1422,33 @@ void ide_timer_expiry(unsigned long data)
unsigned long wait;
/*
* a global lock protects timers etc -- shouldn't get contention
* worth mentioning
* A global lock protects timers etc -- shouldn't get contention
* worth mentioning.
*/
spin_lock_irqsave(&ide_lock, flags);
del_timer(&hwgroup->timer);
if ((handler = hwgroup->handler) == NULL) {
/*
* Either a marginal timeout occurred
* (got the interrupt just as timer expired),
* or we were "sleeping" to give other devices a chance.
* Either way, we don't really want to complain about anything.
* Either a marginal timeout occurred (got the interrupt just
* as timer expired), or we were "sleeping" to give other
* devices a chance. Either way, we don't really want to
* complain about anything.
*/
if (test_and_clear_bit(IDE_SLEEP, &hwgroup->flags))
clear_bit(IDE_BUSY, &hwgroup->flags);
} else {
ide_drive_t *drive = hwgroup->drive;
struct ata_device *drive = hwgroup->drive;
if (!drive) {
printk("ide_timer_expiry: hwgroup->drive was NULL\n");
hwgroup->handler = NULL;
} else {
struct ata_channel *hwif;
struct ata_channel *ch;
ide_startstop_t startstop;
/* paranoia */
if (!test_and_set_bit(IDE_BUSY, &hwgroup->flags))
printk("%s: ide_timer_expiry: hwgroup was not busy??\n", drive->name);
......@@ -1457,19 +1469,19 @@ void ide_timer_expiry(unsigned long data)
* mask the specific IRQ:
*/
spin_unlock(&ide_lock);
hwif = drive->channel;
ch = drive->channel;
#if DISABLE_IRQ_NOSYNC
disable_irq_nosync(hwif->irq);
disable_irq_nosync(ch->irq);
#else
disable_irq(hwif->irq); /* disable_irq_nosync ?? */
disable_irq(ch->irq); /* disable_irq_nosync ?? */
#endif
__cli(); /* local CPU only, as if we were handling an interrupt */
if (hwgroup->poll_timeout != 0) {
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
if (drive->waiting_for_dma)
hwgroup->hwif->dmaproc(ide_dma_lostirq, drive);
(void) ide_ack_intr(hwif);
ch->dmaproc(ide_dma_lostirq, drive);
(void) ide_ack_intr(ch);
printk("%s: lost interrupt\n", drive->name);
startstop = handler(drive);
} else {
......@@ -1479,16 +1491,16 @@ void ide_timer_expiry(unsigned long data)
} else
startstop = ide_error(drive, "irq timeout", GET_STAT());
}
set_recovery_timer(hwif);
set_recovery_timer(ch);
drive->PADAM_service_time = jiffies - drive->PADAM_service_start;
enable_irq(hwif->irq);
enable_irq(ch->irq);
spin_lock_irq(&ide_lock);
if (startstop == ide_stopped)
clear_bit(IDE_BUSY, &hwgroup->flags);
}
}
ide_do_request(hwgroup->hwif, 0);
ide_do_request(hwgroup->drive->channel, 0);
spin_unlock_irqrestore(&ide_lock, flags);
}
......@@ -1516,32 +1528,34 @@ void ide_timer_expiry(unsigned long data)
* accidentally invoked as a result of any valid command completion interrupt.
*
*/
static void unexpected_intr(int irq, ide_hwgroup_t *hwgroup)
static void unexpected_irq(int irq)
{
int i;
for (i = 0; i < MAX_HWIFS; ++i) {
u8 stat;
struct ata_channel *hwif = hwgroup->hwif;
struct ata_channel *tmp = &ide_hwifs[i];
/*
* handle the unexpected interrupt
*/
do {
if (hwif->irq != irq)
if (!tmp->present)
continue;
if (tmp->irq != irq)
continue;
stat = IN_BYTE(hwif->io_ports[IDE_STATUS_OFFSET]);
stat = IN_BYTE(tmp->io_ports[IDE_STATUS_OFFSET]);
if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
/* Try to not flood the console with msgs */
static unsigned long last_msgtime;
static int count;
++count;
if (time_after(jiffies, last_msgtime + HZ)) {
last_msgtime = jiffies;
printk("%s%s: unexpected interrupt, status=0x%02x, count=%d\n",
hwif->name, (hwif->next == hwgroup->hwif) ? "" : "(?)", stat, count);
printk("%s: unexpected interrupt, status=0x%02x, count=%d\n",
tmp->name, stat, count);
}
}
}
hwif = hwif->next;
} while (hwif != hwgroup->hwif);
}
/*
......@@ -1558,7 +1572,6 @@ void ata_irq_request(int irq, void *data, struct pt_regs *regs)
ide_startstop_t startstop;
spin_lock_irqsave(&ide_lock, flags);
ch = hwgroup->hwif;
if (!ide_ack_intr(ch))
goto out_lock;
......@@ -1583,11 +1596,10 @@ void ata_irq_request(int irq, void *data, struct pt_regs *regs)
if (ch->pci_dev && !ch->pci_dev->vendor)
#endif
{
/*
* Probably not a shared PCI interrupt,
* so we can safely try to do something about it:
/* Probably not a shared PCI interrupt, so we can
* safely try to do something about it:
*/
unexpected_intr(irq, hwgroup);
unexpected_irq(irq);
#ifdef CONFIG_BLK_DEV_IDEPCI
} else {
/*
......@@ -1918,37 +1930,58 @@ ide_proc_entry_t generic_subdriver_entries[] = {
* Note that we only release the standard ports, and do not even try to handle
* any extra ports allocated for weird IDE interface chipsets.
*/
static void hwif_unregister(struct ata_channel *hwif)
static void hwif_unregister(struct ata_channel *ch)
{
if (hwif->straight8) {
release_region(hwif->io_ports[IDE_DATA_OFFSET], 8);
int i;
ide_hwgroup_t *hwgroup = ch->hwgroup;
/*
* Free the irq if we were the only channel using it.
*/
int n = 0;
for (i = 0; i < MAX_HWIFS; ++i) {
struct ata_channel *tmp = &ide_hwifs[i];
if (!tmp->present)
continue;
if (tmp->irq == ch->irq)
++n;
}
if (n == 1)
free_irq(ch->irq, hwgroup);
if (ch->straight8) {
release_region(ch->io_ports[IDE_DATA_OFFSET], 8);
} else {
if (hwif->io_ports[IDE_DATA_OFFSET])
release_region(hwif->io_ports[IDE_DATA_OFFSET], 1);
if (hwif->io_ports[IDE_ERROR_OFFSET])
release_region(hwif->io_ports[IDE_ERROR_OFFSET], 1);
if (hwif->io_ports[IDE_NSECTOR_OFFSET])
release_region(hwif->io_ports[IDE_NSECTOR_OFFSET], 1);
if (hwif->io_ports[IDE_SECTOR_OFFSET])
release_region(hwif->io_ports[IDE_SECTOR_OFFSET], 1);
if (hwif->io_ports[IDE_LCYL_OFFSET])
release_region(hwif->io_ports[IDE_LCYL_OFFSET], 1);
if (hwif->io_ports[IDE_HCYL_OFFSET])
release_region(hwif->io_ports[IDE_HCYL_OFFSET], 1);
if (hwif->io_ports[IDE_SELECT_OFFSET])
release_region(hwif->io_ports[IDE_SELECT_OFFSET], 1);
if (hwif->io_ports[IDE_STATUS_OFFSET])
release_region(hwif->io_ports[IDE_STATUS_OFFSET], 1);
}
if (hwif->io_ports[IDE_CONTROL_OFFSET])
release_region(hwif->io_ports[IDE_CONTROL_OFFSET], 1);
if (ch->io_ports[IDE_DATA_OFFSET])
release_region(ch->io_ports[IDE_DATA_OFFSET], 1);
if (ch->io_ports[IDE_ERROR_OFFSET])
release_region(ch->io_ports[IDE_ERROR_OFFSET], 1);
if (ch->io_ports[IDE_NSECTOR_OFFSET])
release_region(ch->io_ports[IDE_NSECTOR_OFFSET], 1);
if (ch->io_ports[IDE_SECTOR_OFFSET])
release_region(ch->io_ports[IDE_SECTOR_OFFSET], 1);
if (ch->io_ports[IDE_LCYL_OFFSET])
release_region(ch->io_ports[IDE_LCYL_OFFSET], 1);
if (ch->io_ports[IDE_HCYL_OFFSET])
release_region(ch->io_ports[IDE_HCYL_OFFSET], 1);
if (ch->io_ports[IDE_SELECT_OFFSET])
release_region(ch->io_ports[IDE_SELECT_OFFSET], 1);
if (ch->io_ports[IDE_STATUS_OFFSET])
release_region(ch->io_ports[IDE_STATUS_OFFSET], 1);
}
if (ch->io_ports[IDE_CONTROL_OFFSET])
release_region(ch->io_ports[IDE_CONTROL_OFFSET], 1);
#if defined(CONFIG_AMIGA) || defined(CONFIG_MAC)
if (hwif->io_ports[IDE_IRQ_OFFSET])
release_region(hwif->io_ports[IDE_IRQ_OFFSET], 1);
if (ch->io_ports[IDE_IRQ_OFFSET])
release_region(ch->io_ports[IDE_IRQ_OFFSET], 1);
#endif
}
void ide_unregister(struct ata_channel *channel)
void ide_unregister(struct ata_channel *ch)
{
struct gendisk *gd;
ide_drive_t *drive, *d;
......@@ -1957,13 +1990,14 @@ void ide_unregister(struct ata_channel *channel)
unsigned long flags;
unsigned int p, minor;
struct ata_channel old_hwif;
int n = 0;
spin_lock_irqsave(&ide_lock, flags);
if (!channel->present)
if (!ch->present)
goto abort;
put_device(&channel->dev);
put_device(&ch->dev);
for (unit = 0; unit < MAX_DRIVES; ++unit) {
drive = &channel->drives[unit];
drive = &ch->drives[unit];
if (!drive->present)
continue;
if (drive->busy || drive->usage)
......@@ -1976,55 +2010,40 @@ void ide_unregister(struct ata_channel *channel)
ide_unregister_subdriver(drive);
}
}
channel->present = 0;
ch->present = 0;
/*
* All clear? Then blow away the buffer cache
*/
spin_unlock_irqrestore(&ide_lock, flags);
for (unit = 0; unit < MAX_DRIVES; ++unit) {
drive = &channel->drives[unit];
drive = &ch->drives[unit];
if (!drive->present)
continue;
minor = drive->select.b.unit << PARTN_BITS;
for (p = 0; p < (1<<PARTN_BITS); ++p) {
if (drive->part[p].nr_sects > 0) {
kdev_t devp = mk_kdev(channel->major, minor+p);
kdev_t devp = mk_kdev(ch->major, minor+p);
invalidate_device(devp, 0);
}
}
}
#ifdef CONFIG_PROC_FS
destroy_proc_ide_drives(channel);
destroy_proc_ide_drives(ch);
#endif
spin_lock_irqsave(&ide_lock, flags);
hwgroup = channel->hwgroup;
/*
* free the irq if we were the only hwif using it
*/
{
struct ata_channel *g;
int irq_count = 0;
g = hwgroup->hwif;
do {
if (g->irq == channel->irq)
++irq_count;
g = g->next;
} while (g != hwgroup->hwif);
if (irq_count == 1)
free_irq(channel->irq, hwgroup);
}
hwif_unregister(channel);
hwif_unregister(ch);
/*
* Remove us from the hwgroup, and free
* the hwgroup if we were the only member
* Remove us from the hwgroup, and free the hwgroup if we were the only
* member.
*/
hwgroup = ch->hwgroup;
d = hwgroup->drive;
for (i = 0; i < MAX_DRIVES; ++i) {
drive = &channel->drives[i];
drive = &ch->drives[i];
if (drive->de) {
devfs_unregister (drive->de);
drive->de = NULL;
......@@ -2045,27 +2064,33 @@ void ide_unregister(struct ata_channel *channel)
}
if (d->present)
hwgroup->drive = d;
while (hwgroup->hwif->next != channel)
hwgroup->hwif = hwgroup->hwif->next;
hwgroup->hwif->next = channel->next;
if (hwgroup->hwif == channel)
kfree(hwgroup);
else
hwgroup->hwif = hwgroup->drive->channel;
n = 0;
for (i = 0; i < MAX_HWIFS; ++i) {
struct ata_channel *tmp = &ide_hwifs[i];
if (!tmp->present)
continue;
if (tmp->hwgroup == ch->hwgroup)
++n;
}
if (n == 1)
kfree(ch->hwgroup);
#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
ide_release_dma(channel);
ide_release_dma(ch);
#endif
/*
* Remove us from the kernel's knowledge.
*/
unregister_blkdev(channel->major, channel->name);
kfree(blksize_size[channel->major]);
blk_dev[channel->major].data = NULL;
blk_dev[channel->major].queue = NULL;
blk_clear(channel->major);
gd = channel->gd;
unregister_blkdev(ch->major, ch->name);
kfree(blksize_size[ch->major]);
blk_dev[ch->major].data = NULL;
blk_dev[ch->major].queue = NULL;
blk_clear(ch->major);
gd = ch->gd;
if (gd) {
del_gendisk(gd);
kfree(gd->sizes);
......@@ -2075,7 +2100,7 @@ void ide_unregister(struct ata_channel *channel)
if (gd->flags)
kfree (gd->flags);
kfree(gd);
channel->gd = NULL;
ch->gd = NULL;
}
/*
......@@ -2083,42 +2108,41 @@ void ide_unregister(struct ata_channel *channel)
* it.
*/
old_hwif = *channel;
init_hwif_data(channel, channel->index);
channel->hwgroup = old_hwif.hwgroup;
channel->tuneproc = old_hwif.tuneproc;
channel->speedproc = old_hwif.speedproc;
channel->selectproc = old_hwif.selectproc;
channel->resetproc = old_hwif.resetproc;
channel->intrproc = old_hwif.intrproc;
channel->maskproc = old_hwif.maskproc;
channel->quirkproc = old_hwif.quirkproc;
channel->rwproc = old_hwif.rwproc;
channel->ata_read = old_hwif.ata_read;
channel->ata_write = old_hwif.ata_write;
channel->atapi_read = old_hwif.atapi_read;
channel->atapi_write = old_hwif.atapi_write;
channel->dmaproc = old_hwif.dmaproc;
channel->busproc = old_hwif.busproc;
channel->bus_state = old_hwif.bus_state;
channel->dma_base = old_hwif.dma_base;
channel->dma_extra = old_hwif.dma_extra;
channel->config_data = old_hwif.config_data;
channel->select_data = old_hwif.select_data;
channel->proc = old_hwif.proc;
old_hwif = *ch;
init_hwif_data(ch, ch->index);
ch->hwgroup = old_hwif.hwgroup;
ch->tuneproc = old_hwif.tuneproc;
ch->speedproc = old_hwif.speedproc;
ch->selectproc = old_hwif.selectproc;
ch->resetproc = old_hwif.resetproc;
ch->intrproc = old_hwif.intrproc;
ch->maskproc = old_hwif.maskproc;
ch->quirkproc = old_hwif.quirkproc;
ch->rwproc = old_hwif.rwproc;
ch->ata_read = old_hwif.ata_read;
ch->ata_write = old_hwif.ata_write;
ch->atapi_read = old_hwif.atapi_read;
ch->atapi_write = old_hwif.atapi_write;
ch->dmaproc = old_hwif.dmaproc;
ch->busproc = old_hwif.busproc;
ch->bus_state = old_hwif.bus_state;
ch->dma_base = old_hwif.dma_base;
ch->dma_extra = old_hwif.dma_extra;
ch->config_data = old_hwif.config_data;
ch->select_data = old_hwif.select_data;
ch->proc = old_hwif.proc;
#ifndef CONFIG_BLK_DEV_IDECS
channel->irq = old_hwif.irq;
ch->irq = old_hwif.irq;
#endif
channel->major = old_hwif.major;
channel->chipset = old_hwif.chipset;
channel->autodma = old_hwif.autodma;
channel->udma_four = old_hwif.udma_four;
ch->major = old_hwif.major;
ch->chipset = old_hwif.chipset;
ch->autodma = old_hwif.autodma;
ch->udma_four = old_hwif.udma_four;
#ifdef CONFIG_BLK_DEV_IDEPCI
channel->pci_dev = old_hwif.pci_dev;
#else
channel->pci_dev = NULL;
ch->pci_dev = old_hwif.pci_dev;
#endif
channel->straight8 = old_hwif.straight8;
ch->straight8 = old_hwif.straight8;
abort:
spin_unlock_irqrestore(&ide_lock, flags);
}
......
......@@ -73,7 +73,6 @@ typedef unsigned char byte; /* used everywhere */
*/
#define DMA_PIO_RETRY 1 /* retrying in PIO */
#define HWIF(drive) ((drive)->channel)
#define HWGROUP(drive) (drive->channel->hwgroup)
/*
......@@ -254,7 +253,7 @@ void ide_setup_ports(hw_regs_t *hw,
#endif
/*
* Now for the data we need to maintain per-drive: ide_drive_t
* Device types - the nomenclature is analogous to SCSI code.
*/
#define ATA_DISK 0x20
......@@ -266,7 +265,10 @@ void ide_setup_ports(hw_regs_t *hw,
#define ATA_NO_LUN 0x7f
struct ide_settings_s;
/* structure describing an ATA/ATAPI device */
/*
* ATA/ATAPI device structure :
*/
typedef
struct ata_device {
struct ata_channel * channel;
......@@ -401,7 +403,6 @@ struct ata_channel {
struct device dev; /* device handle */
int unit; /* channel number */
struct ata_channel *next; /* for linked-list in ide_hwgroup_t */
struct hwgroup_s *hwgroup; /* actually (ide_hwgroup_t *) */
ide_ioreg_t io_ports[IDE_NR_PORTS]; /* task file registers */
......@@ -516,13 +517,10 @@ typedef int (ide_expiry_t)(ide_drive_t *);
#define IDE_DMA 2 /* DMA in progress */
typedef struct hwgroup_s {
ide_handler_t *handler;/* irq handler, if active */
ide_handler_t *handler; /* irq handler, if active */
unsigned long flags; /* BUSY, SLEEPING */
ide_drive_t *drive; /* current drive */
struct ata_channel *hwif; /* ptr to current hwif in linked-list */
struct ata_device *drive; /* current drive */
struct request *rq; /* current request */
struct timer_list timer; /* failsafe timer */
struct request wrq; /* local copy of current write rq */
unsigned long poll_timeout; /* timeout value during long polls */
......