Commit 33aadf57 authored by Richard Gooch

Merge atnf.csiro.au:/workaholix1/kernel/v2.5/linus
into atnf.csiro.au:/workaholix1/kernel/v2.5/rgooch-2.5
Parents: 6148fc04 5fe41502

bk-kernel-howto.txt: Description of kernel workflow under BitKeeper
bk-make-sum: Create a summary of the changesets that are in one repository and
not in another, typically in preparation to be sent to an upstream maintainer.
Typical usage:
cd my-updated-repo
bk-make-sum ~/repo/original-repo
mv /tmp/linus.txt ../original-repo.txt
bksend: Create readable text output containing a summary of the changes, a GNU
patch of the changes, and the BK metadata of the changes (as needed for proper
importing into BitKeeper by an upstream maintainer). This output is
suitable for emailing BitKeeper changes. The recipient of this output
may pipe it directly to 'bk receive'.
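A typical invocation might look like the following sketch; the -r<rev> revision
argument is an assumption about the script's option syntax, and the mail command
and address are purely illustrative:
	bksend -r<rev> > changes.txt        # summary, GNU patch and BK metadata for the chosen cset(s)
	mail -s "[BK PATCH] short description" maintainer@example.org < changes.txt
	# on the receiving side, inside the target repository:
	bk receive < changes.txt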
bz64wrap: helper script. Uncompressed input is piped to this script, which
compresses it and then writes the uu-/base64-encoded version of the
compressed data to standard output.
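Since bz64wrap is a plain stdin-to-stdout filter, it can be dropped into any
pipeline; a minimal sketch with illustrative file names:
	bz64wrap < changes.patch > changes.b64    # compress and uu-/base64-encode a text stream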
csets-to-patches: Produces a delta of two BK repositories, in the form
of individual files, each containing a single cset as a GNU patch.
Output is several files, each with the filename "/tmp/rev-$REV.patch".
Typical usage:
cd my-updated-repo
bk changes -L ~/repo/original-repo 2>&1 | \
perl csets-to-patches
cset-to-linus: Produces a delta of two BK repositories, in the form of
changeset descriptions, with 'diffstat' output created for each
individual changeset.
Typical usage:
cd my-updated-repo
bk changes -L ~/repo/original-repo 2>&1 | \
perl cset-to-linus > summary.txt
unbz64wrap: Reverses an encoded, compressed data stream created by bz64wrap
back into its uncompressed, typically text/plain, form.
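unbz64wrap is the inverse filter of bz64wrap, so recovering the original stream
is just the pipeline reversed; file names again illustrative:
	unbz64wrap < changes.b64 > changes.patch  # decode and decompress back to the original text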
......@@ -32,7 +32,7 @@ land at the right destination... but I'm getting ahead of myself.
Let's start with this progression:
Each BitKeeper source tree on disk is a repository unto itself.
Each repository has a parent.
Each repository has a parent (except the root/original, of course).
Each repository contains a set of changesets ("csets").
Each cset is one or more changed files, bundled together.
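A minimal command-level sketch of that progression, using standard BitKeeper
commands (the repository URL and directory names are illustrative):
	bk clone bk://linux.bkbits.net/linux-2.5 my-2.5   # a new repository whose parent is the clone source
	cd my-2.5
	bk parent                                         # report this repository's parent
	bk changes                                        # list the csets this repository contains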
......
......@@ -118,6 +118,8 @@ deadline_find_hash(struct deadline_data *dd, sector_t offset)
while ((entry = next) != hash_list) {
next = entry->next;
prefetch(next);
drq = list_entry_hash(entry);
BUG_ON(!drq->hash_valid_count);
......@@ -191,6 +193,8 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
while ((entry = entry->prev) != sort_list) {
__rq = list_entry_rq(entry);
prefetch(entry->prev);
BUG_ON(__rq->flags & REQ_STARTED);
if (!(__rq->flags & REQ_CMD))
......@@ -298,6 +302,8 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
struct list_head *nxt = rq->queuelist.next;
int this_rq_cost;
prefetch(nxt);
/*
* take it off the sort and fifo list, move
* to dispatch queue
......
......@@ -272,13 +272,27 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
e->elevator_merge_req_fn(q, rq, next);
}
/*
* add_request and next_request are required to be supported, naturally
*/
void __elv_add_request(request_queue_t *q, struct request *rq,
struct list_head *insert_here)
void __elv_add_request(request_queue_t *q, struct request *rq, int at_end,
int plug)
{
struct list_head *insert = &q->queue_head;
if (at_end)
insert = insert->prev;
if (plug)
blk_plug_device(q);
q->elevator.elevator_add_req_fn(q, rq, insert);
}
void elv_add_request(request_queue_t *q, struct request *rq, int at_end,
int plug)
{
q->elevator.elevator_add_req_fn(q, rq, insert_here);
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__elv_add_request(q, rq, at_end, plug);
spin_unlock_irqrestore(q->queue_lock, flags);
}
static inline struct request *__elv_next_request(request_queue_t *q)
......@@ -289,8 +303,14 @@ static inline struct request *__elv_next_request(request_queue_t *q)
struct request *elv_next_request(request_queue_t *q)
{
struct request *rq;
int ret;
while ((rq = __elv_next_request(q))) {
/*
* just mark as started even if we don't start it, a request
* that has been delayed should not be passed by new incoming
* requests
*/
rq->flags |= REQ_STARTED;
if (&rq->queuelist == q->last_merge)
......@@ -299,20 +319,22 @@ struct request *elv_next_request(request_queue_t *q)
if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
break;
/*
* all ok, break and return it
*/
if (!q->prep_rq_fn(q, rq))
ret = q->prep_rq_fn(q, rq);
if (ret == BLKPREP_OK) {
break;
/*
* prep said no-go, kill it
*/
blkdev_dequeue_request(rq);
if (end_that_request_first(rq, 0, rq->nr_sectors))
BUG();
end_that_request_last(rq);
} else if (ret == BLKPREP_DEFER) {
rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
blkdev_dequeue_request(rq);
rq->flags |= REQ_QUIET;
while (end_that_request_first(rq, 0, rq->nr_sectors))
;
end_that_request_last(rq);
} else {
printk("%s: bad return=%d\n", __FUNCTION__, ret);
break;
}
}
return rq;
......@@ -322,6 +344,16 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
{
elevator_t *e = &q->elevator;
/*
* the main clearing point for q->last_merge is on retrieval of
* request by driver (it calls elv_next_request()), but it _can_
* also happen here if a request is added to the queue but later
* deleted without ever being given to driver (merged with another
* request).
*/
if (&rq->queuelist == q->last_merge)
q->last_merge = NULL;
if (e->elevator_remove_req_fn)
e->elevator_remove_req_fn(q, rq);
}
......@@ -357,6 +389,7 @@ module_init(elevator_global_init);
EXPORT_SYMBOL(elevator_noop);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_remove_request);
......
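The elevator.c hunks above change the request-insertion API: instead of passing a
struct list_head insertion point, callers now pass an at_end flag and a plug flag.
elv_add_request() takes the queue lock itself, while __elv_add_request() expects
the caller to already hold it, which is how the converted ide_do_drive_cmd() and
SCSI callers below use it. A minimal caller sketch under those assumptions, with
q and rq standing for an existing queue and request:
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, 1, 0);   /* lock already held: insert at tail, do not plug */
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* or, when the queue lock is not held: */
	elv_add_request(q, rq, 1, 1);     /* locks the queue, inserts at tail and plugs the device */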
......@@ -548,12 +548,7 @@ static void process_page(unsigned long data)
return_bio = bio->bi_next;
bio->bi_next = NULL;
/* should use bio_endio(), however already cleared
* BIO_UPTODATE. so set bio->bi_size = 0 manually to indicate
* completely done
*/
bio->bi_size = 0;
bio->bi_end_io(bio, bytes, 0);
bio_endio(bio, bio->bi_size, 0);
}
}
......@@ -1041,7 +1036,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
spin_lock_init(&card->lock);
dev->driver_data = card;
pci_set_drvdata(dev, card);
if (pci_write_cmd != 0x0F) /* If not Memory Write & Invalidate */
pci_write_cmd = 0x07; /* then Memory Write command */
......@@ -1100,7 +1095,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
*/
static void mm_pci_remove(struct pci_dev *dev)
{
struct cardinfo *card = dev->driver_data;
struct cardinfo *card = pci_get_drvdata(dev);
tasklet_kill(&card->tasklet);
iounmap(card->csr_remap);
......
......@@ -1610,56 +1610,6 @@ static void idedisk_add_settings(ide_drive_t *drive)
#endif
}
static int idedisk_suspend(struct device *dev, u32 state, u32 level)
{
ide_drive_t *drive = dev->driver_data;
printk("Suspending device %p\n", dev->driver_data);
/* I hope that every freeze operation from the upper levels have
* already been done...
*/
if (level != SUSPEND_SAVE_STATE)
return 0;
BUG_ON(in_interrupt());
printk("Waiting for commands to finish\n");
/* wait until all commands are finished */
/* FIXME: waiting for spinlocks should be done instead. */
if (!(HWGROUP(drive)))
printk("No hwgroup?\n");
while (HWGROUP(drive)->handler)
yield();
/* set the drive to standby */
printk(KERN_INFO "suspending: %s ", drive->name);
if (drive->driver) {
if (drive->driver->standby)
drive->driver->standby(drive);
}
drive->blocked = 1;
while (HWGROUP(drive)->handler)
yield();
return 0;
}
static int idedisk_resume(struct device *dev, u32 level)
{
ide_drive_t *drive = dev->driver_data;
if (level != RESUME_RESTORE_STATE)
return 0;
if (!drive->blocked)
panic("ide: Resume but not suspended?\n");
drive->blocked = 0;
return 0;
}
/* This is just a hook for the overall driver tree.
*/
......
......@@ -1238,6 +1238,21 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
set_bit(PC_DMA_RECOMMENDED, &pc->flags);
}
static int
idefloppy_blockpc_cmd(idefloppy_floppy_t *floppy, idefloppy_pc_t *pc, struct request *rq)
{
/*
* just support eject for now, it would not be hard to make the
* REQ_BLOCK_PC support fully-featured
*/
if (rq->cmd[0] != IDEFLOPPY_START_STOP_CMD)
return 1;
idefloppy_init_pc(pc);
memcpy(pc->c, rq->cmd, sizeof(pc->c));
return 0;
}
/*
* idefloppy_do_request is our request handling function.
*/
......@@ -1280,6 +1295,12 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
idefloppy_create_rw_cmd(floppy, pc, rq, block);
} else if (rq->flags & REQ_SPECIAL) {
pc = (idefloppy_pc_t *) rq->buffer;
} else if (rq->flags & REQ_BLOCK_PC) {
pc = idefloppy_next_pc_storage(drive);
if (idefloppy_blockpc_cmd(floppy, pc, rq)) {
idefloppy_do_end_request(drive, 0, 0);
return ide_stopped;
}
} else {
blk_dump_rq_flags(rq,
"ide-floppy: unsupported command in queue");
......
......@@ -878,13 +878,12 @@ ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
ide_startstop_t startstop;
unsigned long block;
ide_hwif_t *hwif = HWIF(drive);
BUG_ON(!(rq->flags & REQ_STARTED));
#ifdef DEBUG
printk("%s: start_request: current=0x%08lx\n",
hwif->name, (unsigned long) rq);
HWIF(drive)->name, (unsigned long) rq);
#endif
/* bail early if we've exceeded max_failures */
......@@ -910,7 +909,7 @@ ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
block = 1; /* redirect MBR access to EZ-Drive partn table */
#if (DISK_RECOVERY_TIME > 0)
while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);
while ((read_timer() - HWIF(drive)->last_time) < DISK_RECOVERY_TIME);
#endif
SELECT_DRIVE(drive);
......@@ -1128,9 +1127,15 @@ void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
break;
}
/*
* we know that the queue isn't empty, but this can happen
* if the q->prep_rq_fn() decides to kill a request
*/
rq = elv_next_request(&drive->queue);
if (!rq)
if (!rq) {
hwgroup->busy = !!ata_pending_commands(drive);
break;
}
if (!rq->bio && ata_pending_commands(drive))
break;
......@@ -1515,10 +1520,8 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
{
unsigned long flags;
ide_hwgroup_t *hwgroup = HWGROUP(drive);
unsigned int major = HWIF(drive)->major;
request_queue_t *q = &drive->queue;
struct list_head *queue_head = &q->queue_head;
DECLARE_COMPLETION(wait);
int insert_end = 1, err;
#ifdef CONFIG_BLK_DEV_PDC4030
if (HWIF(drive)->chipset == ide_pdc4030 && rq->buffer != NULL)
......@@ -1540,29 +1543,35 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
}
rq->rq_disk = drive->disk;
if (action == ide_wait)
/*
* we need to hold an extra reference to request for safe inspection
* after completion
*/
if (action == ide_wait) {
rq->ref_count++;
rq->waiting = &wait;
}
spin_lock_irqsave(&ide_lock, flags);
if (blk_queue_empty(q) || action == ide_preempt) {
if (action == ide_preempt)
hwgroup->rq = NULL;
} else {
if (action == ide_wait || action == ide_end) {
queue_head = queue_head->prev;
} else
queue_head = queue_head->next;
if (action == ide_preempt) {
hwgroup->rq = NULL;
insert_end = 0;
}
q->elevator.elevator_add_req_fn(q, rq, queue_head);
__elv_add_request(&drive->queue, rq, insert_end, 0);
ide_do_request(hwgroup, 0);
spin_unlock_irqrestore(&ide_lock, flags);
err = 0;
if (action == ide_wait) {
/* wait for it to be serviced */
wait_for_completion(&wait);
/* return -EIO if errors */
return rq->errors ? -EIO : 0;
if (rq->errors)
err = -EIO;
blk_put_request(rq);
}
return 0;
return err;
}
EXPORT_SYMBOL(ide_do_drive_cmd);
......@@ -3369,7 +3378,7 @@ int ide_register_driver(ide_driver_t *driver)
list_del_init(&drive->list);
ata_attach(drive);
}
driver->gen_driver.name = driver->name;
driver->gen_driver.name = (char *) driver->name;
driver->gen_driver.bus = &ide_bus_type;
driver->gen_driver.remove = ide_drive_remove;
return driver_register(&driver->gen_driver);
......
......@@ -316,7 +316,7 @@ static void hpt366_tune_chipset (ide_drive_t *drive, u8 xferspeed)
#endif
reg2 = pci_bus_clock_list(speed,
(struct chipset_bus_clock_list_entry *) dev->driver_data);
(struct chipset_bus_clock_list_entry *) pci_get_drvdata(dev));
/*
* Disable on-chip PIO FIFO/buffer
* (to avoid problems handling I/O errors later)
......@@ -369,7 +369,7 @@ static void hpt370_tune_chipset (ide_drive_t *drive, u8 xferspeed)
list_conf = pci_bus_clock_list(speed,
(struct chipset_bus_clock_list_entry *)
dev->driver_data);
pci_get_drvdata(dev));
pci_read_config_dword(dev, drive_pci, &drive_conf);
list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);
......@@ -401,7 +401,7 @@ static void hpt372_tune_chipset (ide_drive_t *drive, u8 xferspeed)
list_conf = pci_bus_clock_list(speed,
(struct chipset_bus_clock_list_entry *)
dev->driver_data);
pci_get_drvdata(dev));
pci_read_config_dword(dev, drive_pci, &drive_conf);
list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);
if (speed < XFER_MW_DMA_0)
......@@ -841,7 +841,7 @@ static int __init init_hpt37x(struct pci_dev *dev)
* don't like to use the PLL because it will cause glitches
* on PRST/SRST when the HPT state engine gets reset.
*/
if (dev->driver_data)
if (pci_get_drvdata(dev))
goto init_hpt37X_done;
/*
......@@ -923,7 +923,7 @@ static int __init init_hpt366 (struct pci_dev *dev)
break;
}
if (!dev->driver_data)
if (!pci_get_drvdata(dev))
{
printk(KERN_ERR "hpt366: unknown bus timing.\n");
return -EOPNOTSUPP;
......
......@@ -30,8 +30,8 @@ static int n_siimage_devs;
static char * print_siimage_get_info (char *buf, struct pci_dev *dev, int index)
{
char *p = buf;
u8 mmio = (dev->driver_data != NULL) ? 1 : 0;
u32 bmdma = (mmio) ? ((u32) dev->driver_data) :
u8 mmio = (pci_get_drvdata(dev) != NULL) ? 1 : 0;
u32 bmdma = (mmio) ? ((u32) pci_get_drvdata(dev)) :
(pci_resource_start(dev, 4));
p += sprintf(p, "\nController: %d\n", index);
......@@ -769,14 +769,14 @@ static void __init init_iops_siimage (ide_hwif_t *hwif)
if ((dev->device == PCI_DEVICE_ID_SII_3112) && (!(class_rev)))
hwif->rqsize = 16;
if (dev->driver_data == NULL)
if (pci_get_drvdata(dev) == NULL)
return;
init_mmio_iops_siimage(hwif);
}
static unsigned int __init ata66_siimage (ide_hwif_t *hwif)
{
if (hwif->pci_dev->driver_data == NULL) {
if (pci_get_drvdata(hwif->pci_dev) == NULL) {
u8 ata66 = 0;
pci_read_config_byte(hwif->pci_dev, SELREG(0), &ata66);
return (ata66 & 0x01) ? 1 : 0;
......
......@@ -52,19 +52,21 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
* @bio: the buffer head that's been built up so far
* @biovec: the request that could be merged to it.
*
* Return 1 if the merge is not permitted (because the
* result would cross a device boundary), 0 otherwise.
* Return amount of bytes we can take at this offset
*/
static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
dev_info_t *dev0, *dev1;
dev_info_t *dev0;
int maxsectors, bio_sectors = (bio->bi_size + biovec->bv_len) >> 9;
dev0 = which_dev(mddev, bio->bi_sector);
dev1 = which_dev(mddev, bio->bi_sector +
((bio->bi_size + biovec->bv_len - 1) >> 9));
maxsectors = (dev0->size << 1) - (bio->bi_sector - (dev0->offset<<1));
return dev0 != dev1;
if (bio_sectors <= maxsectors)
return biovec->bv_len;
return (maxsectors << 9) - bio->bi_size;
}
static int linear_run (mddev_t *mddev)
......
......@@ -168,8 +168,7 @@ static int create_strip_zones (mddev_t *mddev)
* @bio: the buffer head that's been built up so far
* @biovec: the request that could be merged to it.
*
* Return 1 if the merge is not permitted (because the
* result would cross a chunk boundary), 0 otherwise.
* Return amount of bytes we can accept at this offset
*/
static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
{
......@@ -182,7 +181,7 @@ static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_
block = bio->bi_sector >> 1;
bio_sz = (bio->bi_size + biovec->bv_len) >> 10;
return chunk_size < ((block & (chunk_size - 1)) + bio_sz);
return (chunk_size - ((block & (chunk_size - 1)) + bio_sz)) << 10;
}
static int raid0_run (mddev_t *mddev)
......
......@@ -122,8 +122,7 @@ static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int debug = -1;
#define DEBUG_DEFAULT (NETIF_MSG_DRV | \
NETIF_MSG_IFDOWN | \
NETIF_MSG_IFUP | \
NETIF_MSG_HW | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
#define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
......@@ -568,10 +567,8 @@ static inline unsigned char wait_for_cmd_done(struct net_device *dev)
r = inb(cmd_ioaddr);
} while(r && --wait >= 0);
#ifndef final_version
if (wait < 0)
printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
#endif
return r;
}
......@@ -852,7 +849,9 @@ static int __devinit speedo_found1(struct pci_dev *pdev,
sp->phy[0] = eeprom[6];
sp->phy[1] = eeprom[7];
sp->mii_if.phy_id = eeprom[6];
sp->mii_if.phy_id = eeprom[6] & 0x1f;
sp->mii_if.phy_id_mask = 0x1f;
sp->mii_if.reg_num_mask = 0x1f;
sp->mii_if.dev = dev;
sp->mii_if.mdio_read = mdio_read;
sp->mii_if.mdio_write = mdio_write;
......@@ -1207,7 +1206,7 @@ static void speedo_timer(unsigned long data)
/* We haven't received a packet in a Long Time. We might have been
bitten by the receiver hang bug. This can be cleared by sending
a set multicast list command. */
if (netif_msg_rx_err(sp))
if (netif_msg_timer(sp))
printk(KERN_DEBUG "%s: Sending a multicast list set command"
" from a timer routine,"
" m=%d, j=%ld, l=%ld.\n",
......@@ -1224,25 +1223,26 @@ static void speedo_show_state(struct net_device *dev)
struct speedo_private *sp = (struct speedo_private *)dev->priv;
int i;
/* Print a few items for debugging. */
printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n", dev->name,
sp->cur_tx, sp->dirty_tx);
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
i, sp->tx_ring[i].status);
printk(KERN_DEBUG "%s: Printing Rx ring"
" (next to receive into %u, dirty index %u).\n",
dev->name, sp->cur_rx, sp->dirty_rx);
for (i = 0; i < RX_RING_SIZE; i++)
printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
i, (sp->rx_ringp[i] != NULL) ?
(unsigned)sp->rx_ringp[i]->status : 0);
if (netif_msg_pktdata(sp)) {
printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
dev->name, sp->cur_tx, sp->dirty_tx);
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
i, sp->tx_ring[i].status);
printk(KERN_DEBUG "%s: Printing Rx ring"
" (next to receive into %u, dirty index %u).\n",
dev->name, sp->cur_rx, sp->dirty_rx);
for (i = 0; i < RX_RING_SIZE; i++)
printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
i, (sp->rx_ringp[i] != NULL) ?
(unsigned)sp->rx_ringp[i]->status : 0);
}
#if 0
{
......@@ -1378,8 +1378,8 @@ static void speedo_tx_timeout(struct net_device *dev)
sp->dirty_tx, sp->cur_tx,
sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
speedo_show_state(dev);
}
speedo_show_state(dev);
#if 0
if ((status & 0x00C0) != 0x0080
&& (status & 0x003C) == 0x0010) {
......@@ -1564,13 +1564,6 @@ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
long ioaddr, boguscnt = max_interrupt_work;
unsigned short status;
#ifndef final_version
if (dev == NULL) {
printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n", irq);
return;
}
#endif
ioaddr = dev->base_addr;
sp = (struct speedo_private *)dev->priv;
......@@ -1717,9 +1710,9 @@ static int speedo_refill_rx_buf(struct net_device *dev, int force)
if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
dev->name, force);
speedo_show_state(dev);
sp->rx_ring_state |= RrOOMReported;
}
speedo_show_state(dev);
if (!force)
return -1; /* Better luck next time! */
/* Borrow an skb from one of next entries. */
......@@ -1792,7 +1785,7 @@ speedo_rx(struct net_device *dev)
break;
}
if (netif_msg_intr(sp))
if (netif_msg_rx_status(sp))
printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
pkt_len);
if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
......@@ -1894,10 +1887,7 @@ speedo_close(struct net_device *dev)
udelay(10);
free_irq(dev->irq, dev);
/* Print a few items for debugging. */
if (netif_msg_ifdown(sp))
speedo_show_state(dev);
speedo_show_state(dev);
/* Free all the skbuffs in the Rx and Tx queues. */
for (i = 0; i < RX_RING_SIZE; i++) {
......
......@@ -1412,8 +1412,7 @@ static int i596_close(struct net_device *dev)
DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
dev->name, lp->scb.status));
save_flags(flags);
cli();
spin_lock_irqsave(&lp->lock, flags);
wait_cmd(dev,lp,100,"close1 timed out");
lp->scb.command = CUC_ABORT | RX_ABORT;
......@@ -1422,7 +1421,7 @@ static int i596_close(struct net_device *dev)
CA(dev);
wait_cmd(dev,lp,100,"close2 timed out");
restore_flags(flags);
spin_unlock_irqrestore(&lp->lock, flags);
DEB(DEB_STRUCT,i596_display_data(dev));
i596_cleanup_cmd(dev,lp);
......
......@@ -299,9 +299,9 @@ int generic_mii_ioctl(struct mii_if_info *mii_if,
case MII_BMCR: {
unsigned int new_duplex = 0;
if (val & (BMCR_RESET|BMCR_ANENABLE))
mii_if->force_media = 1;
else
mii_if->force_media = 0;
else
mii_if->force_media = 1;
if (mii_if->force_media &&
(val & BMCR_FULLDPLX))
new_duplex = 1;
......
......@@ -22,8 +22,8 @@
*************************************************************************/
#define DRV_NAME "pcnet32"
#define DRV_VERSION "1.27a"
#define DRV_RELDATE "10.02.2002"
#define DRV_VERSION "1.27b"
#define DRV_RELDATE "01.10.2002"
#define PFX DRV_NAME ": "
static const char *version =
......@@ -96,6 +96,8 @@ static int rx_copybreak = 200;
#define PCNET32_DMA_MASK 0xffffffff
#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
/*
* table to translate option values from tulip
* to internal options
......@@ -211,6 +213,8 @@ static int full_duplex[MAX_UNITS];
* fix pci probe not increment cards_found
* FD auto negotiate error workaround for xSeries250
* clean up and using new mii module
* v1.27b Sep 30 2002 Kent Yoder <yoder1@us.ibm.com>
* Added timer for cable connection state changes.
*/
......@@ -318,6 +322,7 @@ struct pcnet32_private {
mii:1; /* mii port available */
struct net_device *next;
struct mii_if_info mii_if;
struct timer_list watchdog_timer;
};
static void pcnet32_probe_vlbus(void);
......@@ -333,6 +338,7 @@ static int pcnet32_close(struct net_device *);
static struct net_device_stats *pcnet32_get_stats(struct net_device *);
static void pcnet32_set_multicast_list(struct net_device *);
static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
static void pcnet32_watchdog(struct net_device *);
static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val);
......@@ -777,6 +783,13 @@ pcnet32_probe1(unsigned long ioaddr, unsigned int irq_line, int shared,
}
}
/* Set the mii phy_id so that we can query the link state */
if (lp->mii)
lp->mii_if.phy_id = ((lp->a.read_bcr (ioaddr, 33)) >> 5) & 0x1f;
init_timer (&lp->watchdog_timer);
lp->watchdog_timer.data = (unsigned long) dev;
lp->watchdog_timer.function = (void *) &pcnet32_watchdog;
/* The PCNET32-specific entries in the device structure. */
dev->open = &pcnet32_open;
......@@ -901,6 +914,12 @@ pcnet32_open(struct net_device *dev)
netif_start_queue(dev);
/* If we have mii, print the link status and start the watchdog */
if (lp->mii) {
mii_check_media (&lp->mii_if, 1, 1);
mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
}
i = 0;
while (i++ < 100)
if (lp->a.read_csr (ioaddr, 0) & 0x0100)
......@@ -1371,6 +1390,8 @@ pcnet32_close(struct net_device *dev)
struct pcnet32_private *lp = dev->priv;
int i;
del_timer_sync(&lp->watchdog_timer);
netif_stop_queue(dev);
lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112);
......@@ -1651,6 +1672,17 @@ static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EOPNOTSUPP;
}
static void pcnet32_watchdog(struct net_device *dev)
{
struct pcnet32_private *lp = dev->priv;
/* Print the link status if it has changed */
if (lp->mii)
mii_check_media (&lp->mii_if, 1, 0);
mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
}
static struct pci_driver pcnet32_driver = {
name: DRV_NAME,
probe: pcnet32_probe_pci,
......
......@@ -1780,7 +1780,7 @@ static int xl_change_mtu(struct net_device *dev, int mtu)
static void __devexit xl_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pdev->driver_data;
struct net_device *dev = pci_get_drvdata(pdev);
struct xl_private *xl_priv=(struct xl_private *)dev->priv;
unregister_trdev(dev);
......
......@@ -65,6 +65,7 @@
* 11/05/01 - Restructured the interrupt function, added delays, reduced the
* the number of TX descriptors to 1, which together can prevent
* the card from locking up the box - <yoder1@us.ibm.com>
* 09/27/02 - New PCI interface + bug fix. - <yoder1@us.ibm.com>
*
* To Do:
*
......@@ -136,7 +137,7 @@
*/
static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
" v0.5.1 03/04/02 - Kent Yoder";
" v0.5.2 09/30/02 - Kent Yoder";
static struct pci_device_id streamer_pci_tbl[] __initdata = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
......@@ -250,6 +251,12 @@ static int __devinit streamer_init_one(struct pci_dev *pdev,
dev_streamer=streamer_priv;
#endif
#endif
if(pci_set_dma_mask(pdev, 0xFFFFFFFF)) {
printk(KERN_ERR "%s: No suitable PCI mapping available.\n", dev->name);
rc = -ENODEV;
goto err_out;
}
if (pci_enable_device(pdev)) {
printk(KERN_ERR "lanstreamer: unable to enable pci device\n");
......@@ -481,9 +488,11 @@ static int streamer_reset(struct net_device *dev)
data=((u8 *)skb->data)+sizeof(struct streamer_rx_desc);
rx_ring->forward=0;
rx_ring->status=0;
rx_ring->buffer=virt_to_bus(data);
rx_ring->buffer=cpu_to_le32(pci_map_single(streamer_priv->pci_dev, data,
512, PCI_DMA_FROMDEVICE));
rx_ring->framelen_buflen=512;
writel(virt_to_bus(rx_ring),streamer_mmio+RXBDA);
writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, rx_ring, 512, PCI_DMA_FROMDEVICE)),
streamer_mmio+RXBDA);
}
#if STREAMER_DEBUG
......@@ -499,6 +508,8 @@ static int streamer_reset(struct net_device *dev)
printk(KERN_ERR
"IBM PCI tokenring card not responding\n");
release_region(dev->base_addr, STREAMER_IO_SPACE);
if (skb)
dev_kfree_skb(skb);
return -1;
}
}
......@@ -773,14 +784,19 @@ static int streamer_open(struct net_device *dev)
skb->dev = dev;
streamer_priv->streamer_rx_ring[i].forward = virt_to_bus(&streamer_priv->streamer_rx_ring[i + 1]);
streamer_priv->streamer_rx_ring[i].forward =
cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[i + 1],
sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
streamer_priv->streamer_rx_ring[i].status = 0;
streamer_priv->streamer_rx_ring[i].buffer = virt_to_bus(skb->data);
streamer_priv->streamer_rx_ring[i].buffer =
cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data,
streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
streamer_priv->streamer_rx_ring[i].framelen_buflen = streamer_priv->pkt_buf_sz;
streamer_priv->rx_ring_skb[i] = skb;
}
streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1].forward =
virt_to_bus(&streamer_priv->streamer_rx_ring[0]);
cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
if (i == 0) {
printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n", dev->name);
......@@ -790,8 +806,12 @@ static int streamer_open(struct net_device *dev)
streamer_priv->rx_ring_last_received = STREAMER_RX_RING_SIZE - 1; /* last processed rx status */
writel(virt_to_bus(&streamer_priv->streamer_rx_ring[0]), streamer_mmio + RXBDA);
writel(virt_to_bus(&streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1]), streamer_mmio + RXLBDA);
writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
streamer_mmio + RXBDA);
writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1],
sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
streamer_mmio + RXLBDA);
/* set bus master interrupt event mask */
writew(MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
......@@ -807,7 +827,10 @@ static int streamer_open(struct net_device *dev)
writew(~BMCTL_TX2_DIS, streamer_mmio + BMCTL_RUM); /* Enables TX channel 2 */
for (i = 0; i < STREAMER_TX_RING_SIZE; i++) {
streamer_priv->streamer_tx_ring[i].forward = virt_to_bus(&streamer_priv->streamer_tx_ring[i + 1]);
streamer_priv->streamer_tx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
&streamer_priv->streamer_tx_ring[i + 1],
sizeof(struct streamer_tx_desc),
PCI_DMA_TODEVICE));
streamer_priv->streamer_tx_ring[i].status = 0;
streamer_priv->streamer_tx_ring[i].bufcnt_framelen = 0;
streamer_priv->streamer_tx_ring[i].buffer = 0;
......@@ -817,7 +840,8 @@ static int streamer_open(struct net_device *dev)
streamer_priv->streamer_tx_ring[i].rsvd3 = 0;
}
streamer_priv->streamer_tx_ring[STREAMER_TX_RING_SIZE - 1].forward =
virt_to_bus(&streamer_priv->streamer_tx_ring[0]);
cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[0],
sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE));
streamer_priv->free_tx_ring_entries = STREAMER_TX_RING_SIZE;
streamer_priv->tx_ring_free = 0; /* next entry in tx ring to use */
......@@ -915,6 +939,11 @@ static void streamer_rx(struct net_device *dev)
skb->dev = dev;
if (buffer_cnt == 1) {
/* release the DMA mapping */
pci_unmap_single(streamer_priv->pci_dev,
le32_to_cpu(streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer),
streamer_priv->pkt_buf_sz,
PCI_DMA_FROMDEVICE);
skb2 = streamer_priv->rx_ring_skb[rx_ring_last_received];
#if STREAMER_DEBUG_PACKETS
{
......@@ -934,20 +963,29 @@ static void streamer_rx(struct net_device *dev)
/* recycle this descriptor */
streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer = virt_to_bus(skb->data);
streamer_priv-> rx_ring_skb[rx_ring_last_received] = skb;
streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer =
cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz,
PCI_DMA_FROMDEVICE));
streamer_priv->rx_ring_skb[rx_ring_last_received] = skb;
/* place recycled descriptor back on the adapter */
writel(virt_to_bus(&streamer_priv->streamer_rx_ring[rx_ring_last_received]),streamer_mmio + RXLBDA);
writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
&streamer_priv->streamer_rx_ring[rx_ring_last_received],
sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)),
streamer_mmio + RXLBDA);
/* pass the received skb up to the protocol */
netif_rx(skb2);
} else {
do { /* Walk the buffers */
memcpy(skb_put(skb, length),bus_to_virt(rx_desc->buffer), length); /* copy this fragment */
pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(rx_desc->buffer), length, PCI_DMA_FROMDEVICE),
memcpy(skb_put(skb, length), (void *)rx_desc->buffer, length); /* copy this fragment */
streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
/* give descriptor back to the adapter */
writel(virt_to_bus(&streamer_priv->streamer_rx_ring[rx_ring_last_received]), streamer_mmio + RXLBDA);
writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
&streamer_priv->streamer_rx_ring[rx_ring_last_received],
length, PCI_DMA_FROMDEVICE)),
streamer_mmio + RXLBDA);
if (rx_desc->status & 0x80000000)
break; /* this descriptor completes the frame */
......@@ -1114,7 +1152,8 @@ static int streamer_xmit(struct sk_buff *skb, struct net_device *dev)
if (streamer_priv->free_tx_ring_entries) {
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].status = 0;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].bufcnt_framelen = 0x00020000 | skb->len;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buffer = virt_to_bus(skb->data);
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buffer =
cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE));
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd1 = skb->len;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd2 = 0;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd3 = 0;
......@@ -1135,7 +1174,10 @@ static int streamer_xmit(struct sk_buff *skb, struct net_device *dev)
}
#endif
writel(virt_to_bus (&streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free]),streamer_mmio + TX2LFDA);
writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
&streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free],
sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)),
streamer_mmio + TX2LFDA);
(void)readl(streamer_mmio + TX2LFDA);
streamer_priv->tx_ring_free = (streamer_priv->tx_ring_free + 1) & (STREAMER_TX_RING_SIZE - 1);
......
......@@ -210,6 +210,7 @@ static struct pci_device_id tulip_pci_tbl[] __devinitdata = {
{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
......@@ -1409,7 +1410,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
dev->base_addr = ioaddr;
dev->irq = irq;
#ifdef CONFIG_TULIP_MWI
if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
......@@ -1553,6 +1553,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
for (i = 0; i < 6; i++)
last_phys_addr[i] = dev->dev_addr[i];
last_irq = irq;
dev->irq = irq;
/* The lower four bits are the media type. */
if (board_idx >= 0 && board_idx < MAX_UNITS) {
......
......@@ -299,7 +299,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
dev->get_stats = &xircom_get_stats;
dev->priv = private;
dev->do_ioctl = &private_ioctl;
pdev->driver_data = dev;
pci_set_drvdata(pdev, dev);
/* start the transmitter to get a heartbeat */
......@@ -326,7 +326,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
*/
static void __devexit xircom_remove(struct pci_dev *pdev)
{
struct net_device *dev = pdev->driver_data;
struct net_device *dev = pci_get_drvdata(pdev);
struct xircom_private *card;
enter("xircom_remove");
if (dev!=NULL) {
......
......@@ -3556,7 +3556,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
/* Set PCI drv pointer to the card structure */
pdev->driver_data = card;
pci_set_drvdata(pdev, card);
/* Set board type */
switch (device_id) {
......@@ -3631,7 +3631,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static void __devexit cpc_remove_one(struct pci_dev *pdev)
{
pc300_t *card = (pc300_t *) pdev->driver_data;
pc300_t *card = pci_get_drvdata(pdev);
if (card->hw.rambase != 0) {
int i;
......
......@@ -240,7 +240,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
SCpnt->request->special = (void *) SCpnt;
if(blk_rq_tagged(SCpnt->request))
blk_queue_end_tag(q, SCpnt->request);
_elv_add_request(q, SCpnt->request, 0, 0);
__elv_add_request(q, SCpnt->request, 0, 0);
}
/*
......@@ -514,6 +514,12 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
}
}
if (blk_pc_request(req)) {
req->errors = result & 0xff;
if (!result)
req->data_len -= SCpnt->bufflen;
}
/*
* Zero these out. They now point to freed memory, and it is
* dangerous to hang onto the pointers.
......@@ -527,7 +533,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
* Next deal with any sectors which we were able to correctly
* handle.
*/
if (good_sectors > 0) {
if (good_sectors >= 0) {
SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d sectors done.\n",
req->nr_sectors, good_sectors));
SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg));
......@@ -951,7 +957,7 @@ void scsi_request_fn(request_queue_t * q)
SCpnt->request->flags |= REQ_SPECIAL;
if(blk_rq_tagged(SCpnt->request))
blk_queue_end_tag(q, SCpnt->request);
_elv_add_request(q, SCpnt->request, 0, 0);
__elv_add_request(q, SCpnt->request, 0, 0);
break;
}
......
......@@ -62,15 +62,9 @@ int scsi_init_io(Scsi_Cmnd *SCpnt)
int count, gfp_mask;
/*
* non-sg block request. FIXME: check bouncing for isa hosts!
* if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
*/
if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
/*
* FIXME: isa bouncing
*/
if (SCpnt->host->unchecked_isa_dma)
goto fail;
SCpnt->request_bufflen = req->data_len;
SCpnt->request_buffer = req->data;
req->buffer = req->data;
......@@ -100,6 +94,8 @@ int scsi_init_io(Scsi_Cmnd *SCpnt)
SCpnt->request_buffer = (char *) sgpnt;
SCpnt->request_bufflen = req->nr_sectors << 9;
if (blk_pc_request(req))
SCpnt->request_bufflen = req->data_len;
req->buffer = NULL;
/*
......@@ -123,7 +119,6 @@ int scsi_init_io(Scsi_Cmnd *SCpnt)
/*
* kill it. there should be no leftover blocks in this request
*/
fail:
SCpnt = scsi_end_request(SCpnt, 0, req->nr_sectors);
BUG_ON(SCpnt);
return 0;
......
......@@ -308,6 +308,8 @@ static int sd_init_command(Scsi_Cmnd * SCpnt)
if (rq->timeout)
timeout = rq->timeout;
SCpnt->transfersize = rq->data_len;
SCpnt->underflow = rq->data_len;
goto queue;
}
......@@ -431,10 +433,10 @@ static int sd_init_command(Scsi_Cmnd * SCpnt)
* host adapter, it's safe to assume that we can at least transfer
* this many bytes between each connect / disconnect.
*/
queue:
SCpnt->transfersize = sdp->sector_size;
SCpnt->underflow = this_count << 9;
queue:
SCpnt->allowed = MAX_RETRIES;
SCpnt->timeout_per_command = timeout;
......
......@@ -287,6 +287,8 @@ static int sr_init_command(Scsi_Cmnd * SCpnt)
if (rq->timeout)
timeout = rq->timeout;
SCpnt->transfersize = rq->data_len;
SCpnt->underflow = rq->data_len;
goto queue;
}
......@@ -360,10 +362,10 @@ static int sr_init_command(Scsi_Cmnd * SCpnt)
* host adapter, it's safe to assume that we can at least transfer
* this many bytes between each connect / disconnect.
*/
queue:
SCpnt->transfersize = cd->device->sector_size;
SCpnt->underflow = this_count << 9;
queue:
SCpnt->allowed = MAX_RETRIES;
SCpnt->timeout_per_command = timeout;
......
......@@ -160,13 +160,11 @@ int sr_do_ioctl(Scsi_CD *cd, struct cdrom_generic_command *cgc)
if (!cgc->quiet)
printk(KERN_ERR "%s: CDROM (ioctl) reports ILLEGAL "
"REQUEST.\n", cd->cdi.name);
err = -EIO;
if (SRpnt->sr_sense_buffer[12] == 0x20 &&
SRpnt->sr_sense_buffer[13] == 0x00) {
SRpnt->sr_sense_buffer[13] == 0x00)
/* sense: Invalid command operation code */
err = -EDRIVE_CANT_DO_THIS;
} else {
err = -EINVAL;
}
#ifdef DEBUG
print_command(cgc->cmd);
print_req_sense("sr", SRpnt);
......
......@@ -122,6 +122,7 @@ inline void bio_init(struct bio *bio)
bio->bi_max_vecs = 0;
bio->bi_end_io = NULL;
atomic_set(&bio->bi_cnt, 1);
bio->bi_private = NULL;
}
/**
......@@ -354,7 +355,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
request_queue_t *q = bdev_get_queue(bdev);
int nr_pages;
nr_pages = q->max_sectors >> (PAGE_SHIFT - 9);
nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (nr_pages > q->max_phys_segments)
nr_pages = q->max_phys_segments;
if (nr_pages > q->max_hw_segments)
......@@ -385,13 +386,13 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
* cloned bio must not modify vec list
*/
if (unlikely(bio_flagged(bio, BIO_CLONED)))
return 1;
return 0;
if (bio->bi_vcnt >= bio->bi_max_vecs)
return 1;
return 0;
if (((bio->bi_size + len) >> 9) > q->max_sectors)
return 1;
return 0;
/*
* we might loose a segment or two here, but rather that than
......@@ -404,7 +405,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
if (fail_segments) {
if (retried_segments)
return 1;
return 0;
bio->bi_flags &= ~(1 << BIO_SEG_VALID);
retried_segments = 1;
......@@ -425,20 +426,151 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
* depending on offset), it can specify a merge_bvec_fn in the
* queue to get further control
*/
if (q->merge_bvec_fn && q->merge_bvec_fn(q, bio, bvec)) {
bvec->bv_page = NULL;
bvec->bv_len = 0;
bvec->bv_offset = 0;
return 1;
if (q->merge_bvec_fn) {
/*
* merge_bvec_fn() returns number of bytes it can accept
* at this offset
*/
if (q->merge_bvec_fn(q, bio, bvec) < len) {
bvec->bv_page = NULL;
bvec->bv_len = 0;
bvec->bv_offset = 0;
return 0;
}
}
bio->bi_vcnt++;
bio->bi_phys_segments++;
bio->bi_hw_segments++;
bio->bi_size += len;
return 0;
return len;
}
/**
* bio_map_user - map user address into bio
* @bdev: destination block device
* @uaddr: start of user address
* @len: length in bytes
* @write_to_vm: bool indicating writing to pages or not
*
* Map the user space address into a bio suitable for io to a block
* device. Caller should check the size of the returned bio, we might
* not have mapped the entire range specified.
*/
struct bio *bio_map_user(struct block_device *bdev, unsigned long uaddr,
unsigned int len, int write_to_vm)
{
unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = uaddr >> PAGE_SHIFT;
const int nr_pages = end - start;
request_queue_t *q = bdev_get_queue(bdev);
int ret, offset, i;
struct page **pages;
struct bio *bio;
/*
* transfer and buffer must be aligned to at least hardsector
* size for now, in the future we can relax this restriction
*/
if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
return NULL;
bio = bio_alloc(GFP_KERNEL, nr_pages);
if (!bio)
return NULL;
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
if (!pages)
goto out;
down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm, uaddr, nr_pages,
write_to_vm, 0, pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret < nr_pages)
goto out;
bio->bi_bdev = bdev;
offset = uaddr & ~PAGE_MASK;
for (i = 0; i < nr_pages; i++) {
unsigned int bytes = PAGE_SIZE - offset;
if (len <= 0)
break;
if (bytes > len)
bytes = len;
/*
* sorry...
*/
if (bio_add_page(bio, pages[i], bytes, offset) < bytes)
break;
len -= bytes;
offset = 0;
}
/*
* release the pages we didn't map into the bio, if any
*/
while (i < nr_pages)
page_cache_release(pages[i++]);
kfree(pages);
/*
* check if the mapped pages need bouncing for an isa host.
*/
blk_queue_bounce(q, &bio);
return bio;
out:
kfree(pages);
bio_put(bio);
return NULL;
}
/**
* bio_unmap_user - unmap a bio
* @bio: the bio being unmapped
* @write_to_vm: bool indicating whether pages were written to
*
* Unmap a bio previously mapped by bio_map_user(). The @write_to_vm
* must be the same as passed into bio_map_user(). Must be called with
* a process context.
*/
void bio_unmap_user(struct bio *bio, int write_to_vm)
{
struct bio_vec *bvec;
int i;
/*
* find original bio if it was bounced
*/
if (bio->bi_private) {
/*
* someone stole our bio, must not happen
*/
BUG_ON(!bio_flagged(bio, BIO_BOUNCED));
bio = bio->bi_private;
}
/*
* make sure we dirty pages we wrote to
*/
__bio_for_each_segment(bvec, bio, i, 0) {
if (write_to_vm)
set_page_dirty(bvec->bv_page);
page_cache_release(bvec->bv_page);
}
bio_put(bio);
}
/**
* bio_endio - end I/O on a bio
* @bio: bio
......@@ -446,14 +578,15 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
* @error: error, if any
*
* Description:
* bio_endio() will end I/O @bytes_done number of bytes. This may be just
* a partial part of the bio, or it may be the whole bio. bio_endio() is
* the preferred way to end I/O on a bio, it takes care of decrementing
* bio_endio() will end I/O on @bytes_done number of bytes. This may be
* just a partial part of the bio, or it may be the whole bio. bio_endio()
* is the preferred way to end I/O on a bio, it takes care of decrementing
* bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and
* and one of the established -Exxxx (-EIO, for instance) error values in
* case something went wrong.
* case something went wrong. Noone should call bi_end_io() directly on
* a bio unless they own it and thus know that it has an end_io function.
**/
int bio_endio(struct bio *bio, unsigned int bytes_done, int error)
void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
{
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
......@@ -465,7 +598,9 @@ int bio_endio(struct bio *bio, unsigned int bytes_done, int error)
}
bio->bi_size -= bytes_done;
return bio->bi_end_io(bio, bytes_done, error);
if (bio->bi_end_io)
bio->bi_end_io(bio, bytes_done, error);
}
static void __init biovec_init_pools(void)
......@@ -537,7 +672,7 @@ static int __init init_bio(void)
return 0;
}
module_init(init_bio);
subsys_initcall(init_bio);
EXPORT_SYMBOL(bio_alloc);
EXPORT_SYMBOL(bio_put);
......@@ -550,3 +685,5 @@ EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_hw_segments);
EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
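The bio_add_page() change above makes it return the number of bytes actually
added (0 when nothing could be added, for example when the queue's merge_bvec_fn
accepts fewer bytes than requested) instead of a 0/1 status. Callers therefore
compare the return value against the length they asked for, as the fs/direct-io.c,
fs/mpage.c and pagebuf conversions below do. A minimal sketch of the new
convention, with rw, page, len, offset and nr_vecs standing for values the caller
already has:
	if (bio_add_page(bio, page, len, offset) < len) {
		/* this bio is full or the page was refused: submit it and
		 * retry the page with a freshly allocated bio */
		submit_bio(rw, bio);
		bio = bio_alloc(GFP_NOIO, nr_vecs);
	}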
......@@ -417,12 +417,12 @@ dio_bio_add_page(struct dio *dio, struct page *page,
/* Take a ref against the page each time it is placed into a BIO */
page_cache_get(page);
if (bio_add_page(dio->bio, page, bv_len, bv_offset)) {
if (bio_add_page(dio->bio, page, bv_len, bv_offset) < bv_len) {
dio_bio_submit(dio);
ret = dio_new_bio(dio, blkno);
if (ret == 0) {
ret = bio_add_page(dio->bio, page, bv_len, bv_offset);
BUG_ON(ret != 0);
BUG_ON(ret < bv_len);
} else {
/* The page didn't make it into a BIO */
page_cache_release(page);
......
......@@ -176,6 +176,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
unsigned first_hole = blocks_per_page;
struct block_device *bdev = NULL;
struct buffer_head bh;
int length;
if (page_has_buffers(page))
goto confused;
......@@ -233,7 +234,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
goto confused;
}
if (bio_add_page(bio, page, first_hole << blkbits, 0)) {
length = first_hole << blkbits;
if (bio_add_page(bio, page, length, 0) < length) {
bio = mpage_bio_submit(READ, bio);
goto alloc_new;
}
......@@ -334,6 +336,7 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
int boundary = 0;
sector_t boundary_block = 0;
struct block_device *boundary_bdev = NULL;
int length;
if (page_has_buffers(page)) {
struct buffer_head *head = page_buffers(page);
......@@ -467,7 +470,8 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
try_to_free_buffers(page);
}
if (bio_add_page(bio, page, first_unmapped << blkbits, 0)) {
length = first_unmapped << blkbits;
if (bio_add_page(bio, page, length, 0) < length) {
bio = mpage_bio_submit(WRITE, bio);
goto alloc_new;
}
......
......@@ -109,7 +109,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
break;
}
if (do_wakeup) {
wake_up_interruptible(PIPE_WAIT(*inode));
wake_up_interruptible_sync(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
}
pipe_wait(inode);
......@@ -117,7 +117,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
up(PIPE_SEM(*inode));
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
wake_up_interruptible_sync(PIPE_WAIT(*inode));
wake_up_interruptible(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
}
if (ret > 0)
......
......@@ -1448,7 +1448,7 @@ pagebuf_iorequest( /* start real I/O */
if (nbytes > size)
nbytes = size;
if (bio_add_page(bio, pb->pb_pages[map_i], nbytes, offset))
if (bio_add_page(bio, pb->pb_pages[map_i], nbytes, offset) < nbytes)
break;
offset = 0;
......
......@@ -70,6 +70,7 @@ static __inline__ void ide_init_default_hwifs(void)
int index;
for(index = 0; index < MAX_HWIFS; index++) {
memset(&hw, 0, sizeof hw);
ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
hw.irq = ide_default_irq(ide_default_io_base(index));
ide_register_hw(&hw, NULL);
......
......@@ -101,6 +101,7 @@ struct bio {
#define BIO_EOF 2 /* out-out-bounds error */
#define BIO_SEG_VALID 3 /* nr_hw_seg valid */
#define BIO_CLONED 4 /* doesn't own data */
#define BIO_BOUNCED 5 /* bio is a bounce bio */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
......@@ -131,6 +132,7 @@ struct bio {
#define bio_page(bio) bio_iovec((bio))->bv_page
#define bio_offset(bio) bio_iovec((bio))->bv_offset
#define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_cur_sectors(bio) (bio_iovec(bio)->bv_len >> 9)
#define bio_data(bio) (page_address(bio_page((bio))) + bio_offset((bio)))
#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
......@@ -201,7 +203,7 @@ struct bio {
extern struct bio *bio_alloc(int, int);
extern void bio_put(struct bio *);
extern int bio_endio(struct bio *, unsigned int, int);
extern void bio_endio(struct bio *, unsigned int, int);
struct request_queue;
extern inline int bio_phys_segments(struct request_queue *, struct bio *);
extern inline int bio_hw_segments(struct request_queue *, struct bio *);
......@@ -214,6 +216,9 @@ extern inline void bio_init(struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct block_device *, unsigned long,
unsigned int, int);
extern void bio_unmap_user(struct bio *, int);
#ifdef CONFIG_HIGHMEM
/*
......
......@@ -39,33 +39,20 @@ void initrd_init(void);
*/
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *);
struct request *elv_next_request(request_queue_t *q);
static inline void blkdev_dequeue_request(struct request *req)
{
list_del(&req->queuelist);
BUG_ON(list_empty(&req->queuelist));
list_del_init(&req->queuelist);
if (req->q)
elv_remove_request(req->q, req);
}
#define _elv_add_request_core(q, rq, where, plug) \
do { \
if ((plug)) \
blk_plug_device((q)); \
(q)->elevator.elevator_add_req_fn((q), (rq), (where)); \
} while (0)
#define _elv_add_request(q, rq, back, p) do { \
if ((back)) \
_elv_add_request_core((q), (rq), (q)->queue_head.prev, (p)); \
else \
_elv_add_request_core((q), (rq), &(q)->queue_head, (p)); \
} while (0)
#define elv_add_request(q, rq, back) _elv_add_request((q), (rq), (back), 1)
#if defined(MAJOR_NR) || defined(IDE_DRIVER)
#if (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR)
#if !defined(IDE_DRIVER)
......
......@@ -26,6 +26,8 @@ struct request {
struct list_head queuelist; /* looking for ->queue? you must _not_
* access it directly, use
* blkdev_dequeue_request! */
int ref_count;
void *elevator_private;
unsigned char cmd[16];
......@@ -64,7 +66,10 @@ struct request {
/* For packet commands */
unsigned int data_len;
void *data, *sense;
void *data;
unsigned int sense_len;
void *sense;
unsigned int timeout;
struct completion *waiting;
......@@ -150,12 +155,6 @@ struct blk_queue_tag {
int max_depth;
};
/*
* Default nr free requests per queue, ll_rw_blk will scale it down
* according to available RAM at init time
*/
#define QUEUE_NR_REQUESTS 8192
struct request_queue
{
/*
......@@ -215,10 +214,17 @@ struct request_queue
unsigned int max_segment_size;
unsigned long seg_boundary_mask;
unsigned int dma_alignment;
wait_queue_head_t queue_wait;
struct blk_queue_tag *queue_tags;
/*
* sg stuff
*/
unsigned int sg_timeout;
unsigned int sg_reserved_size;
};
#define RQ_INACTIVE (-1)
......@@ -254,6 +260,13 @@ struct request_queue
*/
#define blk_queue_headactive(q, head_active)
/*
* q->prep_rq_fn return values
*/
#define BLKPREP_OK 0 /* serve it */
#define BLKPREP_KILL 1 /* fatal error, kill */
#define BLKPREP_DEFER 2 /* leave on queue */
extern unsigned long blk_max_low_pfn, blk_max_pfn;
/*
......@@ -268,7 +281,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
extern int init_emergency_isa_pool(void);
void blk_queue_bounce(request_queue_t *q, struct bio **bio);
inline void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#define rq_for_each_bio(bio, rq) \
if ((rq->bio)) \
......@@ -339,6 +352,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
......@@ -385,6 +399,21 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
return queue_hardsect_size(bdev_get_queue(bdev));
}
static inline int queue_dma_alignment(request_queue_t *q)
{
int retval = 511;
if (q && q->dma_alignment)
retval = q->dma_alignment;
return retval;
}
static inline int bdev_dma_aligment(struct block_device *bdev)
{
return queue_dma_alignment(bdev_get_queue(bdev));
}
#define blk_finished_io(nsects) do { } while (0)
#define blk_started_io(nsects) do { } while (0)
......
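The BLKPREP_* codes added to blkdev.h above are the values a queue's prep_rq_fn
now returns: elv_next_request() serves a BLKPREP_OK request, returns NULL and
leaves a BLKPREP_DEFER request on the queue, and dequeues and fails a
BLKPREP_KILL request (see the elevator.c hunk earlier). A minimal sketch of such
a prep function, where can_accept() and build_command() are hypothetical driver
helpers; it would be registered with blk_queue_prep_rq():
	static int example_prep_rq_fn(request_queue_t *q, struct request *rq)
	{
		if (!can_accept(q, rq))		/* hypothetical: hardware not ready for this request */
			return BLKPREP_DEFER;	/* leave it on the queue and try again later */

		if (build_command(rq) < 0)	/* hypothetical: building the command failed */
			return BLKPREP_KILL;	/* fatal, the request is ended with an error */

		return BLKPREP_OK;		/* ready to be handed to the driver */
	}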
......@@ -40,8 +40,8 @@ struct elevator_s
/*
* block elevator interface
*/
extern void __elv_add_request(request_queue_t *, struct request *,
struct list_head *);
extern void elv_add_request(request_queue_t *, struct request *, int, int);
extern void __elv_add_request(request_queue_t *, struct request *, int, int);
extern int elv_merge(request_queue_t *, struct list_head **, struct bio *);
extern void elv_merge_requests(request_queue_t *, struct request *,
struct request *);
......@@ -50,6 +50,9 @@ extern void elv_remove_request(request_queue_t *, struct request *);
extern int elv_queue_empty(request_queue_t *);
extern inline struct list_head *elv_get_sort_head(request_queue_t *, struct request *);
#define __elv_add_request_pos(q, rq, pos) \
(q)->elevator.elevator_add_req_fn((q), (rq), (pos))
/*
* noop I/O scheduler. always merges, always inserts new request at tail
*/
......
......@@ -366,34 +366,13 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
return 0;
}
void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
mempool_t *pool)
{
struct page *page;
struct bio *bio = NULL;
int i, rw = bio_data_dir(*bio_orig), bio_gfp;
int i, rw = bio_data_dir(*bio_orig);
struct bio_vec *to, *from;
mempool_t *pool;
unsigned long pfn = q->bounce_pfn;
int gfp = q->bounce_gfp;
BUG_ON((*bio_orig)->bi_idx);
/*
* for non-isa bounce case, just check if the bounce pfn is equal
* to or bigger than the highest pfn in the system -- in that case,
* don't waste time iterating over bio segments
*/
if (!(gfp & GFP_DMA)) {
if (pfn >= blk_max_pfn)
return;
bio_gfp = GFP_NOHIGHIO;
pool = page_pool;
} else {
BUG_ON(!isa_page_pool);
bio_gfp = GFP_NOIO;
pool = isa_page_pool;
}
bio_for_each_segment(from, *bio_orig, i) {
page = from->bv_page;
......@@ -401,7 +380,7 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
/*
* is destination page below bounce pfn?
*/
if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < pfn)
if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < q->bounce_pfn)
continue;
/*
......@@ -412,11 +391,11 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
to = bio->bi_io_vec + i;
to->bv_page = mempool_alloc(pool, gfp);
to->bv_page = mempool_alloc(pool, q->bounce_gfp);
to->bv_len = from->bv_len;
to->bv_offset = from->bv_offset;
if (rw & WRITE) {
if (rw == WRITE) {
char *vto, *vfrom;
vto = page_address(to->bv_page) + to->bv_offset;
......@@ -437,15 +416,16 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
* pages
*/
bio_for_each_segment(from, *bio_orig, i) {
to = &bio->bi_io_vec[i];
to = bio_iovec_idx(bio, i);
if (!to->bv_page) {
to->bv_page = from->bv_page;
to->bv_len = from->bv_len;
to->bv_offset = to->bv_offset;
to->bv_offset = from->bv_offset;
}
}
bio->bi_bdev = (*bio_orig)->bi_bdev;
bio->bi_flags |= (1 << BIO_BOUNCED);
bio->bi_sector = (*bio_orig)->bi_sector;
bio->bi_rw = (*bio_orig)->bi_rw;
......@@ -454,14 +434,12 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
bio->bi_size = (*bio_orig)->bi_size;
if (pool == page_pool) {
if (rw & WRITE)
bio->bi_end_io = bounce_end_io_write;
else
bio->bi_end_io = bounce_end_io_write;
if (rw == READ)
bio->bi_end_io = bounce_end_io_read;
} else {
if (rw & WRITE)
bio->bi_end_io = bounce_end_io_write_isa;
else
bio->bi_end_io = bounce_end_io_write_isa;
if (rw == READ)
bio->bi_end_io = bounce_end_io_read_isa;
}
......@@ -469,6 +447,37 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
*bio_orig = bio;
}
inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
{
mempool_t *pool;
int bio_gfp;
BUG_ON((*bio_orig)->bi_idx);
/*
* for non-isa bounce case, just check if the bounce pfn is equal
* to or bigger than the highest pfn in the system -- in that case,
* don't waste time iterating over bio segments
*/
if (!(q->bounce_gfp & GFP_DMA)) {
if (q->bounce_pfn >= blk_max_pfn)
return;
bio_gfp = GFP_NOHIGHIO;
pool = page_pool;
} else {
BUG_ON(!isa_page_pool);
bio_gfp = GFP_NOIO;
pool = isa_page_pool;
}
/*
* slow path
*/
__blk_queue_bounce(q, bio_orig, bio_gfp, pool);
}
#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_HIGHMEM)
void check_highmem_ptes(void)
{
......