Commit 77ae7b5e authored by Linus Torvalds

Merge master.kernel.org:/home/axboe/linus-merge-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux

parents ce51eb88 5e5bffab
@@ -54,6 +54,14 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 int hlt_counter;
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+        return ((unsigned long *)tsk->thread.esp)[3];
+}
 /*
  * Powermanagement idle function, if any..
  */
...
@@ -44,12 +44,12 @@
 #include <linux/genhd.h>
 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "Compaq CISS Driver (v 2.4.5)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,4,5)
+#define DRIVER_NAME "Compaq CISS Driver (v 2.5.0)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,5,0)
 /* Embedded module documentation macros - see modules.h */
 MODULE_AUTHOR("Charles M. White III - Compaq Computer Corporation");
-MODULE_DESCRIPTION("Driver for Compaq Smart Array Controller 5300");
+MODULE_DESCRIPTION("Driver for Compaq Smart Array Controller 5xxx v. 2.5.0");
 MODULE_LICENSE("GPL");
 #include "cciss_cmd.h"
@@ -372,21 +372,18 @@ static int cciss_open(struct inode *inode, struct file *filep)
         if (ctlr > MAX_CTLR || hba[ctlr] == NULL)
                 return -ENXIO;
-        if (!suser() && hba[ctlr]->sizes[minor(inode->i_rdev)] == 0)
-                return -ENXIO;
         /*
          * Root is allowed to open raw volume zero even if its not configured
          * so array config can still work. I don't think I really like this,
          * but I'm already using way to many device nodes to claim another one
          * for "raw controller".
         */
-        if (suser()
-                && (hba[ctlr]->sizes[minor(inode->i_rdev)] == 0)
-                && (minor(inode->i_rdev)!= 0))
-                return -ENXIO;
+        if (hba[ctlr]->sizes[minor(inode->i_rdev)] == 0) {
+                if (minor(inode->i_rdev) != 0)
+                        return -ENXIO;
+                if (!capable(CAP_SYS_ADMIN))
+                        return -EPERM;
+        }
         hba[ctlr]->drv[dsk].usage_count++;
         hba[ctlr]->usage_count++;
         return 0;
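Note: the hunk above drops the old suser() test in favor of the capability API. Read after the patch, the open path distinguishes the two failure modes: an unconfigured volume other than minor 0 still fails with -ENXIO, while the raw minor-0 node now requires CAP_SYS_ADMIN and fails with -EPERM. A condensed sketch of the resulting check, paraphrased from the new side of the hunk (not a separate implementation):

        /* Unconfigured volume: only the raw node (minor 0) may be opened,
         * and only by a task with CAP_SYS_ADMIN. */
        if (hba[ctlr]->sizes[minor(inode->i_rdev)] == 0) {
                if (minor(inode->i_rdev) != 0)
                        return -ENXIO;          /* no such configured volume */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;          /* raw controller node is admin-only */
        }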
@@ -647,6 +644,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
         char *buff = NULL;
         u64bit temp64;
         unsigned long flags;
+        DECLARE_COMPLETION(wait);
         if (!arg) return -EINVAL;
@@ -712,6 +710,8 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
                 c->SG[0].Len = iocommand.buf_size;
                 c->SG[0].Ext = 0;  // we are not chaining
         }
+        c->waiting = &wait;
         /* Put the request on the tail of the request queue */
         spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
         addQ(&h->reqQ, c);
@@ -719,9 +719,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
         start_io(h);
         spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-        /* Wait for completion */
-        while(c->cmd_type != CMD_IOCTL_DONE)
-                schedule_timeout(1);
+        wait_for_completion(&wait);
         /* unlock the buffers from DMA */
         temp64.val32.lower = c->SG[0].Addr.lower;
@@ -933,6 +931,7 @@ static int sendcmd_withirq(__u8 cmd,
         u64bit buff_dma_handle;
         unsigned long flags;
         int return_status = IO_OK;
+        DECLARE_COMPLETION(wait);
         if ((c = cmd_alloc(h , 0)) == NULL)
         {
@@ -1026,6 +1025,7 @@ static int sendcmd_withirq(__u8 cmd,
                 c->SG[0].Len = size;
                 c->SG[0].Ext = 0;  // we are not chaining
         }
+        c->waiting = &wait;
         /* Put the request on the tail of the queue and send it */
         spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
@@ -1034,9 +1034,8 @@ static int sendcmd_withirq(__u8 cmd,
         start_io(h);
         spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-        /* wait for completion */
-        while(c->cmd_type != CMD_IOCTL_DONE)
-                schedule_timeout(1);
+        wait_for_completion(&wait);
         /* unlock the buffers from DMA */
         pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
                 size, PCI_DMA_BIDIRECTIONAL);
@@ -1128,6 +1127,7 @@ static int register_new_disk(kdev_t dev, int ctlr)
         __u32 lunid = 0;
         unsigned int block_size;
         unsigned int total_size;
+        kdev_t kdev;
         if (!capable(CAP_SYS_RAWIO))
                 return -EPERM;
@@ -1340,7 +1340,7 @@ static int register_new_disk(kdev_t dev, int ctlr)
         for(i=max_p-1; i>=0; i--) {
                 int minor = start+i;
-                kdev_t kdev = mk_kdev(MAJOR_NR + ctlr, minor);
+                kdev = mk_kdev(MAJOR_NR + ctlr, minor);
                 invalidate_device(kdev, 1);
                 gdev->part[minor].start_sect = 0;
                 gdev->part[minor].nr_sects = 0;
@@ -1352,7 +1352,8 @@ static int register_new_disk(kdev_t dev, int ctlr)
         ++hba[ctlr]->num_luns;
         gdev->nr_real = hba[ctlr]->highest_lun + 1;
         /* setup partitions per disk */
-        grok_partitions(dev, hba[ctlr]->drv[logvol].nr_blocks);
+        kdev = mk_kdev(MAJOR_NR + ctlr, logvol << gdev->minor_shift);
+        grok_partitions(kdev, hba[ctlr]->drv[logvol].nr_blocks);
         kfree(ld_buff);
         kfree(size_buff);
@@ -1672,12 +1673,11 @@ static void start_io( ctlr_info_t *h)
 static inline void complete_buffers(struct bio *bio, int status)
 {
         while (bio) {
-                int nsecs = bio_sectors(bio);
                 struct bio *xbh = bio->bi_next;
                 bio->bi_next = NULL;
-                blk_finished_io(nsecs);
-                bio_endio(bio, status, nsecs);
+                blk_finished_io(bio_sectors(bio));
+                bio_endio(bio, status);
                 bio = xbh;
         }
@@ -1957,7 +1957,7 @@ static void do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
                         complete_command(c, 0);
                         cmd_free(h, c, 1);
                 } else if (c->cmd_type == CMD_IOCTL_PEND) {
-                        c->cmd_type = CMD_IOCTL_DONE;
+                        complete(c->waiting);
                 }
 # ifdef CONFIG_CISS_SCSI_TAPE
                 else if (c->cmd_type == CMD_SCSI)
@@ -2466,7 +2466,8 @@ static int __init cciss_init_one(struct pci_dev *pdev,
         /* make sure the board interrupts are off */
         hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
         if( request_irq(hba[i]->intr, do_cciss_intr,
-                SA_INTERRUPT|SA_SHIRQ, hba[i]->devname, hba[i]))
+                SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
+                hba[i]->devname, hba[i]))
         {
                 printk(KERN_ERR "ciss: Unable to get irq %d for %s\n",
                         hba[i]->intr, hba[i]->devname);
...
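Note: taken together, the cciss.c hunks above convert the ioctl and sendcmd_withirq paths from polling cmd_type for CMD_IOCTL_DONE to the completion API: the submitter declares an on-stack completion, points c->waiting at it before queueing the command, and sleeps in wait_for_completion(); do_cciss_intr() then calls complete() instead of rewriting the flag. A minimal sketch of the pattern, condensed from the hunks above (locking details and error handling trimmed):

        #include <linux/completion.h>

        /* submitter (cciss_ioctl / sendcmd_withirq) */
        DECLARE_COMPLETION(wait);               /* on-stack completion */
        c->waiting = &wait;                     /* irq handler will signal this */
        spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
        addQ(&h->reqQ, c);
        start_io(h);
        spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
        wait_for_completion(&wait);             /* sleep until the command finishes */

        /* interrupt handler (do_cciss_intr) */
        if (c->cmd_type == CMD_IOCTL_PEND)
                complete(c->waiting);           /* wake the submitter */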
@@ -222,7 +222,6 @@ typedef struct _ErrorInfo_struct {
 /* Command types */
 #define CMD_RWREQ 0x00
 #define CMD_IOCTL_PEND 0x01
-#define CMD_IOCTL_DONE 0x02
 #define CMD_SCSI 0x03
 #define CMD_MSG_DONE 0x04
 #define CMD_MSG_TIMEOUT 0x05
@@ -240,6 +239,7 @@ typedef struct _CommandList_struct {
         struct _CommandList_struct *prev;
         struct _CommandList_struct *next;
         struct request * rq;
+        struct completion *waiting;
 #ifdef CONFIG_CISS_SCSI_TAPE
         void * scsi_cmd;
 #endif
...
@@ -897,6 +897,7 @@ cciss_scsi_do_simple_cmd(ctlr_info_t *c,
         int direction)
 {
         unsigned long flags;
+        DECLARE_COMPLETION(wait);
         cp->cmd_type = CMD_IOCTL_PEND; // treat this like an ioctl
         cp->scsi_cmd = NULL;
@@ -922,6 +923,8 @@ cciss_scsi_do_simple_cmd(ctlr_info_t *c,
                 (unsigned char *) buf, bufsize,
                 scsi_to_pci_dma_dir(SCSI_DATA_READ));
+        cp->waiting = &wait;
         /* Put the request on the tail of the request queue */
         spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
         addQ(&c->reqQ, cp);
@@ -929,9 +932,7 @@ cciss_scsi_do_simple_cmd(ctlr_info_t *c,
         start_io(c);
         spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
-        /* Wait for the request to complete */
-        while(cp->cmd_type != CMD_IOCTL_DONE)
-                schedule_timeout(1);
+        wait_for_completion(&wait);
         /* undo the dma mapping */
         cciss_unmap_one(c->pdev, cp, bufsize,
@@ -1086,10 +1087,10 @@ cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
         cdb[3] = 0;
         cdb[4] = 0;
         cdb[5] = 0;
-        cdb[6] = (sizeof(*buf) >> 24) & 0xFF; //MSB
-        cdb[7] = (sizeof(*buf) >> 16) & 0xFF;
-        cdb[8] = (sizeof(*buf) >> 8) & 0xFF;
-        cdb[9] = sizeof(*buf) & 0xFF;
+        cdb[6] = (bufsize >> 24) & 0xFF; //MSB
+        cdb[7] = (bufsize >> 16) & 0xFF;
+        cdb[8] = (bufsize >> 8) & 0xFF;
+        cdb[9] = bufsize & 0xFF;
         cdb[10] = 0;
         cdb[11] = 0;
...
@@ -970,13 +970,11 @@ static inline void complete_buffers(struct bio *bio, int ok)
 {
         struct bio *xbh;
         while(bio) {
-                int nsecs = bio_sectors(bio);
                 xbh = bio->bi_next;
                 bio->bi_next = NULL;
-                blk_finished_io(nsecs);
-                bio_endio(bio, ok, nsecs);
+                blk_finished_io(bio_sectors(bio));
+                bio_endio(bio, ok);
                 bio = xbh;
         }
...
@@ -1227,7 +1227,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
         return 0;
 end_io:
-        bio->bi_end_io(bio, nr_sectors);
+        bio->bi_end_io(bio);
         return 0;
 }
@@ -1329,7 +1329,7 @@ void generic_make_request(struct bio *bio)
                         "generic_make_request: Trying to access nonexistent block-device %s (%Lu)\n",
                         kdevname(bio->bi_dev), (long long) bio->bi_sector);
 end_io:
-                bio->bi_end_io(bio, nr_sectors);
+                bio->bi_end_io(bio);
                 break;
         }
@@ -1350,15 +1350,12 @@ void generic_make_request(struct bio *bio)
 /*
  * our default bio end_io callback handler for a buffer_head mapping.
  */
-static int end_bio_bh_io_sync(struct bio *bio, int nr_sectors)
+static void end_bio_bh_io_sync(struct bio *bio)
 {
         struct buffer_head *bh = bio->bi_private;
-        BIO_BUG_ON(nr_sectors != (bh->b_size >> 9));
         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
         bio_put(bio);
-        return 0;
 }
 /**
@@ -1641,8 +1638,7 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
                 if (!bio->bi_size) {
                         req->bio = bio->bi_next;
-                        if (unlikely(bio_endio(bio, uptodate, total_nsect)))
-                                BUG();
+                        bio_endio(bio, uptodate);
                         total_nsect = 0;
                 }
...
@@ -319,7 +319,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
         return ret;
 }
-static int loop_end_io_transfer(struct bio *, int);
+static void loop_end_io_transfer(struct bio *);
 static void loop_put_buffer(struct bio *bio)
 {
         /*
@@ -377,21 +377,19 @@ static struct bio *loop_get_bio(struct loop_device *lo)
  * bi_end_io context (we don't want to do decrypt of a page with irqs
  * disabled)
  */
-static int loop_end_io_transfer(struct bio *bio, int nr_sectors)
+static void loop_end_io_transfer(struct bio *bio)
 {
         struct bio *rbh = bio->bi_private;
         struct loop_device *lo = &loop_dev[minor(rbh->bi_dev)];
         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         if (!uptodate || bio_rw(bio) == WRITE) {
-                bio_endio(rbh, uptodate, nr_sectors);
+                bio_endio(rbh, uptodate);
                 if (atomic_dec_and_test(&lo->lo_pending))
                         up(&lo->lo_bh_mutex);
                 loop_put_buffer(bio);
         } else
                 loop_add_bio(lo, bio);
-        return 0;
 }
 static struct bio *loop_get_buffer(struct loop_device *lo, struct bio *rbh)
@@ -511,13 +509,13 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
          */
         if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
                 ret = do_bio_filebacked(lo, bio);
-                bio_endio(bio, !ret, bio_sectors(bio));
+                bio_endio(bio, !ret);
         } else {
                 struct bio *rbh = bio->bi_private;
                 ret = do_bio_blockbacked(lo, bio, rbh);
-                bio_endio(rbh, !ret, bio_sectors(rbh));
+                bio_endio(rbh, !ret);
                 loop_put_buffer(bio);
         }
 }
...
@@ -123,29 +123,24 @@ static ide_startstop_t lba_48_rw_disk (ide_drive_t *drive, struct request *rq, u
  */
 static ide_startstop_t do_rw_disk (ide_drive_t *drive, struct request *rq, unsigned long block)
 {
-        if (rq->flags & REQ_CMD)
-                goto good_command;
-        blk_dump_rq_flags(rq, "do_rw_disk, bad command");
-        ide_end_request(0, HWGROUP(drive));
-        return ide_stopped;
-good_command:
-#ifdef CONFIG_BLK_DEV_PDC4030
+        if (!(rq->flags & REQ_CMD)) {
+                blk_dump_rq_flags(rq, "do_rw_disk, bad command");
+                ide_end_request(0, HWGROUP(drive));
+                return ide_stopped;
+        }
         if (IS_PDC4030_DRIVE) {
                 extern ide_startstop_t promise_rw_disk(ide_drive_t *, struct request *, unsigned long);
                 return promise_rw_disk(drive, rq, block);
         }
-#endif /* CONFIG_BLK_DEV_PDC4030 */
         if ((drive->id->cfs_enable_2 & 0x0400) && (drive->addressing)) /* 48-bit LBA */
-                return lba_48_rw_disk(drive, rq, (unsigned long long) block);
+                return lba_48_rw_disk(drive, rq, block);
         if (drive->select.b.lba) /* 28-bit LBA */
-                return lba_28_rw_disk(drive, rq, (unsigned long) block);
+                return lba_28_rw_disk(drive, rq, block);
         /* 28-bit CHS : DIE DIE DIE piece of legacy crap!!! */
-        return chs_rw_disk(drive, rq, (unsigned long) block);
+        return chs_rw_disk(drive, rq, block);
 }
 static task_ioreg_t get_command (ide_drive_t *drive, int cmd)
...
@@ -629,7 +629,7 @@ static int proc_ide_read_driver
         (char *page, char **start, off_t off, int count, int *eof, void *data)
 {
         ide_drive_t *drive = (ide_drive_t *) data;
-        ide_driver_t *driver = (ide_driver_t *) drive->driver;
+        ide_driver_t *driver = drive->driver;
         int len;
         if (!driver)
@@ -746,7 +746,6 @@ void recreate_proc_ide_device(ide_hwif_t *hwif, ide_drive_t *drive)
         struct proc_dir_entry *ent;
         struct proc_dir_entry *parent = hwif->proc;
         char name[64];
-// ide_driver_t *driver = drive->driver;
         if (drive->present && !drive->proc) {
                 drive->proc = proc_mkdir(drive->name, parent);
...
@@ -225,14 +225,12 @@ static void reschedule_retry(r1bio_t *r1_bio)
  * operation and are ready to return a success/failure code to the buffer
  * cache layer.
  */
-static int raid_end_bio_io(r1bio_t *r1_bio, int uptodate, int nr_sectors)
+static void raid_end_bio_io(r1bio_t *r1_bio, int uptodate)
 {
         struct bio *bio = r1_bio->master_bio;
-        bio_endio(bio, uptodate, nr_sectors);
+        bio_endio(bio, uptodate);
         free_r1bio(r1_bio);
-        return 0;
 }
 /*
@@ -247,7 +245,7 @@ static void inline update_head_pos(int disk, r1bio_t *r1_bio)
         atomic_dec(&conf->mirrors[disk].nr_pending);
 }
-static int end_request(struct bio *bio, int nr_sectors)
+static void end_request(struct bio *bio)
 {
         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -278,8 +276,8 @@ static int end_request(struct bio *bio, int nr_sectors)
          * we have only one bio on the read side
          */
         if (uptodate) {
-                raid_end_bio_io(r1_bio, uptodate, nr_sectors);
-                return 0;
+                raid_end_bio_io(r1_bio, uptodate);
+                return;
         }
         /*
          * oops, read error:
@@ -287,7 +285,7 @@ static int end_request(struct bio *bio, int nr_sectors)
                 printk(KERN_ERR "raid1: %s: rescheduling sector %lu\n",
                         partition_name(bio->bi_dev), r1_bio->sector);
                 reschedule_retry(r1_bio);
-                return 0;
+                return;
         }
         if (r1_bio->read_bio)
@@ -307,8 +305,7 @@ static int end_request(struct bio *bio, int nr_sectors)
          * already.
          */
         if (atomic_dec_and_test(&r1_bio->remaining))
-                raid_end_bio_io(r1_bio, uptodate, nr_sectors);
-        return 0;
+                raid_end_bio_io(r1_bio, uptodate);
 }
 /*
@@ -518,7 +515,7 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio)
                  * If all mirrors are non-operational
                  * then return an IO error:
                  */
-                raid_end_bio_io(r1_bio, 0, 0);
+                raid_end_bio_io(r1_bio, 0);
                 return 0;
         }
         atomic_set(&r1_bio->remaining, sum_bios);
@@ -930,7 +927,7 @@ static int diskop(mddev_t *mddev, mdp_disk_t **d, int state)
 #define REDIRECT_SECTOR KERN_ERR \
 "raid1: %s: redirecting sector %lu to another mirror\n"
-static int end_sync_read(struct bio *bio, int nr_sectors)
+static void end_sync_read(struct bio *bio)
 {
         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -948,11 +945,9 @@ static int end_sync_read(struct bio *bio, int nr_sectors)
         else
                 set_bit(R1BIO_Uptodate, &r1_bio->state);
         reschedule_retry(r1_bio);
-        return 0;
 }
-static int end_sync_write(struct bio *bio, int nr_sectors)
+static void end_sync_write(struct bio *bio)
 {
         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -974,7 +969,6 @@ static int end_sync_write(struct bio *bio, int nr_sectors)
                 resume_device(conf);
                 put_buf(r1_bio);
         }
-        return 0;
 }
 static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
@@ -1100,7 +1094,7 @@ static void raid1d(void *data)
                 map(mddev, &bio->bi_dev);
                 if (kdev_same(bio->bi_dev, dev)) {
                         printk(IO_ERROR, partition_name(bio->bi_dev), r1_bio->sector);
-                        raid_end_bio_io(r1_bio, 0, 0);
+                        raid_end_bio_io(r1_bio, 0);
                         break;
                 }
                 printk(REDIRECT_SECTOR,
...
@@ -112,8 +112,6 @@ static int setup_buson[MAXBOARDS];
 static int setup_busoff[MAXBOARDS];
 static int setup_dmaspeed[MAXBOARDS] __initdata = { -1, -1, -1, -1 };
-static char *setup_str[MAXBOARDS] __initdata;
 /*
  * LILO/Module params: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]
  *
@@ -962,6 +960,7 @@ static int __init aha1542_query(int base_io, int *transl)
 }
 #ifndef MODULE
+static char *setup_str[MAXBOARDS] __initdata;
 static int setup_idx = 0;
 void __init aha1542_setup(char *str, int *ints)
...
@@ -316,13 +316,12 @@ struct bio *bio_copy(struct bio *bio, int gfp_mask, int copy)
         return NULL;
 }
-static int bio_end_io_kio(struct bio *bio, int nr_sectors)
+static void bio_end_io_kio(struct bio *bio)
 {
         struct kiobuf *kio = (struct kiobuf *) bio->bi_private;
         end_kio_request(kio, test_bit(BIO_UPTODATE, &bio->bi_flags));
         bio_put(bio);
-        return 0;
 }
 /**
@@ -441,7 +440,7 @@ void ll_rw_kio(int rw, struct kiobuf *kio, kdev_t dev, sector_t sector)
         end_kio_request(kio, !err);
 }
-int bio_endio(struct bio *bio, int uptodate, int nr_sectors)
+void bio_endio(struct bio *bio, int uptodate)
 {
         if (uptodate)
                 set_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -449,9 +448,7 @@ int bio_endio(struct bio *bio, int uptodate, int nr_sectors)
                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
         if (bio->bi_end_io)
-                return bio->bi_end_io(bio, nr_sectors);
-        return 0;
+                bio->bi_end_io(bio);
 }
 static void __init biovec_init_pool(void)
...
@@ -436,13 +436,7 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
 extern void release_segments(struct mm_struct * mm);
-/*
- * Return saved PC of a blocked thread.
- */
-static inline unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-        return ((unsigned long *)tsk->thread->esp)[3];
-}
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
 unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1019])
...
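Note: this is the header-side half of the first hunk in the merge: the static inline is dropped from the i386 processor header in favor of a plain declaration, and the definition moves out of line into the arch process code. Reassembled from those two hunks (nothing here beyond what they show), the split now reads:

        /* header: declaration only */
        extern unsigned long thread_saved_pc(struct task_struct *tsk);

        /* arch process code: out-of-line definition */
        unsigned long thread_saved_pc(struct task_struct *tsk)
        {
                return ((unsigned long *)tsk->thread.esp)[3];
        }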
@@ -50,7 +50,7 @@ struct bio_vec {
  * weee, c forward decl...
  */
 struct bio;
-typedef int (bio_end_io_t) (struct bio *, int);
+typedef void (bio_end_io_t) (struct bio *);
 typedef void (bio_destructor_t) (struct bio *);
 /*
@@ -159,7 +159,7 @@ struct bio {
 #define BIO_SEG_BOUNDARY(q, b1, b2) \
         BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
-#define bio_io_error(bio) bio_endio((bio), 0, bio_sectors((bio)))
+#define bio_io_error(bio) bio_endio((bio), 0)
 /*
  * drivers should not use the __ version unless they _really_ want to
@@ -192,7 +192,7 @@ struct bio {
 extern struct bio *bio_alloc(int, int);
 extern void bio_put(struct bio *);
-extern int bio_endio(struct bio *, int, int);
+extern void bio_endio(struct bio *, int);
 struct request_queue;
 extern inline int bio_phys_segments(struct request_queue *, struct bio *);
 extern inline int bio_hw_segments(struct request_queue *, struct bio *);
...
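Note: this typedef change is what forces every end_io conversion elsewhere in the merge: bio_end_io_t no longer takes a sector count or returns a status, so completion callbacks become plain void functions and callers stop checking a return value from bio_endio(). A hedged illustration of what a driver-side callback looks like under the new prototype (mydrv_end_io, struct mydrv_request and mydrv_finish are made-up names for this sketch, not part of the patch):

        static void mydrv_end_io(struct bio *bio)
        {
                struct mydrv_request *req = bio->bi_private;    /* hypothetical per-request state */
                int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);

                mydrv_finish(req, uptodate);    /* hypothetical driver bookkeeping */
                bio_put(bio);                   /* no status to return any more */
        }

The submitter simply sets bio->bi_end_io = mydrv_end_io before issuing the bio, and whoever completes it calls bio_endio(bio, uptodate).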
@@ -424,12 +424,12 @@ typedef struct ide_drive_s {
         unsigned long capacity;         /* total number of sectors */
         unsigned long long capacity48;  /* total number of sectors */
         unsigned int drive_data;        /* for use by tuneproc/selectproc as needed */
-        void *hwif;                     /* actually (ide_hwif_t *) */
+        struct hwif_s *hwif;            /* actually (ide_hwif_t *) */
         wait_queue_head_t wqueue;       /* used to wait for drive in open() */
         struct hd_driveid *id;          /* drive model identification info */
         struct hd_struct *part;         /* drive partition table */
         char name[4];                   /* drive name, such as "hda" */
-        void *driver;                   /* (ide_driver_t *) */
+        struct ide_driver_s *driver;    /* (ide_driver_t *) */
         void *driver_data;              /* extra driver data */
         devfs_handle_t de;              /* directory for device */
         struct proc_dir_entry *proc;    /* /proc/ide/ directory entry */
...
@@ -52,7 +52,7 @@ nbd_end_request(struct request *req)
                 blk_finished_io(nsect);
                 req->bio = bio->bi_next;
                 bio->bi_next = NULL;
-                bio_endio(bio, uptodate, nsect);
+                bio_endio(bio, uptodate);
         }
         blkdev_release_request(req);
         spin_unlock_irqrestore(q->queue_lock, flags);
...
@@ -288,11 +288,11 @@ static inline void copy_to_high_bio_irq(struct bio *to, struct bio *from)
         }
 }
-static inline int bounce_end_io (struct bio *bio, int nr_sectors, mempool_t *pool)
+static inline void bounce_end_io(struct bio *bio, mempool_t *pool)
 {
         struct bio *bio_orig = bio->bi_private;
         struct bio_vec *bvec, *org_vec;
-        int ret, i;
+        int i;
         if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                 goto out_eio;
@@ -311,41 +311,38 @@ static inline int bounce_end_io (struct bio *bio, int nr_sectors, mempool_t *poo
         }
 out_eio:
-        ret = bio_orig->bi_end_io(bio_orig, nr_sectors);
+        bio_orig->bi_end_io(bio_orig);
         bio_put(bio);
-        return ret;
 }
-static int bounce_end_io_write(struct bio *bio, int nr_sectors)
+static void bounce_end_io_write(struct bio *bio)
 {
-        return bounce_end_io(bio, nr_sectors, page_pool);
+        bounce_end_io(bio, page_pool);
 }
-static int bounce_end_io_write_isa(struct bio *bio, int nr_sectors)
+static void bounce_end_io_write_isa(struct bio *bio)
 {
-        return bounce_end_io(bio, nr_sectors, isa_page_pool);
+        bounce_end_io(bio, isa_page_pool);
 }
-static inline int __bounce_end_io_read(struct bio *bio, int nr_sectors,
-                mempool_t *pool)
+static inline void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 {
         struct bio *bio_orig = bio->bi_private;
         if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                 copy_to_high_bio_irq(bio_orig, bio);
-        return bounce_end_io(bio, nr_sectors, pool);
+        bounce_end_io(bio, pool);
 }
-static int bounce_end_io_read(struct bio *bio, int nr_sectors)
+static void bounce_end_io_read(struct bio *bio)
 {
-        return __bounce_end_io_read(bio, nr_sectors, page_pool);
+        __bounce_end_io_read(bio, page_pool);
 }
-static int bounce_end_io_read_isa(struct bio *bio, int nr_sectors)
+static void bounce_end_io_read_isa(struct bio *bio)
 {
-        return __bounce_end_io_read(bio, nr_sectors, isa_page_pool);
+        return __bounce_end_io_read(bio, isa_page_pool);
 }
 void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig)
...