Commit 4f596034 authored by Len Brown

Merge intel.com:/home/lenb/bk/linux-2.6.0

into intel.com:/home/lenb/src/linux-acpi-test-2.6.0
parents 7af0f30f 67e9bb60
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 0
EXTRAVERSION = -test11
EXTRAVERSION =
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
......
......@@ -216,12 +216,7 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
* fill in request structure
*/
rq->cmd_len = hdr->cmd_len;
if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) {
blk_put_request(rq);
return -EFAULT;
}
memcpy(rq->cmd, hdr->cmdp, hdr->cmd_len);
if (sizeof(rq->cmd) != hdr->cmd_len)
memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);
......@@ -437,12 +432,23 @@ int scsi_cmd_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long ar
break;
case SG_IO: {
struct sg_io_hdr hdr;
unsigned char cdb[BLK_MAX_CDB], *old_cdb;
if (copy_from_user(&hdr, (struct sg_io_hdr *) arg, sizeof(hdr))) {
err = -EFAULT;
err = -EFAULT;
if (copy_from_user(&hdr, (struct sg_io_hdr *) arg, sizeof(hdr)))
break;
}
err = -EINVAL;
if (hdr.cmd_len > sizeof(rq->cmd))
break;
err = -EFAULT;
if (copy_from_user(cdb, hdr.cmdp, hdr.cmd_len))
break;
old_cdb = hdr.cmdp;
hdr.cmdp = cdb;
err = sg_io(q, bdev, &hdr);
hdr.cmdp = old_cdb;
if (copy_to_user((struct sg_io_hdr *) arg, &hdr, sizeof(hdr)))
err = -EFAULT;
break;
......
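The SG_IO change above moves the CDB handling out of sg_io(): scsi_cmd_ioctl() now bounds-checks hdr.cmd_len (anything larger than BLK_MAX_CDB, 16 bytes, gets -EINVAL), copies the user CDB into a local kernel buffer, and restores the user's cmdp pointer before copying the header back out, so sg_io() itself only needs a memcpy(). For orientation, a minimal userspace sketch of driving this path follows; the device node, the TEST UNIT READY CDB and the field values are illustrative assumptions, not part of this commit.

/* Sketch only: issue a TEST UNIT READY through the block-layer SG_IO ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x00, 0, 0, 0, 0, 0 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sda", O_RDONLY);		/* illustrative device node */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);	/* must not exceed BLK_MAX_CDB (16) */
	hdr.cmdp = cdb;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.dxfer_direction = SG_DXFER_NONE;
	hdr.timeout = 5000;		/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("scsi status %#x\n", hdr.status);
	return 0;
}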
......@@ -799,6 +799,10 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
* sector... If we got here the error is not correctable */
ide_dump_status (drive, "media error (bad sector)", stat);
do_end_request = 1;
} else if (sense_key == BLANK_CHECK) {
/* Disk appears blank ?? */
ide_dump_status (drive, "media error (blank)", stat);
do_end_request = 1;
} else if ((err & ~ABRT_ERR) != 0) {
/* Go to the default handler
for other errors. */
......
......@@ -501,6 +501,7 @@ struct cdrom_info {
#define ILLEGAL_REQUEST 0x05
#define UNIT_ATTENTION 0x06
#define DATA_PROTECT 0x07
#define BLANK_CHECK 0x08
#define ABORTED_COMMAND 0x0b
#define MISCOMPARE 0x0e
......@@ -578,7 +579,7 @@ const char * const sense_key_texts[16] = {
"Illegal request",
"Unit attention",
"Data protect",
"(reserved)",
"Blank check",
"(reserved)",
"(reserved)",
"Aborted command",
......
......@@ -40,6 +40,16 @@
#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
* order without overlap. There may be several bio's per stripe+device, and
* a bio could span several devices.
* When walking this list for a particular stripe+device, we must never proceed
* beyond a bio that extends past this device, as the next bio might no longer
* be valid.
* This macro is used to determine the 'next' bio in the list, given the sector
* of the current stripe+device
*/
#define r5_next_bio(bio, sect) ( ( bio->bi_sector + (bio->bi_size>>9) < sect + STRIPE_SECTORS) ? bio->bi_next : NULL)
/*
* The following can be used to debug the driver
*/
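As a rough illustration of the walk pattern the hunks below switch to, a hedged sketch (process_bio() is a hypothetical stand-in for the per-bio work; the other names come from this file):

/* Sketch only: visit every bio queued against one stripe+device,
 * never proceeding beyond a bio that extends past this device. */
sector_t dev_sector = sh->dev[i].sector;	/* first sector of this stripe+device */
struct bio *bi;

for (bi = sh->dev[i].toread;
     bi && bi->bi_sector < dev_sector + STRIPE_SECTORS;
     bi = r5_next_bio(bi, dev_sector))
	process_bio(bi);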
......@@ -613,7 +623,7 @@ static void copy_data(int frombio, struct bio *bio,
int i;
for (;bio && bio->bi_sector < sector+STRIPE_SECTORS;
bio = bio->bi_next) {
bio = r5_next_bio(bio, sector) ) {
int page_offset;
if (bio->bi_sector >= sector)
page_offset = (signed)(bio->bi_sector - sector) * 512;
......@@ -738,7 +748,11 @@ static void compute_parity(struct stripe_head *sh, int method)
for (i = disks; i--;)
if (sh->dev[i].written) {
sector_t sector = sh->dev[i].sector;
copy_data(1, sh->dev[i].written, sh->dev[i].page, sector);
struct bio *wbi = sh->dev[i].written;
while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
copy_data(1, wbi, sh->dev[i].page, sector);
wbi = r5_next_bio(wbi, sector);
}
set_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(R5_UPTODATE, &sh->dev[i].flags);
......@@ -791,8 +805,10 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
bip = &sh->dev[dd_idx].towrite;
else
bip = &sh->dev[dd_idx].toread;
while (*bip && (*bip)->bi_sector < bi->bi_sector)
while (*bip && (*bip)->bi_sector < bi->bi_sector) {
BUG_ON((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector);
bip = & (*bip)->bi_next;
}
/* FIXME do I need to worry about overlapping bion */
if (*bip && bi->bi_next && (*bip) != bi->bi_next)
BUG();
......@@ -813,7 +829,7 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
for (bi=sh->dev[dd_idx].towrite;
sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
bi && bi->bi_sector <= sector;
bi = bi->bi_next) {
bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
if (bi->bi_sector + (bi->bi_size>>9) >= sector)
sector = bi->bi_sector + (bi->bi_size>>9);
}
......@@ -883,7 +899,7 @@ static void handle_stripe(struct stripe_head *sh)
spin_unlock_irq(&conf->device_lock);
while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
copy_data(0, rbi, dev->page, dev->sector);
rbi2 = rbi->bi_next;
rbi2 = r5_next_bio(rbi, dev->sector);
spin_lock_irq(&conf->device_lock);
if (--rbi->bi_phys_segments == 0) {
rbi->bi_next = return_bi;
......@@ -928,7 +944,7 @@ static void handle_stripe(struct stripe_head *sh)
if (bi) to_write--;
while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
struct bio *nextbi = bi->bi_next;
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
if (--bi->bi_phys_segments == 0) {
md_write_end(conf->mddev);
......@@ -941,7 +957,7 @@ static void handle_stripe(struct stripe_head *sh)
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = bi->bi_next;
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
if (--bi->bi_phys_segments == 0) {
md_write_end(conf->mddev);
......@@ -957,7 +973,7 @@ static void handle_stripe(struct stripe_head *sh)
sh->dev[i].toread = NULL;
if (bi) to_read--;
while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
struct bio *nextbi = bi->bi_next;
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
if (--bi->bi_phys_segments == 0) {
bi->bi_next = return_bi;
......@@ -1000,7 +1016,7 @@ static void handle_stripe(struct stripe_head *sh)
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
wbi2 = wbi->bi_next;
wbi2 = r5_next_bio(wbi, dev->sector);
if (--wbi->bi_phys_segments == 0) {
md_write_end(conf->mddev);
wbi->bi_next = return_bi;
......
......@@ -16,9 +16,13 @@
* General Public License for more details.
*
******************************************************************************/
#define QLA1280_VERSION "3.23.37"
#define QLA1280_VERSION "3.23.37.1"
/*****************************************************************************
Revision History:
Rev 3.23.37.1 December 17, 2003, Jes Sorensen
- Delete completion queue from srb if mailbox command failed, to
avoid qla1280_done completing qla1280_error_action's
obsolete context
Rev 3.23.37 October 1, 2003, Jes Sorensen
- Make MMIO depend on CONFIG_X86_VISWS instead of yet another
random CONFIG option
......@@ -1464,8 +1468,15 @@ qla1280_error_action(Scsi_Cmnd * cmd, enum action action)
/* If we didn't manage to issue the action, or we have no
* command to wait for, exit here */
if (result == FAILED || handle == NULL ||
handle == (unsigned char *)INVALID_HANDLE)
handle == (unsigned char *)INVALID_HANDLE) {
/*
* Clear completion queue to avoid qla1280_done() trying
* to complete the command at a later stage after we
* have exited the current context
*/
sp->wait = NULL;
goto leave;
}
/* set up a timer just in case we're really jammed */
init_timer(&timer);
......
......@@ -261,7 +261,6 @@ static void async_completed(struct urb *urb, struct pt_regs *regs)
spin_lock(&ps->lock);
list_move_tail(&as->asynclist, &ps->async_completed);
spin_unlock(&ps->lock);
wake_up(&ps->wait);
if (as->signr) {
sinfo.si_signo = as->signr;
sinfo.si_errno = as->urb->status;
......@@ -269,6 +268,7 @@ static void async_completed(struct urb *urb, struct pt_regs *regs)
sinfo.si_addr = (void *)as->userurb;
send_sig_info(as->signr, &sinfo, as->task);
}
wake_up(&ps->wait);
}
static void destroy_async (struct dev_state *ps, struct list_head *list)
......
......@@ -692,6 +692,9 @@ static int hub_port_status(struct usb_device *dev, int port,
struct usb_hub *hub = usb_get_intfdata(dev->actconfig->interface[0]);
int ret;
if (!hub)
return -ENODEV;
ret = get_port_status(dev, port + 1, &hub->status->port);
if (ret < 0)
dev_err (hubdev (dev),
......@@ -926,7 +929,6 @@ static void hub_port_connect_change(struct usb_hub *hubstate, int port,
break;
}
hub->children[port] = dev;
dev->state = USB_STATE_POWERED;
/* Reset the device, and detect its speed */
......@@ -979,8 +981,10 @@ static void hub_port_connect_change(struct usb_hub *hubstate, int port,
dev->dev.parent = dev->parent->dev.parent->parent;
/* Run it through the hoops (find a driver, etc) */
if (!usb_new_device(dev, &hub->dev))
if (!usb_new_device(dev, &hub->dev)) {
hub->children[port] = dev;
goto done;
}
/* Free the configuration if there was an error */
usb_put_dev(dev);
......@@ -989,7 +993,6 @@ static void hub_port_connect_change(struct usb_hub *hubstate, int port,
delay = HUB_LONG_RESET_TIME;
}
hub->children[port] = NULL;
hub_port_disable(hub, port);
done:
up(&usb_address0_sem);
......@@ -1342,6 +1345,7 @@ int usb_physical_reset_device(struct usb_device *dev)
dev->devpath, ret);
return ret;
}
dev->state = USB_STATE_CONFIGURED;
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *intf = dev->actconfig->interface[i];
......
......@@ -1120,6 +1120,7 @@ int usb_new_device(struct usb_device *dev, struct device *parent)
if (err) {
dev_err(&dev->dev, "can't set config #%d, error %d\n",
dev->config[0].desc.bConfigurationValue, err);
device_del(&dev->dev);
goto fail;
}
......
......@@ -18,13 +18,15 @@ config USB_MDC800
module will be called mdc800.
config USB_SCANNER
tristate "USB Scanner support"
tristate "USB Scanner support (OBSOLETE)"
depends on USB
help
Say Y here if you want to connect a USB scanner to your computer's
USB port. Please read <file:Documentation/usb/scanner.txt> for more
information.
This driver has been obsoleted by support via libusb.
To compile this driver as a module, choose M here: the
module will be called scanner.
......
......@@ -324,7 +324,7 @@ static void auerchain_complete (struct urb * urb, struct pt_regs *regs)
urb = acep->urbp;
dbg ("auerchain_complete: submitting next urb from chain");
urb->status = 0; /* needed! */
result = usb_submit_urb(urb, GFP_KERNEL);
result = usb_submit_urb(urb, GFP_ATOMIC);
/* check for submit errors */
if (result) {
......@@ -402,7 +402,7 @@ static int auerchain_submit_urb_list (pauerchain_t acp, struct urb * urb, int ea
if (acep) {
dbg("submitting urb immediate");
urb->status = 0; /* needed! */
result = usb_submit_urb(urb, GFP_KERNEL);
result = usb_submit_urb(urb, GFP_ATOMIC);
/* check for submit errors */
if (result) {
urb->status = result;
......
......@@ -493,12 +493,15 @@ static int serial_open (struct tty_struct *tty, struct file * filp)
return retval;
}
static void __serial_close(struct usb_serial_port *port, struct file *filp)
static void serial_close(struct tty_struct *tty, struct file * filp)
{
if (!port->open_count) {
dbg ("%s - port not opened", __FUNCTION__);
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
struct usb_serial *serial = get_usb_serial (port, __FUNCTION__);
if (!serial)
return;
}
dbg("%s - port %d", __FUNCTION__, port->number);
--port->open_count;
if (port->open_count <= 0) {
......@@ -506,30 +509,18 @@ static void __serial_close(struct usb_serial_port *port, struct file *filp)
* port is being closed by the last owner */
port->serial->type->close(port, filp);
port->open_count = 0;
if (port->tty) {
if (port->tty->driver_data)
port->tty->driver_data = NULL;
port->tty = NULL;
}
}
module_put(port->serial->type->owner);
kobject_put(&port->serial->kobj);
}
static void serial_close(struct tty_struct *tty, struct file * filp)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
struct usb_serial *serial = get_usb_serial (port, __FUNCTION__);
if (!serial)
return;
dbg("%s - port %d", __FUNCTION__, port->number);
/* if disconnect beat us to the punch here, there's nothing to do */
if (tty && tty->driver_data) {
__serial_close(port, filp);
tty->driver_data = NULL;
}
port->tty = NULL;
}
static int serial_write (struct tty_struct * tty, int from_user, const unsigned char *buf, int count)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
......@@ -848,19 +839,6 @@ static void destroy_serial (struct kobject *kobj)
dbg ("%s - %s", __FUNCTION__, kobj->name);
serial = to_usb_serial(kobj);
/* fail all future close/read/write/ioctl/etc calls */
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
if (port->tty != NULL) {
port->tty->driver_data = NULL;
while (port->open_count > 0) {
__serial_close(port, NULL);
}
port->tty = NULL;
}
}
serial_shutdown (serial);
/* return the minor range that this device had */
......@@ -1242,7 +1220,7 @@ int usb_serial_probe(struct usb_interface *interface,
/* register all of the individual ports with the driver core */
for (i = 0; i < num_ports; ++i) {
port = serial->port[i];
port->dev.parent = &serial->dev->dev;
port->dev.parent = &interface->dev;
port->dev.driver = NULL;
port->dev.bus = &usb_serial_bus_type;
port->dev.release = &port_release;
......
......@@ -387,7 +387,7 @@ static int datafab_id_device(struct us_data *us,
// we'll go ahead and extract the media capacity while we're here...
//
rc = datafab_bulk_read(us, reply, sizeof(reply));
rc = datafab_bulk_read(us, reply, 512);
if (rc == USB_STOR_XFER_GOOD) {
// capacity is at word offset 57-58
//
......
......@@ -317,7 +317,7 @@ static int jumpshot_id_device(struct us_data *us,
}
// read the reply
rc = jumpshot_bulk_read(us, reply, sizeof(reply));
rc = jumpshot_bulk_read(us, reply, 512);
if (rc != USB_STOR_XFER_GOOD) {
rc = USB_STOR_TRANSPORT_ERROR;
goto leave;
......
......@@ -65,6 +65,8 @@ int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
int c1, c2 = 0;
int ret = 0;
lock_kernel();
if (hpfs_sb(inode->i_sb)->sb_chk) {
if (hpfs_chk_sectors(inode->i_sb, inode->i_ino, 1, "dir_fnode")) {
ret = -EFSERROR;
......
......@@ -82,6 +82,8 @@ struct request_list {
wait_queue_head_t wait[2];
};
#define BLK_MAX_CDB 16
/*
* try to put the fields that are referenced together in the same cacheline
*/
......@@ -147,7 +149,7 @@ struct request {
* when request is used as a packet command carrier
*/
unsigned int cmd_len;
unsigned char cmd[16];
unsigned char cmd[BLK_MAX_CDB];
unsigned int data_len;
void *data;
......
......@@ -208,6 +208,18 @@ static inline int list_empty(const struct list_head *head)
return head->next == head;
}
/**
* list_empty_careful - tests whether a list is
* empty _and_ checks that no other CPU might be
* in the process of still modifying either member
* @head: the list to test.
*/
static inline int list_empty_careful(const struct list_head *head)
{
struct list_head *next = head->next;
return (next == head) && (next == head->prev);
}
static inline void __list_splice(struct list_head *list,
struct list_head *head)
{
......
......@@ -49,9 +49,11 @@ static void __unhash_process(struct task_struct *p)
void release_task(struct task_struct * p)
{
int zap_leader;
task_t *leader;
struct dentry *proc_dentry;
repeat:
BUG_ON(p->state < TASK_ZOMBIE);
atomic_dec(&p->user->processes);
......@@ -70,10 +72,21 @@ void release_task(struct task_struct * p)
* group, and the leader is zombie, then notify the
* group leader's parent process. (if it wants notification.)
*/
zap_leader = 0;
leader = p->group_leader;
if (leader != p && thread_group_empty(leader) &&
leader->state == TASK_ZOMBIE && leader->exit_signal != -1)
if (leader != p && thread_group_empty(leader) && leader->state == TASK_ZOMBIE) {
BUG_ON(leader->exit_signal == -1);
do_notify_parent(leader, leader->exit_signal);
/*
* If we were the last child thread and the leader has
* exited already, and the leader's parent ignores SIGCHLD,
* then we are the one who should release the leader.
*
* do_notify_parent() will have marked it self-reaping in
* that case.
*/
zap_leader = (leader->exit_signal == -1);
}
p->parent->cutime += p->utime + p->cutime;
p->parent->cstime += p->stime + p->cstime;
......@@ -88,6 +101,10 @@ void release_task(struct task_struct * p)
proc_pid_flush(proc_dentry);
release_thread(p);
put_task_struct(p);
p = leader;
if (unlikely(zap_leader))
goto repeat;
}
/* we are using it only for SMP init */
......
......@@ -125,15 +125,28 @@ void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
EXPORT_SYMBOL(remove_wait_queue);
/*
* Note: we use "set_current_state()" _after_ the wait-queue add,
* because we need a memory barrier there on SMP, so that any
* wake-function that tests for the wait-queue being active
* will be guaranteed to see waitqueue addition _or_ subsequent
* tests in this thread will see the wakeup having taken place.
*
* The spin_unlock() itself is semi-permeable and only protects
* one way (it only protects stuff inside the critical region and
* stops them from bleeding out - it would still allow subsequent
* loads to move into the critical region).
*/
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
__set_current_state(state);
wait->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
......@@ -144,11 +157,11 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
__set_current_state(state);
wait->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue_tail(q, wait);
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
......@@ -159,7 +172,20 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
unsigned long flags;
__set_current_state(TASK_RUNNING);
if (!list_empty(&wait->task_list)) {
/*
* We can check for list emptiness outside the lock
* IFF:
* - we use the "careful" check that verifies both
* the next and prev pointers, so that there cannot
* be any half-pending updates in progress on other
* CPU's that we haven't seen yet (and that might
* still change the stack area).
* and
* - all other users take the lock (ie we can only
* have _one_ other CPU that looks at or modifies
* the list).
*/
if (!list_empty_careful(&wait->task_list)) {
spin_lock_irqsave(&q->lock, flags);
list_del_init(&wait->task_list);
spin_unlock_irqrestore(&q->lock, flags);
......
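Taken together, prepare_to_wait()/finish_wait() are aimed at the usual sleep/wake loop; a minimal hedged sketch (my_waitqueue and condition_is_met() are placeholders, not from this commit):

/* Sketch only: canonical use of the helpers changed above. */
DEFINE_WAIT(wait);

for (;;) {
	prepare_to_wait(&my_waitqueue, &wait, TASK_INTERRUPTIBLE);
	if (condition_is_met())
		break;
	if (signal_pending(current))
		break;
	schedule();
}
finish_wait(&my_waitqueue, &wait);

With the reordering above, the task state change happens after the queue add, which is the ordering the wake side relies on, and finish_wait() can use list_empty_careful() to skip taking the lock when the entry has already been removed.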
......@@ -236,8 +236,6 @@ static void unlink(struct kobject * kobj)
list_del_init(&kobj->entry);
up_write(&kobj->kset->subsys->rwsem);
}
if (kobj->parent)
kobject_put(kobj->parent);
kobject_put(kobj);
}
......@@ -274,9 +272,11 @@ int kobject_add(struct kobject * kobj)
kobj->parent = parent;
error = create_dir(kobj);
if (error)
if (error) {
unlink(kobj);
else {
if (parent)
kobject_put(parent);
} else {
/* If this kobj does not belong to a kset,
try to find a parent that does. */
top_kobj = kobj;
......@@ -452,6 +452,7 @@ void kobject_cleanup(struct kobject * kobj)
{
struct kobj_type * t = get_ktype(kobj);
struct kset * s = kobj->kset;
struct kobject * parent = kobj->parent;
pr_debug("kobject %s: cleaning up\n",kobject_name(kobj));
if (kobj->k_name != kobj->name)
......@@ -461,6 +462,8 @@ void kobject_cleanup(struct kobject * kobj)
t->release(kobj);
if (s)
kset_put(s);
if (parent)
kobject_put(parent);
}
/**
......
......@@ -52,6 +52,13 @@ static int do_usb_entry(const char *filename,
id->bcdDevice_lo = TO_NATIVE(id->bcdDevice_lo);
id->bcdDevice_hi = TO_NATIVE(id->bcdDevice_hi);
/*
* Some modules (visor) have empty slots as placeholders for
* run-time specification, which would result in a catch-all alias
*/
if (!(id->idVendor | id->bDeviceClass | id->bInterfaceClass))
return 1;
strcpy(alias, "usb:");
ADD(alias, "v", id->match_flags&USB_DEVICE_ID_MATCH_VENDOR,
id->idVendor);
......
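The new check skips tables like the following; a hedged sketch in the visor style, where the ids and the runtime-filled slot are illustrative assumptions rather than code from this commit:

/* Sketch only: a device table with a deliberately empty slot, as the
 * visor driver keeps for ids supplied at module load time.  Without
 * the check above, the empty slot would become a catch-all alias. */
static struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },		/* illustrative vendor/product */
	{ },					/* reserved, filled at runtime */
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);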