Commit fd0a1c61 authored by James Bottomley

Merge mulgrave.(none):/home/jejb/BK/linux-2.5

into mulgrave.(none):/home/jejb/BK/scsi-for-linus-2.5
parents 1c6232c3 9b46c836
@@ -284,6 +284,7 @@ NCR_700_detect(Scsi_Host_Template *tpnt,
tpnt->use_clustering = DISABLE_CLUSTERING;
tpnt->proc_info = NCR_700_proc_directory_info;
tpnt->use_blk_tcq = 1;
tpnt->highmem_io = 1;
if(tpnt->name == NULL)
tpnt->name = "53c700";
......
@@ -259,6 +259,48 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j)
return retval;
}
/* Account one more active command on this host/device pair. */
void scsi_host_busy_inc(struct Scsi_Host *shost, Scsi_Device *sdev)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy++;
	sdev->device_busy++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Drop the busy counts for a completed command and, if the host is in
 * recovery and this was the last command still outstanding, wake the
 * error handler thread.
 */
void scsi_host_busy_dec_and_test(struct Scsi_Host *shost, Scsi_Device *sdev)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	sdev->device_busy--;
	if (shost->in_recovery && (shost->host_busy == shost->host_failed)) {
		up(shost->eh_wait);
		SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler "
					"thread (%d)\n",
					atomic_read(&shost->eh_wait->count)));
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Mark a command as failed, flag the host as being in recovery and wake
 * the error handler thread once every outstanding command has either
 * completed or failed.
 */
void scsi_host_failed_inc_and_test(struct Scsi_Host *shost)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->in_recovery = 1;
	shost->host_failed++;
	if (shost->host_busy == shost->host_failed) {
		up(shost->eh_wait);
		SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler "
					"thread (%d)\n",
					atomic_read(&shost->eh_wait->count)));
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
......
@@ -543,6 +543,13 @@ extern int scsi_unregister_device(struct Scsi_Device_Template *);
extern int scsi_register_host(Scsi_Host_Template *);
extern int scsi_unregister_host(Scsi_Host_Template *);
/*
* host_busy inc/dec/test functions
*/
extern void scsi_host_busy_inc(struct Scsi_Host *, Scsi_Device *);
extern void scsi_host_busy_dec_and_test(struct Scsi_Host *, Scsi_Device *);
extern void scsi_host_failed_inc_and_test(struct Scsi_Host *);
/*
* This is an ugly hack. If we expect to be able to load devices at run time,
......
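The new helpers above replace the open-coded busy/failed accounting that the midlayer hunks below remove. A rough sketch of the intended pairing over a command's lifetime (example_command_lifetime() is an invented name used only for illustration, not part of this patch):

/* Illustrative only -- not part of the patch. */
static void example_command_lifetime(struct Scsi_Host *shost,
				     Scsi_Device *sdev, Scsi_Cmnd *scpnt)
{
	/* Command handed to the low-level driver: account it as active. */
	scsi_host_busy_inc(shost, sdev);

	/* ... the low-level driver processes the command ... */

	if (scpnt->result != 0 && shost->eh_wait != NULL) {
		/* Error completion: mark it failed and, once host_busy
		 * equals host_failed, wake the error handler thread. */
		scsi_host_failed_inc_and_test(shost);
	} else {
		/* Normal completion: drop the busy counts; this also wakes
		 * the error handler if it was waiting for the last
		 * outstanding command during recovery. */
		scsi_host_busy_dec_and_test(shost, sdev);
	}
}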
@@ -566,22 +566,6 @@ inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
SCpnt->target,
atomic_read(&SCpnt->host->host_active),
SCpnt->host->host_failed));
if (SCpnt->host->host_failed != 0) {
SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
SCpnt->host->in_recovery,
SCpnt->host->eh_active));
}
/*
* If the host is having troubles, then look to see if this was the last
* command that might have failed. If so, wake up the error handler.
*/
if (SCpnt->host->in_recovery
&& !SCpnt->host->eh_active
&& SCpnt->host->host_busy == SCpnt->host->host_failed) {
SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
atomic_read(&SCpnt->host->eh_wait->count)));
up(SCpnt->host->eh_wait);
}
spin_unlock_irqrestore(&device_request_lock, flags);
@@ -1217,28 +1201,11 @@ void scsi_done(Scsi_Cmnd * SCpnt)
* etc, etc.
*/
if (!tstatus) {
SCpnt->done_late = 1;
return;
}
/* Set the serial numbers back to zero */
SCpnt->serial_number = 0;
/*
 * First, see whether this command already timed out. If so, we ignore
 * the response. We treat it as if the command never finished.
 *
 * Since serial_number is now 0, the error handler could detect this
 * situation and avoid calling the low-level driver abort routine.
 * (DB)
 *
 * FIXME(eric) - I believe that this test is now redundant, due to
 * the test of the return status of del_timer().
 */
if (SCpnt->state == SCSI_STATE_TIMEOUT) {
SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
return;
}
SCpnt->serial_number_at_timeout = 0;
SCpnt->state = SCSI_STATE_BHQUEUE;
SCpnt->owner = SCSI_OWNER_BH_HANDLER;
@@ -1349,21 +1316,11 @@ static void scsi_softirq(struct softirq_action *h)
SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
}
if (SCpnt->host->eh_wait != NULL) {
SCpnt->host->host_failed++;
scsi_eh_eflags_set(SCpnt, SCSI_EH_CMD_FAILED | SCSI_EH_CMD_ERR);
SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
SCpnt->state = SCSI_STATE_FAILED;
SCpnt->host->in_recovery = 1;
/*
* If the host is having troubles, then
* look to see if this was the last
* command that might have failed. If
* so, wake up the error handler.
*/
if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
atomic_read(&SCpnt->host->eh_wait->count)));
up(SCpnt->host->eh_wait);
}
scsi_host_failed_inc_and_test(SCpnt->host);
} else {
/*
* We only get here if the error
@@ -1418,7 +1375,6 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
struct Scsi_Host *host;
Scsi_Device *device;
Scsi_Request * SRpnt;
unsigned long flags;
host = SCpnt->host;
device = SCpnt->device;
@@ -1432,10 +1388,7 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
* one execution context, but the device and host structures are
* shared.
*/
spin_lock_irqsave(host->host_lock, flags);
host->host_busy--; /* Indicate that we are free */
device->device_busy--; /* Decrement device usage counter. */
spin_unlock_irqrestore(host->host_lock, flags);
scsi_host_busy_dec_and_test(host, device);
/*
* Clear the flags which say that the device/host is no longer
@@ -1450,7 +1403,7 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
* If we have valid sense information, then some kind of recovery
* must have taken place. Make a note of this.
*/
if (scsi_sense_valid(SCpnt)) {
if (SCSI_SENSE_VALID(SCpnt)) {
SCpnt->result |= (DRIVER_SENSE << 24);
}
SCSI_LOG_MLCOMPLETE(3, printk("Notifying upper driver of completion for device %d %x\n",
......
@@ -428,7 +428,6 @@ extern void scsi_add_timer(Scsi_Cmnd * SCset, int timeout,
void (*complete) (Scsi_Cmnd *));
extern int scsi_delete_timer(Scsi_Cmnd * SCset);
extern void scsi_error_handler(void *host);
extern int scsi_sense_valid(Scsi_Cmnd *);
extern int scsi_decide_disposition(Scsi_Cmnd * SCpnt);
extern int scsi_block_when_processing_errors(Scsi_Device *);
extern void scsi_sleep(int);
@@ -701,6 +700,7 @@ struct scsi_cmnd {
struct scsi_cmnd *reset_chain;
int eh_state; /* Used for state tracking in error handler */
int eh_eflags; /* Used by error handler */
void (*done) (struct scsi_cmnd *); /* Mid-level done function */
/*
A SCSI Command is assigned a nonzero serial_number when internal_cmnd
@@ -940,4 +940,26 @@ static inline Scsi_Cmnd *scsi_find_tag(Scsi_Device *SDpnt, int tag) {
return (Scsi_Cmnd *)req->special;
}
#define scsi_eh_eflags_chk(scp, flags) (scp->eh_eflags & flags)
#define scsi_eh_eflags_set(scp, flags) do { \
scp->eh_eflags |= flags; \
} while(0)
#define scsi_eh_eflags_clr(scp, flags) do { \
scp->eh_eflags &= ~flags; \
} while(0)
#define scsi_eh_eflags_clr_all(scp) (scp->eh_eflags = 0)
/*
* Scsi Error Handler Flags
*/
#define SCSI_EH_CMD_ERR 0x0001 /* Orig cmd error'd */
#define SCSI_EH_CMD_FAILED 0x0002 /* Orig cmd error type failed */
#define SCSI_EH_CMD_TIMEOUT 0x0004 /* Orig cmd error type timeout */
#define SCSI_EH_REC_TIMEOUT 0x0008 /* Recovery cmd timeout */
#define SCSI_SENSE_VALID(scmd) ((scmd->sense_buffer[0] & 0x70) == 0x70)
#endif
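For reference, a minimal sketch of how the new flag macros and SCSI_SENSE_VALID compose; example_retire_after_recovery() is an invented name for illustration only, the real users are the completion and softirq paths shown earlier:

/* Illustrative only -- not part of the patch. */
static void example_retire_after_recovery(Scsi_Cmnd *scpnt)
{
	/* Did this command enter recovery because the original failed? */
	if (scsi_eh_eflags_chk(scpnt, SCSI_EH_CMD_FAILED))
		printk("cmd %p failed before recovery\n", scpnt);

	/* A current/deferred response code (0x70/0x71) means the sense
	 * buffer holds valid sense data from the target. */
	if (SCSI_SENSE_VALID(scpnt))
		printk("cmd %p carries sense data\n", scpnt);

	/* Recovery is finished: clear all error-handling flags. */
	scsi_eh_eflags_clr_all(scpnt);
}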
@@ -140,7 +140,7 @@ void scsi_initialize_merge_fn(Scsi_Device * SDpnt)
* Enable highmem I/O, if appropriate.
*/
bounce_limit = BLK_BOUNCE_HIGH;
if (SHpnt->highmem_io && (SDpnt->type == TYPE_DISK)) {
if (SHpnt->highmem_io) {
if (!PCI_DMA_BUS_IS_PHYS)
/* Platforms with virtual-DMA translation
* hardware have no practical limit.
......
@@ -82,6 +82,19 @@ static void sg_proc_cleanup(void);
#define SG_MAX_DEVS_MASK ((1U << KDEV_MINOR_BITS) - 1)
/*
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d).
 * With 32-bit integers, x * m may overflow during the calculation.
 * Rewriting it as muldiv(x,m,d)=((x % d) * m) / d + int(x / d) * m
 * calculates the same value but avoids the overflow whenever both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
 */
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
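A quick worked example of the overflow the MULDIV comment describes; the values (HZ=1000, USER_HZ=100, a 10,000,000-tick timeout) are chosen purely for illustration:

#include <stdio.h>

#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))

int main(void)
{
	int x = 10000000;	/* timeout in USER_HZ ticks (illustrative) */
	int m = 1000;		/* HZ */
	int d = 100;		/* USER_HZ */

	/* Naive form: x * m = 10,000,000,000 overflows a 32-bit int. */
	/* MULDIV form: (x % d) * m / d + (x / d) * m = 100,000,000,
	 * which still fits, so the conversion stays exact here. */
	printf("%d\n", MULDIV(x, m, d));
	return 0;
}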
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
/proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
@@ -150,7 +163,8 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
unsigned save_scat_len; /* original length of trunc. scat. element */
Sg_request *headrp; /* head of request slist, NULL->empty */
@@ -790,10 +804,15 @@ sg_ioctl(struct inode *inode, struct file *filp,
return result;
if (val < 0)
return -EIO;
sfp->timeout = val;
if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
val = MULDIV (INT_MAX, USER_HZ, HZ);
sfp->timeout_user = val;
sfp->timeout = MULDIV (val, HZ, USER_HZ);
return 0;
case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
return sfp->timeout; /* strange ..., for backward compatibility */
/* strange ..., for backward compatibility */
return sfp->timeout_user;
case SG_SET_FORCE_LOW_DMA:
result = get_user(val, (int *) arg);
if (result)
@@ -2432,6 +2451,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp->rq_list_lock = RW_LOCK_UNLOCKED;
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
sdp->device->host->unchecked_isa_dma : 1;
......
@@ -304,7 +304,12 @@ struct sg_header
/* Defaults, commented if they differ from original sg driver */
#define SG_DEFAULT_TIMEOUT (60*HZ) /* HZ == 'jiffies in 1 second' */
#ifdef __KERNEL__
#define SG_DEFAULT_TIMEOUT_USER (60*USER_HZ) /* USER_HZ == 'user-space jiffies in 1 second' */
#else
#define SG_DEFAULT_TIMEOUT (60*HZ) /* HZ == 'jiffies in 1 second' */
#endif
#define SG_DEF_COMMAND_Q 0 /* command queuing is always on when
the new interface is used */
#define SG_DEF_UNDERRUN_FLAG 0
......
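And a minimal user-space sketch of the resulting behaviour, assuming a /dev/sg0 device exists: values passed to SG_SET_TIMEOUT and returned by SG_GET_TIMEOUT are now both in USER_HZ ticks, regardless of the kernel's internal HZ.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	int fd = open("/dev/sg0", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* 60 seconds expressed in USER_HZ ticks. */
	int t = 60 * sysconf(_SC_CLK_TCK);
	if (ioctl(fd, SG_SET_TIMEOUT, &t) < 0)
		perror("SG_SET_TIMEOUT");

	/* The driver converts to HZ internally but reports back the
	 * USER_HZ value it was given. */
	printf("timeout = %d user ticks\n", ioctl(fd, SG_GET_TIMEOUT));

	close(fd);
	return 0;
}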