Commit 99d60d5d
Authored Apr 09, 2003 by James Bottomley

Merge patmans/axboe

Parents: f01d7733 099616a4
Showing 8 changed files, with 280 additions and 218 deletions:

drivers/scsi/hosts.c        +1    -1
drivers/scsi/hosts.h        +1    -6
drivers/scsi/scsi.c         +4    -6
drivers/scsi/scsi.h         +14   -3
drivers/scsi/scsi_error.c   +5    -13
drivers/scsi/scsi_lib.c     +183  -143
drivers/scsi/scsi_scan.c    +70   -46
drivers/scsi/scsi_syms.c    +2    -0
drivers/scsi/hosts.c:

@@ -383,6 +383,7 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template *shost_tp, int xtr_bytes)
 	scsi_assign_lock(shost, &shost->default_lock);
 	INIT_LIST_HEAD(&shost->my_devices);
 	INIT_LIST_HEAD(&shost->eh_cmd_q);
+	INIT_LIST_HEAD(&shost->starved_list);
 	init_waitqueue_head(&shost->host_wait);
 	shost->dma_channel = 0xff;

@@ -619,7 +620,6 @@ void scsi_host_busy_dec_and_test(struct Scsi_Host *shost, Scsi_Device *sdev)
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->host_busy--;
-	sdev->device_busy--;
 	if (shost->in_recovery && shost->host_failed &&
 	    (shost->host_busy == shost->host_failed)) {
drivers/scsi/hosts.h:

@@ -380,6 +380,7 @@ struct Scsi_Host
 	struct scsi_host_cmd_pool *cmd_pool;
 	spinlock_t		free_list_lock;
 	struct list_head	free_list;	/* backup store of cmd structs */
+	struct list_head	starved_list;
 	spinlock_t		default_lock;
 	spinlock_t		*host_lock;

@@ -470,12 +471,6 @@ struct Scsi_Host
 	 */
 	unsigned reverse_ordering:1;

-	/*
-	 * Indicates that one or more devices on this host were starved, and
-	 * when the device becomes less busy that we need to feed them.
-	 */
-	unsigned some_device_starved:1;
-
 	/*
	 * Host has rejected a command because it was busy.
	 */
drivers/scsi/scsi.c:

@@ -447,8 +447,6 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
 	host = SCpnt->device->host;

-	ASSERT_LOCK(host->host_lock, 0);
-
 	/* Assign a unique nonzero serial_number. */
 	if (++serial_number == 0)
 		serial_number = 1;

@@ -574,8 +572,6 @@ void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
 {
 	struct Scsi_Host *host = SCpnt->device->host;

-	ASSERT_LOCK(host->host_lock, 0);
-
 	SCpnt->owner = SCSI_OWNER_MIDLEVEL;
 	SRpnt->sr_command = SCpnt;

@@ -819,12 +815,11 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
 	struct Scsi_Host *host;
 	Scsi_Device *device;
 	Scsi_Request *SRpnt;
+	unsigned int flags;

 	host = SCpnt->device->host;
 	device = SCpnt->device;

-	ASSERT_LOCK(host->host_lock, 0);
-
 	/*
 	 * We need to protect the decrement, as otherwise a race condition
 	 * would exist.  Fiddling with SCpnt isn't a problem as the

@@ -833,6 +828,9 @@ void scsi_finish_command(Scsi_Cmnd * SCpnt)
 	 * shared.
 	 */
 	scsi_host_busy_dec_and_test(host, device);
+	spin_lock_irqsave(SCpnt->device->request_queue->queue_lock, flags);
+	SCpnt->device->device_busy--;
+	spin_unlock_irqrestore(SCpnt->device->request_queue->queue_lock, flags);

 	/*
 	 * Clear the flags which say that the device/host is no longer
drivers/scsi/scsi.h:

@@ -417,7 +417,8 @@ extern void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt);
 extern void scsi_io_completion(Scsi_Cmnd *SCpnt, int good_sectors,
 			       int block_sectors);
 extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
-extern request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost);
+extern void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd);
+extern request_queue_t *scsi_alloc_queue(struct scsi_device *sdev);
 extern void scsi_free_queue(request_queue_t *q);
 extern int scsi_init_queue(void);
 extern void scsi_exit_queue(void);

@@ -530,6 +531,15 @@ struct scsi_dev_info_list {
 extern struct list_head scsi_dev_info_list;
 extern int scsi_dev_info_list_add_str(char *);

+/*
+ * scsi_target: representation of a scsi target, for now, this is only
+ * used for single_lun devices.
+ */
+struct scsi_target {
+	unsigned int	starget_busy;
+	unsigned int	starget_refcnt;
+};
+
 /*
  * The scsi_device struct contains what we know about each given scsi
  * device.

@@ -554,8 +564,10 @@ struct scsi_device {
 	struct Scsi_Host *host;
 	request_queue_t *request_queue;
 	volatile unsigned short device_busy;	/* commands actually active on low-level */
+	spinlock_t sdev_lock;		/* also the request queue_lock */
 	spinlock_t list_lock;
 	struct list_head cmd_list;	/* queue of in use SCSI Command structures */
+	struct list_head starved_entry;
 	Scsi_Cmnd *current_cmnd;	/* currently active command */
 	unsigned short queue_depth;	/* How deep of a queue we want */
 	unsigned short last_queue_full_depth; /* These two are used by */

@@ -586,6 +598,7 @@ struct scsi_device {
 	unsigned char current_tag;	/* current tag */
//	unsigned char sync_min_period;	/* Not less than this period */
//	unsigned char sync_max_offset;	/* Not greater than this offset */
+	struct scsi_target *sdev_target; /* used only for single_lun */

 	unsigned online:1;
 	unsigned writeable:1;

@@ -616,8 +629,6 @@ struct scsi_device {
 				 * because we did a bus reset. */
 	unsigned ten:1;		/* support ten byte read / write */
 	unsigned remap:1;	/* support remapping  */
-	unsigned starved:1;	/* unable to process commands because
-				   host busy */
//	unsigned sync:1;	/* Sync transfer state, managed by host */
//	unsigned wide:1;	/* WIDE transfer state, managed by host */
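Note on the new starved_entry field: in linux/list.h a list_head initialized to point at itself doubles as a "not on any list" flag, which is what lets scsi_host_queue_ready() in scsi_lib.c below test starvation membership with a cheap list_empty(&sdev->starved_entry), and lets list_del_init() make the node safely testable again. A minimal userspace model of that behaviour — names (list_node, node_on_list) are made up for illustration, not the kernel API:

#include <assert.h>

struct list_node { struct list_node *next, *prev; };

/* like INIT_LIST_HEAD(): an empty node points at itself */
static void node_init(struct list_node *n) { n->next = n->prev = n; }

/* !list_empty(&entry): a self-pointing entry is on no list */
static int node_on_list(const struct list_node *n) { return n->next != n; }

static void node_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* like list_del_init(): unlink, then re-point at self */
static void node_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	node_init(n);
}

int main(void)
{
	struct list_node head, dev;

	node_init(&head);
	node_init(&dev);
	assert(!node_on_list(&dev));	/* initialized: not starved */
	node_add_tail(&dev, &head);
	assert(node_on_list(&dev));	/* queued on the starved list */
	node_del_init(&dev);
	assert(!node_on_list(&dev));	/* removed and testable again */
	return 0;
}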
drivers/scsi/scsi_error.c:

@@ -431,8 +431,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
 	unsigned long flags;
 	int rtn = SUCCESS;

-	ASSERT_LOCK(host->host_lock, 0);
-
 	/*
 	 * we will use a queued command if possible, otherwise we will
 	 * emulate the queuing and calling of completion function ourselves.

@@ -1405,8 +1403,6 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
 	struct scsi_device *sdev;
 	unsigned long flags;

-	ASSERT_LOCK(shost->host_lock, 0);
-
 	/*
 	 * If the door was locked, we need to insert a door lock request
 	 * onto the head of the SCSI request queue for the device.  There

@@ -1434,18 +1430,11 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
 	 * now that error recovery is done, we will need to ensure that these
 	 * requests are started.
 	 */
-	spin_lock_irqsave(shost->host_lock, flags);
 	list_for_each_entry(sdev, &shost->my_devices, siblings) {
-		if ((shost->can_queue > 0 &&
-		     (shost->host_busy >= shost->can_queue))
-		    || (shost->host_blocked)
-		    || (shost->host_self_blocked)) {
-			break;
-		}
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
 		__blk_run_queue(sdev->request_queue);
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
 	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
 }

 /**

@@ -1681,6 +1670,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
 	struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
 	struct request req;
 	int rtn;
+	struct request_queue *q;

 	scmd->request = &req;
 	memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));

@@ -1735,6 +1725,8 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
 	}

 	scsi_delete_timer(scmd);
+	q = scmd->device->request_queue;
 	scsi_put_command(scmd);
+	scsi_queue_next_request(q, NULL);
 	return rtn;
 }
drivers/scsi/scsi_lib.c:

@@ -92,6 +92,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
+	unsigned long flags;

 	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

@@ -130,6 +131,9 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 	 * Decrement the counters, since these commands are no longer
 	 * active on the host/device.
 	 */
+	spin_lock_irqsave(device->request_queue->queue_lock, flags);
+	device->device_busy--;
+	spin_unlock_irqrestore(device->request_queue->queue_lock, flags);
 	scsi_host_busy_dec_and_test(host, device);

 	/*

@@ -174,14 +178,18 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
	       void (*done) (struct scsi_cmnd *),
	       int timeout, int retries)
 {
+	struct request_queue *q;
+
 	/*
 	 * If the upper level driver is reusing these things, then
 	 * we should release the low-level block now. Another one will
 	 * be allocated later when this request is getting queued.
 	 */
 	if (sreq->sr_command) {
+		q = sreq->sr_command->device->request_queue;
 		scsi_put_command(sreq->sr_command);
 		sreq->sr_command = NULL;
+		scsi_queue_next_request(q, NULL);
 	}

 	/*

@@ -228,6 +236,7 @@ static void scsi_wait_done(struct scsi_cmnd *cmd)
 void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
 {
+	struct request_queue *q;
 	DECLARE_COMPLETION(wait);

 	sreq->sr_request->waiting = &wait;

@@ -239,7 +248,9 @@ void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
 	sreq->sr_request->waiting = NULL;
 	if (sreq->sr_command) {
+		q = sreq->sr_command->device->request_queue;
 		scsi_put_command(sreq->sr_command);
+		scsi_queue_next_request(q, NULL);
 		sreq->sr_command = NULL;
 	}
 }

@@ -315,6 +326,39 @@ void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
 	cmd->underflow = cmd->old_underflow;
 }

+/*
+ * Called for single_lun devices on IO completion. Clear starget_busy, and
+ * Call __blk_run_queue for all the scsi_devices on the target - including
+ * current_sdev first.
+ *
+ * Called with *no* scsi locks held.
+ */
+static void scsi_single_lun_run(struct scsi_device *current_sdev)
+{
+	struct scsi_device *sdev;
+	unsigned int flags, flags2;
+
+	spin_lock_irqsave(current_sdev->request_queue->queue_lock, flags2);
+	spin_lock_irqsave(current_sdev->host->host_lock, flags);
+	WARN_ON(!current_sdev->sdev_target->starget_busy);
+	if (current_sdev->device_busy == 0)
+		current_sdev->sdev_target->starget_busy = 0;
+	spin_unlock_irqrestore(current_sdev->host->host_lock, flags);
+
+	/*
+	 * Call __blk_run_queue for all LUNs on the target, starting with
+	 * current_sdev.
+	 */
+	__blk_run_queue(current_sdev->request_queue);
+	spin_unlock_irqrestore(current_sdev->request_queue->queue_lock, flags2);
+
+	list_for_each_entry(sdev, &current_sdev->same_target_siblings,
+			    same_target_siblings) {
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags2);
+		__blk_run_queue(sdev->request_queue);
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags2);
+	}
+}
+
 /*
  * Function: scsi_queue_next_request()
  *

@@ -351,16 +395,12 @@ void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
  * permutations grows as 2**N, and if too many more special cases
  * get added, we start to get screwed.
  */
-static void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
+void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev, *sdev2;
 	struct Scsi_Host *shost;
 	unsigned long flags;
-	int all_clear;
-
-	ASSERT_LOCK(q->queue_lock, 0);

-	spin_lock_irqsave(q->queue_lock, flags);
 	if (cmd != NULL) {
 		/*

@@ -369,6 +409,7 @@ static void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
 		 * in which case we need to request the blocks that come after
 		 * the bad sector.
 		 */
+		spin_lock_irqsave(q->queue_lock, flags);
 		cmd->request->special = cmd;
 		if (blk_rq_tagged(cmd->request))
 			blk_queue_end_tag(q, cmd->request);

@@ -381,62 +422,45 @@ static void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
 		cmd->request->flags |= REQ_SPECIAL;
 		cmd->request->flags &= ~REQ_DONTPREP;
 		__elv_add_request(q, cmd->request, 0, 0);
+		spin_unlock_irqrestore(q->queue_lock, flags);
 	}

-	/*
-	 * Just hit the requeue function for the queue.
-	 */
-	__blk_run_queue(q);
-
 	sdev = q->queuedata;
-	shost = sdev->host;

-	/*
-	 * If this is a single-lun device, and we are currently finished
-	 * with this device, then see if we need to get another device
-	 * started.  FIXME(eric) - if this function gets too cluttered
-	 * with special case code, then spin off separate versions and
-	 * use function pointers to pick the right one.
-	 */
-	if (sdev->single_lun && sdev->device_busy == 0 &&
-	    !shost->host_blocked && !shost->host_self_blocked &&
-	    !((shost->can_queue > 0) &&
-	      (shost->host_busy >= shost->can_queue)) &&
-	    elv_queue_empty(q)) {
-		list_for_each_entry(sdev2, &sdev->same_target_siblings,
-				    same_target_siblings) {
-			if (!sdev2->device_blocked &&
-			    !elv_queue_empty(sdev2->request_queue)) {
-				__blk_run_queue(sdev2->request_queue);
-				break;
-			}
-		}
-	}
+	if (sdev->single_lun)
+		scsi_single_lun_run(sdev);

-	/*
-	 * Now see whether there are other devices on the bus which
-	 * might be starved.  If so, hit the request function.  If we
-	 * don't find any, then it is safe to reset the flag.  If we
-	 * find any device that it is starved, it isn't safe to reset the
-	 * flag as the queue function releases the lock and thus some
-	 * other device might have become starved along the way.
-	 */
-	all_clear = 1;
-	if (shost->some_device_starved) {
-		list_for_each_entry(sdev, &shost->my_devices, siblings) {
-			if (shost->can_queue > 0 &&
-			    shost->host_busy >= shost->can_queue)
-				break;
-			if (shost->host_blocked || shost->host_self_blocked)
-				break;
-			if (sdev->device_blocked || !sdev->starved)
-				continue;
-			__blk_run_queue(sdev->request_queue);
-			all_clear = 0;
-		}
-		if (sdev == NULL && all_clear)
-			shost->some_device_starved = 0;
-	}
+	shost = sdev->host;
+	spin_lock_irqsave(shost->host_lock, flags);
+	while (!list_empty(&shost->starved_list) &&
+	       !shost->host_blocked && !shost->host_self_blocked &&
+	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
+		/*
+		 * As long as shost is accepting commands and we have
+		 * starved queues, call __blk_run_queue. scsi_request_fn
+		 * drops the queue_lock and can add us back to the
+		 * starved_list.
+		 *
+		 * host_lock protects the starved_list and starved_entry.
+		 * scsi_request_fn must get the host_lock before checking
+		 * or modifying starved_list or starved_entry.
+		 */
+		sdev2 = list_entry(shost->starved_list.next,
+				   struct scsi_device, starved_entry);
+		list_del_init(&sdev2->starved_entry);
+		spin_unlock_irqrestore(shost->host_lock, flags);
+
+		spin_lock_irqsave(sdev2->request_queue->queue_lock, flags);
+		__blk_run_queue(sdev2->request_queue);
+		spin_unlock_irqrestore(sdev2->request_queue->queue_lock, flags);
+
+		spin_lock_irqsave(shost->host_lock, flags);
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);

+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
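The while loop added to scsi_queue_next_request() above pops one starved device at a time and deliberately drops host_lock before running that device's queue, because scsi_request_fn() takes host_lock itself and may put the device back on starved_list. A stripped-down sketch of that pop/unlock/run/relock idiom, using a pthread mutex in place of the spinlock — illustrative only, not kernel code:

#include <pthread.h>

struct starved_node { struct starved_node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct starved_node *starved_head;

/* stands in for __blk_run_queue(); may take list_lock and re-add n */
static void run_queue(struct starved_node *n) { (void)n; }

static void drain_starved(void)
{
	pthread_mutex_lock(&list_lock);
	while (starved_head) {
		struct starved_node *n = starved_head;

		starved_head = n->next;			/* unlink under the lock ... */
		pthread_mutex_unlock(&list_lock);	/* ... then drop it */

		run_queue(n);				/* work may re-queue n */

		pthread_mutex_lock(&list_lock);		/* retake before next pop */
	}
	pthread_mutex_unlock(&list_lock);
}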
@@ -470,8 +494,6 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 	struct request *req = cmd->request;
 	unsigned long flags;

-	ASSERT_LOCK(q->queue_lock, 0);
-
 	/*
 	 * If there are blocks left over at the end, set up the command
 	 * to queue the remainder of them.

@@ -569,8 +591,6 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
 	struct request *req = cmd->request;

-	ASSERT_LOCK(cmd->device->host->host_lock, 0);
-
 	/*
 	 * Free up any indirection buffers we allocated for DMA purposes.
 	 */

@@ -651,8 +671,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, int good_sectors,
 	 * would be used if we just wanted to retry, for example.
 	 *
 	 */
-	ASSERT_LOCK(q->queue_lock, 0);
-
 	/*
 	 * Free up any indirection buffers we allocated for DMA purposes.
 	 * For the case of a READ, we need to copy the data out of the

@@ -923,22 +941,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 		return BLKPREP_KILL;
 }

-/*
- * The target associated with myself can only handle one active command at
- * a time. Scan through all of the luns on the same target as myself,
- * return 1 if any are active.
- */
-static int check_all_luns(struct scsi_device *myself)
-{
-	struct scsi_device *sdev;
-
-	list_for_each_entry(sdev, &myself->same_target_siblings,
-			    same_target_siblings)
-		if (sdev->device_busy)
-			return 1;
-	return 0;
-}
-
 static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
 	struct Scsi_Device_Template *sdt;

@@ -1039,6 +1041,81 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 	return BLKPREP_OK;
 }

+/*
+ * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
+ * return 0.
+ *
+ * Called with the queue_lock held.
+ */
+static inline int scsi_dev_queue_ready(struct request_queue *q,
+				       struct scsi_device *sdev)
+{
+	if (sdev->device_busy >= sdev->queue_depth)
+		return 0;
+	if (sdev->device_busy == 0 && sdev->device_blocked) {
+		/*
+		 * unblock after device_blocked iterates to zero
+		 */
+		if (--sdev->device_blocked == 0) {
+			SCSI_LOG_MLQUEUE(3,
+				printk("scsi%d (%d:%d) unblocking device at"
+				       " zero depth\n", sdev->host->host_no,
+				       sdev->id, sdev->lun));
+		} else {
+			blk_plug_device(q);
+			return 0;
+		}
+	}
+	if (sdev->device_blocked)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * scsi_host_queue_ready: if we can send requests to shost, return 1 else
+ * return 0.
+ *
+ * Called with queue_lock and host_lock held.
+ */
+static inline int scsi_host_queue_ready(struct request_queue *q,
+					struct Scsi_Host *shost,
+					struct scsi_device *sdev)
+{
+	if (shost->in_recovery)
+		return 0;
+	if (shost->host_busy == 0 && shost->host_blocked) {
+		/*
+		 * unblock after host_blocked iterates to zero
+		 */
+		if (--shost->host_blocked == 0) {
+			SCSI_LOG_MLQUEUE(3,
+				printk("scsi%d unblocking host at zero depth\n",
+					shost->host_no));
+		} else {
+			blk_plug_device(q);
+			return 0;
+		}
+	}
+	if (!list_empty(&sdev->starved_entry))
+		return 0;
+	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
+	    shost->host_blocked || shost->host_self_blocked) {
+		SCSI_LOG_MLQUEUE(3,
+			printk("add starved dev <%d,%d,%d,%d>; host "
+			       "limit %d, busy %d, blocked %d selfblocked %d\n",
+			       sdev->host->host_no, sdev->channel, sdev->id,
+			       sdev->lun, shost->can_queue, shost->host_busy,
+			       shost->host_blocked, shost->host_self_blocked));
+		list_add_tail(&sdev->starved_entry, &shost->starved_list);
+		return 0;
+	}
+
+	return 1;
+}
+
 /*
  * Function: scsi_request_fn()
  *
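Both helpers above rely on the same countdown idiom: host_blocked or device_blocked is set to a count when the target reports busy, and each readiness check against an idle device decrements it, letting traffic through only once it reaches zero. A self-contained model under assumed names (gate, gate_ready):

#include <stdio.h>

struct gate {
	unsigned busy;		/* outstanding commands */
	unsigned blocked;	/* countdown set on a busy event */
};

static int gate_ready(struct gate *g)
{
	if (g->busy == 0 && g->blocked) {
		if (--g->blocked == 0)
			printf("unblocking at zero depth\n");
		else
			return 0;	/* still counting down */
	}
	return g->blocked == 0;
}

int main(void)
{
	struct gate g = { .busy = 0, .blocked = 3 };

	/* first two idle checks just decrement; the third lets I/O through */
	for (int i = 0; i < 4; i++)
		printf("attempt %d: ready=%d\n", i, gate_ready(&g));
	return 0;
}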
@@ -1056,74 +1133,26 @@ static void scsi_request_fn(request_queue_t *q)
 	struct Scsi_Host *shost = sdev->host;
 	struct scsi_cmnd *cmd;
 	struct request *req;
+	unsigned int flags;

-	ASSERT_LOCK(q->queue_lock, 1);
-
 	/*
 	 * To start with, we keep looping until the queue is empty, or until
 	 * the host is no longer able to accept any more requests.
 	 */
 	for (;;) {
-		/*
-		 * Check this again - each time we loop through we will have
-		 * released the lock and grabbed it again, so each time
-		 * we need to check to see if the queue is plugged or not.
-		 */
-		if (shost->in_recovery || blk_queue_plugged(q))
-			return;
-
-		if (sdev->device_busy >= sdev->queue_depth)
-			break;
-
-		if (sdev->single_lun && check_all_luns(sdev))
-			break;
+		if (blk_queue_plugged(q))
+			goto completed;

-		if (shost->host_busy == 0 && shost->host_blocked) {
-			/* unblock after host_blocked iterates to zero */
-			if (--shost->host_blocked == 0) {
-				SCSI_LOG_MLQUEUE(3,
-					printk("scsi%d unblocking host at zero depth\n",
-						shost->host_no));
-			} else {
-				blk_plug_device(q);
-				break;
-			}
-		}
+		if (!scsi_dev_queue_ready(q, sdev))
+			goto completed;

-		if (sdev->device_busy == 0 && sdev->device_blocked) {
-			/* unblock after device_blocked iterates to zero */
-			if (--sdev->device_blocked == 0) {
-				SCSI_LOG_MLQUEUE(3,
-					printk("scsi%d (%d:%d) unblocking device at zero depth\n",
-						shost->host_no, sdev->id, sdev->lun));
-			} else {
-				blk_plug_device(q);
-				break;
-			}
-		}
+		spin_lock_irqsave(shost->host_lock, flags);
+		if (!scsi_host_queue_ready(q, shost, sdev))
+			goto after_host_lock;

-		/*
-		 * If the device cannot accept another request, then quit.
-		 */
-		if (sdev->device_blocked)
-			break;
-		if ((shost->can_queue > 0 &&
-		     shost->host_busy >= shost->can_queue) ||
-		    shost->host_blocked ||
-		    shost->host_self_blocked) {
-			/*
-			 * If we are unable to process any commands at all for
-			 * this device, then we consider it to be starved.
-			 * What this means is that there are no outstanding
-			 * commands for this device and hence we need a
-			 * little help getting it started again
-			 * once the host isn't quite so busy.
-			 */
-			if (sdev->device_busy == 0) {
-				sdev->starved = 1;
-				shost->some_device_starved = 1;
-			}
-			break;
-		} else
-			sdev->starved = 0;
+		if (sdev->single_lun && !sdev->device_busy &&
+		    sdev->sdev_target->starget_busy)
+			goto after_host_lock;

 		/*
 		 * get next queueable request.  We do this early to make sure

@@ -1137,9 +1166,9 @@ static void scsi_request_fn(request_queue_t *q)
 			/* If the device is busy, a returning I/O
 			 * will restart the queue.  Otherwise, we have
 			 * to plug the queue */
-			if (sdev->device_busy == 0)
+			if (sdev->device_busy == 1)
 				blk_plug_device(q);
-			break;
+			goto after_host_lock;
 		}

 		cmd = req->special;

@@ -1161,11 +1190,12 @@ static void scsi_request_fn(request_queue_t *q)
 		if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0)))
 			blkdev_dequeue_request(req);

-		/*
-		 * Now bump the usage count for both the host and the
-		 * device.
-		 */
+		if (sdev->single_lun)
+			sdev->sdev_target->starget_busy = 1;
+
 		shost->host_busy++;
+		spin_unlock_irqrestore(shost->host_lock, flags);
 		sdev->device_busy++;
 		spin_unlock_irq(q->queue_lock);

@@ -1186,6 +1216,11 @@ static void scsi_request_fn(request_queue_t *q)
 		 */
 		spin_lock_irq(q->queue_lock);
 	}
+
+completed:
+	return;
+
+after_host_lock:
+	spin_unlock_irqrestore(shost->host_lock, flags);
 }

 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
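The reworked scsi_request_fn() funnels every early exit taken while host_lock is held through the single after_host_lock label, so the unlock appears exactly once. A compact model of that exit-path idiom — pthread mutex standing in for the spinlock, all names illustrative:

#include <pthread.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

static int host_ready(void)   { return 0; }	/* placeholder checks */
static int target_ready(void) { return 1; }
static void dispatch_one(void) { }

static void request_fn_model(void)
{
	pthread_mutex_lock(&host_lock);
	if (!host_ready())
		goto after_host_lock;	/* every lock-held early exit ... */
	if (!target_ready())
		goto after_host_lock;	/* ... funnels through one label */
	pthread_mutex_unlock(&host_lock);

	dispatch_one();			/* normal path: lock already released */
	return;

after_host_lock:
	pthread_mutex_unlock(&host_lock);
}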
@@ -1207,15 +1242,20 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 	return BLK_BOUNCE_HIGH;
 }

-request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost)
+request_queue_t *scsi_alloc_queue(struct scsi_device *sdev)
 {
 	request_queue_t *q;
+	struct Scsi_Host *shost;

 	q = kmalloc(sizeof(*q), GFP_ATOMIC);
 	if (!q)
 		return NULL;
 	memset(q, 0, sizeof(*q));

+	/*
+	 * XXX move host code to scsi_register
+	 */
+	shost = sdev->host;
 	if (!shost->max_sectors) {
 		/*
 		 * Driver imposes no hard sector transfer limit.

@@ -1224,7 +1264,7 @@ request_queue_t *scsi_alloc_queue(struct Scsi_Host *shost)
 		shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
 	}

-	blk_init_queue(q, scsi_request_fn, shost->host_lock);
+	blk_init_queue(q, scsi_request_fn, &sdev->sdev_lock);
 	blk_queue_prep_rq(q, scsi_prep_fn);
 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
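The third argument to blk_init_queue() is the lock the block layer holds around each call into the driver's request_fn; switching it from shost->host_lock to the new per-device &sdev->sdev_lock is what forces scsi_request_fn() above to take the host lock explicitly and only briefly. A rough model of that contract with simplified, assumed types — this is not the real block layer:

#include <pthread.h>

struct request_queue_model {
	void (*request_fn)(struct request_queue_model *q);
	pthread_mutex_t *queue_lock;	/* -> &sdev->sdev_lock after this patch */
};

static void blk_init_queue_model(struct request_queue_model *q,
				 void (*fn)(struct request_queue_model *),
				 pthread_mutex_t *lock)
{
	q->request_fn = fn;
	q->queue_lock = lock;	/* the caller chooses the lock */
}

static void run_queue_model(struct request_queue_model *q)
{
	pthread_mutex_lock(q->queue_lock);
	q->request_fn(q);	/* request_fn always runs with queue_lock held */
	pthread_mutex_unlock(q->queue_lock);
}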
drivers/scsi/scsi_scan.c:

@@ -387,7 +387,7 @@ static void print_inquiry(unsigned char *inq_result)
 *     Scsi_Device pointer, or NULL on failure.
 **/
 static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
-	       struct request_queue **q, uint channel, uint id, uint lun)
+	       uint channel, uint id, uint lun)
 {
 	struct scsi_device *sdev, *device;

@@ -407,6 +407,7 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
 	INIT_LIST_HEAD(&sdev->siblings);
 	INIT_LIST_HEAD(&sdev->same_target_siblings);
 	INIT_LIST_HEAD(&sdev->cmd_list);
+	INIT_LIST_HEAD(&sdev->starved_entry);
 	spin_lock_init(&sdev->list_lock);

@@ -421,14 +422,10 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
 	 */
 	sdev->borken = 1;

-	if (!q || *q == NULL) {
-		sdev->request_queue = scsi_alloc_queue(shost);
-		if (!sdev->request_queue)
-			goto out_free_dev;
-	} else {
-		sdev->request_queue = *q;
-		*q = NULL;
-	}
+	spin_lock_init(&sdev->sdev_lock);
+	sdev->request_queue = scsi_alloc_queue(sdev);
+	if (!sdev->request_queue)
+		goto out_free_dev;
 	sdev->request_queue->queuedata = sdev;
 	scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);

@@ -468,10 +465,7 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
 	return sdev;

 out_free_queue:
-	if (q && sdev->request_queue) {
-		*q = sdev->request_queue;
-		sdev->request_queue = NULL;
-	} else if (sdev->request_queue)
+	if (sdev->request_queue)
 		scsi_free_queue(sdev->request_queue);

 out_free_dev:

@@ -491,6 +485,8 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
 **/
 static void scsi_free_sdev(struct scsi_device *sdev)
 {
+	unsigned int flags;
+
 	list_del(&sdev->siblings);
 	list_del(&sdev->same_target_siblings);

@@ -500,6 +496,14 @@ static void scsi_free_sdev(struct scsi_device *sdev)
 		sdev->host->hostt->slave_destroy(sdev);
 	if (sdev->inquiry)
 		kfree(sdev->inquiry);
+
+	if (sdev->single_lun) {
+		spin_lock_irqsave(sdev->host->host_lock, flags);
+		sdev->sdev_target->starget_refcnt--;
+		if (sdev->sdev_target->starget_refcnt == 0)
+			kfree(sdev->sdev_target);
+		spin_unlock_irqrestore(sdev->host->host_lock, flags);
+	}
 	kfree(sdev);
 }

@@ -1135,6 +1139,10 @@ static void scsi_probe_lun(Scsi_Request *sreq, char *inq_result,
 static int scsi_add_lun(Scsi_Device *sdev, Scsi_Request *sreq,
			char *inq_result, int *bflags)
 {
+	struct scsi_device *sdev_sibling;
+	struct scsi_target *starget;
+	unsigned int flags;
+
 	/*
 	 * XXX do not save the inquiry, since it can change underneath us,
 	 * save just vendor/model/rev.

@@ -1256,10 +1264,38 @@ static int scsi_add_lun(Scsi_Device *sdev, Scsi_Request *sreq,
 	/*
 	 * If we need to allow I/O to only one of the luns attached to
-	 * this target id at a time, then we set this flag.
+	 * this target id at a time set single_lun, and allocate or modify
+	 * sdev_target.
 	 */
-	if (*bflags & BLIST_SINGLELUN)
+	if (*bflags & BLIST_SINGLELUN) {
 		sdev->single_lun = 1;
+		spin_lock_irqsave(sdev->host->host_lock, flags);
+		starget = NULL;
+		/*
+		 * Search for an existing target for this sdev.
+		 */
+		list_for_each_entry(sdev_sibling, &sdev->same_target_siblings,
+				    same_target_siblings) {
+			if (sdev_sibling->sdev_target != NULL) {
+				starget = sdev_sibling->sdev_target;
+				break;
+			}
+		}
+		if (!starget) {
+			starget = kmalloc(sizeof(*starget), GFP_KERNEL);
+			if (!starget) {
+				printk(ALLOC_FAILURE_MSG, __FUNCTION__);
+				spin_unlock_irqrestore(sdev->host->host_lock,
+						       flags);
+				return SCSI_SCAN_NO_RESPONSE;
+			}
+			starget->starget_refcnt = 0;
+			starget->starget_busy = 0;
+		}
+		starget->starget_refcnt++;
+		sdev->sdev_target = starget;
+		spin_unlock_irqrestore(sdev->host->host_lock, flags);
+	}

 	/* if the device needs this changing, it may do so in the detect
 	 * function */
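The BLIST_SINGLELUN path above hand-rolls find-or-create with manual reference counting: the first LUN on a target allocates the scsi_target, later LUNs on the same target share it, and scsi_free_sdev() frees it when the last reference drops. A compact model of that lifecycle, with illustrative names and no locking shown:

#include <stdlib.h>

struct target { unsigned refcnt; unsigned busy; };

/* find-or-create: "existing" is whatever a sibling LUN already holds */
static struct target *target_get(struct target *existing)
{
	struct target *t = existing;

	if (!t) {
		t = calloc(1, sizeof(*t));	/* refcnt and busy start at 0 */
		if (!t)
			return NULL;
	}
	t->refcnt++;		/* one reference per attached LUN */
	return t;
}

static void target_put(struct target *t)
{
	if (--t->refcnt == 0)	/* last LUN detached: free the target */
		free(t);
}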
@@ -1288,15 +1324,15 @@ static int scsi_add_lun(Scsi_Device *sdev, Scsi_Request *sreq,
 *     SCSI_SCAN_LUN_PRESENT: a new Scsi_Device was allocated and initialized
 **/
 static int scsi_probe_and_add_lun(struct Scsi_Host *host,
-		struct request_queue **q, uint channel, uint id, uint lun,
+		uint channel, uint id, uint lun,
		int *bflagsp,
		struct scsi_device **sdevp)
 {
 	struct scsi_device *sdev;
 	struct scsi_request *sreq;
 	unsigned char *result;
 	int bflags, res = SCSI_SCAN_NO_RESPONSE;

-	sdev = scsi_alloc_sdev(host, q, channel, id, lun);
+	sdev = scsi_alloc_sdev(host, channel, id, lun);
 	if (!sdev)
 		goto out;
 	sreq = scsi_allocate_request(sdev);

@@ -1350,13 +1386,8 @@ static int scsi_probe_and_add_lun(struct Scsi_Host *host,
 	if (res == SCSI_SCAN_LUN_PRESENT) {
 		if (sdevp)
 			*sdevp = sdev;
-	} else if (q) {
-		*q = sdev->request_queue;
-		sdev->request_queue = NULL;
-		scsi_free_sdev(sdev);
-	} else
+	} else {
 		scsi_free_sdev(sdev);
+	}
 out:
 	return res;
 }

@@ -1374,9 +1405,8 @@ static int scsi_probe_and_add_lun(struct Scsi_Host *host,
 *
 *     Modifies sdevscan->lun.
 **/
 static void scsi_sequential_lun_scan(struct Scsi_Host *shost,
-			struct request_queue **q, uint channel,
+			uint channel,
			uint id,
			int bflags, int lun0_res, int scsi_level)
 {
 	unsigned int sparse_lun, lun, max_dev_lun;

@@ -1444,7 +1474,7 @@ static void scsi_sequential_lun_scan(struct Scsi_Host *shost,
 	 * sparse_lun.
 	 */
 	for (lun = 1; lun < max_dev_lun; ++lun)
-		if ((scsi_probe_and_add_lun(shost, q, channel, id, lun,
+		if ((scsi_probe_and_add_lun(shost, channel, id, lun,
			NULL, NULL) != SCSI_SCAN_LUN_PRESENT) && !sparse_lun)
			return;
 }

@@ -1497,8 +1527,7 @@ static int scsilun_to_int(ScsiLun *scsilun)
 *     0: scan completed (or no memory, so further scanning is futile)
 *     1: no report lun scan, or not configured
 **/
-static int scsi_report_lun_scan(Scsi_Device *sdev, struct request_queue **q,
-				int bflags)
+static int scsi_report_lun_scan(Scsi_Device *sdev, int bflags)
 {
 #ifdef CONFIG_SCSI_REPORT_LUNS

@@ -1659,8 +1688,8 @@ static int scsi_report_lun_scan(Scsi_Device *sdev, struct request_queue **q,
 		} else {
			int res;

-			res = scsi_probe_and_add_lun(sdev->host, q,
-				sdev->channel, sdev->id, lun, NULL, NULL);
+			res = scsi_probe_and_add_lun(sdev->host,
				sdev->channel, sdev->id, lun, NULL, NULL);
			if (res == SCSI_SCAN_NO_RESPONSE) {
				/*
				 * Got some results, but now none, abort.

@@ -1688,8 +1717,7 @@ struct scsi_device *scsi_add_device(struct Scsi_Host *shost,
 	struct scsi_device *sdev;
 	int error = -ENODEV, res;

-	res = scsi_probe_and_add_lun(shost, NULL, channel, id, lun,
-				     NULL, &sdev);
+	res = scsi_probe_and_add_lun(shost, channel, id, lun, NULL, &sdev);
 	if (res == SCSI_SCAN_LUN_PRESENT)
		error = scsi_attach_device(sdev);

@@ -1730,8 +1758,8 @@ int scsi_remove_device(struct scsi_device *sdev)
 * First try a REPORT LUN scan, if that does not scan the target, do a
 * sequential scan of LUNs on the target id.
 **/
-static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
-			     unsigned int channel, unsigned int id)
+static void scsi_scan_target(struct Scsi_Host *shost, unsigned int channel,
+			     unsigned int id)
 {
 	int bflags = 0;
 	int res;

@@ -1747,14 +1775,14 @@ static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
 	 * Scan LUN 0, if there is some response, scan further. Ideally, we
 	 * would not configure LUN 0 until all LUNs are scanned.
 	 */
-	res = scsi_probe_and_add_lun(shost, q, channel, id, 0, &bflags, &sdev);
+	res = scsi_probe_and_add_lun(shost, channel, id, 0, &bflags, &sdev);
 	if (res == SCSI_SCAN_LUN_PRESENT) {
-		if (scsi_report_lun_scan(sdev, q, bflags) != 0)
+		if (scsi_report_lun_scan(sdev, bflags) != 0)
			/*
			 * The REPORT LUN did not scan the target,
			 * do a sequential scan.
			 */
-			scsi_sequential_lun_scan(shost, q, channel, id, bflags,
+			scsi_sequential_lun_scan(shost, channel, id, bflags,
						 res, sdev->scsi_level);
 	} else if (res == SCSI_SCAN_TARGET_PRESENT) {
 		/*

@@ -1763,7 +1791,7 @@ static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
 		 * sequential lun scan with a bflags of SPARSELUN and
 		 * a default scsi level of SCSI_2
 		 */
-		scsi_sequential_lun_scan(shost, q, channel, id, BLIST_SPARSELUN,
+		scsi_sequential_lun_scan(shost, channel, id, BLIST_SPARSELUN,
					 SCSI_SCAN_TARGET_PRESENT, SCSI_2);
 	}
 }

@@ -1778,7 +1806,6 @@ static void scsi_scan_target(struct Scsi_Host *shost, struct request_queue **q,
 **/
 void scsi_scan_host(struct Scsi_Host *shost)
 {
-	struct request_queue *q = NULL;
 	uint channel, id, order_id;

 	/*

@@ -1803,12 +1830,9 @@ void scsi_scan_host(struct Scsi_Host *shost)
				order_id = shost->max_id - id - 1;
			else
				order_id = id;
-			scsi_scan_target(shost, &q, channel, order_id);
+			scsi_scan_target(shost, channel, order_id);
		}
 	}
-
-	if (q)
-		scsi_free_queue(q);
 }

 void scsi_forget_host(struct Scsi_Host *shost)

@@ -1847,7 +1871,7 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
 {
 	struct scsi_device *sdev;

-	sdev = scsi_alloc_sdev(shost, NULL, 0, shost->this_id, 0);
+	sdev = scsi_alloc_sdev(shost, 0, shost->this_id, 0);
 	if (sdev) {
		sdev->borken = 0;
 	}
drivers/scsi/scsi_syms.c:

@@ -60,6 +60,8 @@ EXPORT_SYMBOL(scsi_allocate_request);
 EXPORT_SYMBOL(scsi_release_request);
 EXPORT_SYMBOL(scsi_wait_req);
 EXPORT_SYMBOL(scsi_do_req);
+EXPORT_SYMBOL(scsi_get_command);
+EXPORT_SYMBOL(scsi_put_command);

 EXPORT_SYMBOL(scsi_report_bus_reset);
 EXPORT_SYMBOL(scsi_block_requests);