nexedi / linux

Commit b88ac404
Authored May 26, 2003 by Jens Axboe; committed by Linus Torvalds on May 26, 2003
[PATCH] blk layer tag resize
This allows drivers to resize their tag depth at run-time.
Parent: dccd87ac
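For context, here is a minimal sketch (not part of this commit) of how a driver might use the new call: initialize tagging with the depth known at probe time, then grow it at run-time once the hardware reports a larger queue depth. The example_* helpers are hypothetical, and the q->queue_lock locking detail is an assumption based on the "queue lock held" note in the kernel-doc below; only blk_queue_init_tags() and blk_queue_resize_tags() come from the API this patch touches.

/*
 * Illustrative only -- not from this commit.  A driver starts with a
 * conservative tag depth, then resizes once the device reports how many
 * commands it can really queue.
 */
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static int example_enable_tagging(request_queue_t *q, int probe_depth)
{
	/* allocate tag_index/tag_map for the depth known at probe time */
	return blk_queue_init_tags(q, probe_depth);
}

static int example_grow_tag_depth(request_queue_t *q, int new_depth)
{
	unsigned long flags;
	int ret;

	/* blk_queue_resize_tags() must be called with the queue lock held;
	 * q->queue_lock is assumed here, as in kernels of this era */
	spin_lock_irqsave(q->queue_lock, flags);
	ret = blk_queue_resize_tags(q, new_depth);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return ret;
}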
Showing 2 changed files with 83 additions and 24 deletions (+83, -24):

    drivers/block/ll_rw_blk.c    +80  -23
    include/linux/blkdev.h       +3   -1
drivers/block/ll_rw_blk.c

@@ -413,11 +413,12 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;

-	if (unlikely(bqt == NULL || bqt->max_depth < tag))
+	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
 		return NULL;

 	return bqt->tag_index[tag];
 }

 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q: the request queue for the device

@@ -448,39 +449,28 @@ void blk_queue_free_tags(request_queue_t *q)
 	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
 }

-/**
- * blk_queue_init_tags - initialize the queue tag info
- * @q: the request queue for the device
- * @depth: the maximum queue depth supported
- **/
-int blk_queue_init_tags(request_queue_t *q, int depth)
+static int init_tag_map(struct blk_queue_tag *tags, int depth)
 {
-	struct blk_queue_tag *tags;
 	int bits, i;

 	if (depth > (queue_nr_requests * 2)) {
 		depth = (queue_nr_requests * 2);
-		printk("blk_queue_init_tags: adjusted depth to %d\n", depth);
+		printk(KERN_ERR "%s: adjusted depth to %d\n", __FUNCTION__, depth);
 	}

-	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
-	if (!tags)
-		goto fail;
-
 	tags->tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
 	if (!tags->tag_index)
-		goto fail_index;
+		goto fail;

 	bits = (depth / BLK_TAGS_PER_LONG) + 1;
 	tags->tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
 	if (!tags->tag_map)
-		goto fail_map;
+		goto fail;

 	memset(tags->tag_index, 0, depth * sizeof(struct request *));
 	memset(tags->tag_map, 0, bits * sizeof(unsigned long));
-	INIT_LIST_HEAD(&tags->busy_list);
-	tags->busy = 0;
 	tags->max_depth = depth;
+	tags->real_max_depth = bits * BITS_PER_LONG;

 	/*
 	 * set the upper bits if the depth isn't a multiple of the word size

@@ -488,21 +478,88 @@ int blk_queue_init_tags(request_queue_t *q, int depth)
 	for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
 		__set_bit(i, tags->tag_map);

+	return 0;
+fail:
+	kfree(tags->tag_index);
+	return -ENOMEM;
+}
+
+/**
+ * blk_queue_init_tags - initialize the queue tag info
+ * @q: the request queue for the device
+ * @depth: the maximum queue depth supported
+ **/
+int blk_queue_init_tags(request_queue_t *q, int depth)
+{
+	struct blk_queue_tag *tags;
+
+	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+	if (!tags)
+		goto fail;
+
+	if (init_tag_map(tags, depth))
+		goto fail;
+
+	INIT_LIST_HEAD(&tags->busy_list);
+	tags->busy = 0;

 	/*
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
 	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
 	return 0;
-fail_map:
-	kfree(tags->tag_index);
-fail_index:
-	kfree(tags);
 fail:
+	kfree(tags);
 	return -ENOMEM;
 }
+
+/**
+ * blk_queue_resize_tags - change the queueing depth
+ * @q: the request queue for the device
+ * @new_depth: the new max command queueing depth
+ *
+ * Notes:
+ *   Must be called with the queue lock held.
+ **/
+int blk_queue_resize_tags(request_queue_t *q, int new_depth)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+	struct request **tag_index;
+	unsigned long *tag_map;
+	int bits, max_depth;
+
+	if (!bqt)
+		return -ENXIO;
+
+	/*
+	 * don't bother sizing down
+	 */
+	if (new_depth <= bqt->real_max_depth) {
+		bqt->max_depth = new_depth;
+		return 0;
+	}
+
+	/*
+	 * save the old state info, so we can copy it back
+	 */
+	tag_index = bqt->tag_index;
+	tag_map = bqt->tag_map;
+	max_depth = bqt->real_max_depth;
+
+	if (init_tag_map(bqt, new_depth))
+		return -ENOMEM;
+
+	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
+	bits = max_depth / BLK_TAGS_PER_LONG;
+	memcpy(bqt->tag_map, bqt->tag_map, bits * sizeof(unsigned long));
+
+	kfree(tag_index);
+	kfree(tag_map);
+	return 0;
+}

 /**
  * blk_queue_end_tag - end tag operations for a request
  * @q: the request queue for the device

@@ -524,7 +581,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 	BUG_ON(tag == -1);

-	if (unlikely(tag >= bqt->max_depth))
+	if (unlikely(tag >= bqt->real_max_depth))
 		return;

 	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
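As a rough worked example of the sizing in init_tag_map() above, assuming BLK_TAGS_PER_LONG is the number of bits in an unsigned long (32 on a 32-bit build): a requested depth of 60 gives bits = (60 / 32) + 1 = 2, so tag_map holds 64 bits and real_max_depth becomes 64, while max_depth stays at the 60 the driver asked for; the trailing loop pre-sets bits 60 through 63 so those padding tags are never handed out. blk_queue_resize_tags() then only reallocates when the new depth exceeds real_max_depth; otherwise it simply raises max_depth.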
include/linux/blkdev.h

@@ -179,7 +179,8 @@ struct blk_queue_tag {
 	unsigned long *tag_map;		/* bit map of free/busy tags */
 	struct list_head busy_list;	/* fifo list of busy tags */
 	int busy;			/* current depth */
-	int max_depth;
+	int max_depth;			/* what we will send to device */
+	int real_max_depth;		/* what the array can hold */
 };

 struct request_queue

@@ -452,6 +453,7 @@ extern struct request *blk_queue_find_tag(request_queue_t *, int);
 extern void blk_queue_end_tag(request_queue_t *, struct request *);
 extern int blk_queue_init_tags(request_queue_t *, int);
 extern void blk_queue_free_tags(request_queue_t *);
+extern int blk_queue_resize_tags(request_queue_t *, int);
 extern void blk_queue_invalidate_tags(request_queue_t *);
 extern void blk_congestion_wait(int rw, long timeout);