Commit 8d76d101 authored Nov 17, 2014 by Jens Axboe
Merge branch 'for-3.19/core' into for-3.19/drivers
parents e805b983 7c7f2f2b
Showing 4 changed files with 53 additions and 22 deletions (+53 -22)
Documentation/block/biodoc.txt   +5  -1
block/blk-mq.c                   +25 -14
fs/fs-writeback.c                +22 -7
include/linux/blk-mq.h           +1  -0
Documentation/block/biodoc.txt

@@ -946,7 +946,11 @@ elevator_allow_merge_fn called whenever the block layer determines
 			request safely. The io scheduler may still
 			want to stop a merge at this point if it
 			results in some sort of conflict internally,
-			this hook allows it to do that.
+			this hook allows it to do that. Note however
+			that two *requests* can still be merged at later
+			time. Currently the io scheduler has no way to
+			prevent that. It can only learn about the fact
+			from elevator_merge_req_fn callback.
 
 elevator_dispatch_fn*		fills the dispatch queue with ready requests.
 			I/O schedulers are free to postpone requests by
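To make the two documented hooks concrete, here is a minimal sketch of where elevator_allow_merge_fn and elevator_merge_req_fn sit in an I/O scheduler. It is not code from this commit: the "sketch" scheduler and its conflict test are invented, and only the hook names, their 3.18-era signatures, and elv_register() are assumed from the elevator API of this kernel generation.

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>

/* Invented helper: a real scheduler would consult its own per-queue state. */
static bool sketch_conflicts(struct request_queue *q, struct request *rq,
			     struct bio *bio)
{
	return false;
}

/* Veto point for merging a bio into an existing request: return 0 to refuse. */
static int sketch_allow_merge(struct request_queue *q, struct request *rq,
			      struct bio *bio)
{
	return !sketch_conflicts(q, rq, bio);
}

/* Request-into-request merges cannot be vetoed; as the paragraph above says,
 * the scheduler is only notified here, after @next was merged into @rq. */
static void sketch_merged_requests(struct request_queue *q,
				   struct request *rq, struct request *next)
{
	/* drop any private state still attached to @next */
}

/* A real scheduler would also fill in init_queue/exit_queue and the
 * dispatch/add_request ops; only the merge hooks are shown here. */
static struct elevator_type sketch_iosched = {
	.ops = {
		.elevator_allow_merge_fn	= sketch_allow_merge,
		.elevator_merge_req_fn		= sketch_merged_requests,
	},
	.elevator_name	= "sketch",
	.elevator_owner	= THIS_MODULE,
};

static int __init sketch_init(void)
{
	return elv_register(&sketch_iosched);
}
module_init(sketch_init);
MODULE_LICENSE("GPL");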
block/blk-mq.c

@@ -269,17 +269,25 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_queue_exit(q);
 }
 
-void blk_mq_free_request(struct request *rq)
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx;
-	struct request_queue *q = rq->q;
 
 	ctx->rq_completed[rq_is_sync(rq)]++;
-
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	__blk_mq_free_request(hctx, ctx, rq);
 }
+EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
+
+void blk_mq_free_request(struct request *rq)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct request_queue *q = rq->q;
+
+	hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+	blk_mq_free_hctx_request(hctx, rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
 inline void __blk_mq_end_request(struct request *rq, int error)
 {
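As a usage illustration (not part of this commit), the new export lets a caller that already holds the hardware context release a request without the per-cpu ctx to hctx mapping that blk_mq_free_request() repeats internally. The fragment below is a hypothetical sketch: the mydrv_* names are invented, and only blk_mq_free_request() and the newly exported blk_mq_free_hctx_request() are real API from this diff.

#include <linux/blk-mq.h>

/* Release an unused, driver-allocated request when blk-mq has already
 * handed us the hardware context (e.g. inside a .queue_rq callback). */
static void mydrv_put_rq_fast(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	/* skips the ctx->cpu -> hctx mapping done by blk_mq_free_request() */
	blk_mq_free_hctx_request(hctx, rq);
}

/* Release it from a context that has no hctx at hand. */
static void mydrv_put_rq(struct request *rq)
{
	blk_mq_free_request(rq);	/* re-maps the queue internally */
}

The two calls are equivalent; the hctx-aware variant simply omits the lookup, which is exactly what the rewritten blk_mq_free_request() wrapper in the hunk above now delegates.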
@@ -801,9 +809,18 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 		return;
 
-	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
-		__blk_mq_run_hw_queue(hctx);
-	else if (hctx->queue->nr_hw_queues == 1)
+	if (!async) {
+		int cpu = get_cpu();
+		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
+			__blk_mq_run_hw_queue(hctx);
+			put_cpu();
+			return;
+		}
+
+		put_cpu();
+	}
+
+	if (hctx->queue->nr_hw_queues == 1)
 		kblockd_schedule_delayed_work(&hctx->run_work, 0);
 	else {
 		unsigned int cpu;
@@ -824,9 +841,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;
 
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_run_queues);
@@ -853,9 +868,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-	preempt_disable();
 	blk_mq_run_hw_queue(hctx, false);
-	preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
@@ -880,9 +893,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 			continue;
 
 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
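The blk_mq_run_hw_queue() hunk above moves preemption handling into the function itself: instead of callers wrapping it in preempt_disable()/preempt_enable() (removed in the three hunks that follow), it now uses a get_cpu()/put_cpu() pair, so the CPU-mask test is only made, and preemption only disabled, on the !async path that needs it. Below is a generic sketch of that pattern; it is not blk-mq code, and the function and parameter names are invented.

#include <linux/types.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

/*
 * get_cpu() disables preemption and returns the current CPU, so the mask
 * test and the local work it guards cannot be separated by a migration.
 * put_cpu() re-enables preemption on every exit path, which is why callers
 * no longer need their own preempt_disable()/preempt_enable().
 */
static bool run_locally_if_allowed(const struct cpumask *allowed,
				   void (*fn)(void *), void *arg)
{
	int cpu = get_cpu();			/* disables preemption */

	if (cpumask_test_cpu(cpu, allowed)) {
		fn(arg);			/* still guaranteed to run on 'cpu' */
		put_cpu();
		return true;
	}

	put_cpu();
	return false;	/* caller should defer to a workqueue instead */
}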
fs/fs-writeback.c

@@ -479,12 +479,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 	 * write_inode()
 	 */
 	spin_lock(&inode->i_lock);
-	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
-	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-		inode->i_state &= ~I_DIRTY_PAGES;
+
 	dirty = inode->i_state & I_DIRTY;
-	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
+	inode->i_state &= ~I_DIRTY;
+
+	/*
+	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
+	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
+	 * either they see the I_DIRTY bits cleared or we see the dirtied
+	 * inode.
+	 *
+	 * I_DIRTY_PAGES is always cleared together above even if @mapping
+	 * still has dirty pages.  The flag is reinstated after smp_mb() if
+	 * necessary.  This guarantees that either __mark_inode_dirty()
+	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
+	 */
+	smp_mb();
+
+	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+		inode->i_state |= I_DIRTY_PAGES;
+
 	spin_unlock(&inode->i_lock);
+
 	/* Don't write the inode if only I_DIRTY_PAGES was set */
 	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
 		int err = write_inode(inode, wbc);
@@ -1148,12 +1164,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 	}
 
 	/*
-	 * make sure that changes are seen by all cpus before we test i_state
-	 * -- mikulas
+	 * Paired with smp_mb() in __writeback_single_inode() for the
+	 * following lockless i_state test.  See there for details.
 	 */
 	smp_mb();
 
-	/* avoid the locking if we can */
 	if ((inode->i_state & flags) == flags)
 		return;
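The two hunks above form one pairing: __writeback_single_inode() clears I_DIRTY and then re-checks the mapping's dirty tag, while __mark_inode_dirty() publishes the dirtying and then tests i_state without taking i_lock. The sketch below restates that store-then-check pattern with invented names (inode_state, mapping_tags, F_DIRTY and T_PAGE_DIRTY stand in for i_state, the page-cache tag, I_DIRTY and PAGECACHE_TAG_DIRTY); the property the paired smp_mb()s buy is that at least one side must observe the other's store, so a dirtying event cannot be missed by both.

#include <linux/bitops.h>
#include <asm/barrier.h>

static unsigned long inode_state;	/* stands in for inode->i_state      */
static unsigned long mapping_tags;	/* stands in for the mapping's tags  */
#define F_DIRTY		0		/* stands in for I_DIRTY             */
#define T_PAGE_DIRTY	0		/* stands in for PAGECACHE_TAG_DIRTY */

static void writeback_side(void)	/* cf. __writeback_single_inode() */
{
	clear_bit(F_DIRTY, &inode_state);	/* "clear I_DIRTY"           */
	smp_mb();				/* pairs with dirtier_side() */
	if (test_bit(T_PAGE_DIRTY, &mapping_tags))
		set_bit(F_DIRTY, &inode_state);	/* reinstate the flag        */
}

static void dirtier_side(void)		/* cf. __mark_inode_dirty() */
{
	set_bit(T_PAGE_DIRTY, &mapping_tags);	/* "tag the pages dirty"       */
	smp_mb();				/* pairs with writeback_side() */
	if (test_bit(F_DIRTY, &inode_state))
		return;				/* flag still set: nothing lost   */
	set_bit(F_DIRTY, &inode_state);		/* real code takes i_lock here    */
}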
include/linux/blk-mq.h

@@ -169,6 +169,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 		bool reserved);