Commit 28fc591f authored by Christoph Hellwig, committed by Jens Axboe

block: move the bio cgroup association helpers to blk-cgroup.c

Keep the cgroup code together.
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a18b9b15
block/bio.c
@@ -1627,81 +1627,6 @@ int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
}
EXPORT_SYMBOL(bioset_init_from_src);
#ifdef CONFIG_BLK_CGROUP

/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio. An association failure is handled by walking up
 * the blkg tree. Therefore, the blkg associated can be anything between @blkg
 * and q->root_blkg. This situation only happens when a cgroup is dying and
 * then the remaining bios will spill to the closest alive blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
 * freed.
 */
void bio_associate_blkg_from_css(struct bio *bio,
                                 struct cgroup_subsys_state *css)
{
        struct request_queue *q = bio->bi_disk->queue;
        struct blkcg_gq *blkg = q->root_blkg;

        if (bio->bi_blkg)
                blkg_put(bio->bi_blkg);

        rcu_read_lock();
        if (css && css->parent)
                blkg = blkg_lookup_create(css_to_blkcg(css), q);
        bio->bi_blkg = blkg_tryget_closest(blkg);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);

/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is
 * already associated, the css is reused and association redone as the
 * request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();

        if (bio->bi_blkg)
                css = &bio_blkcg(bio)->css;
        else
                css = blkcg_css();

        bio_associate_blkg_from_css(bio, css);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
        if (src->bi_blkg) {
                if (dst->bi_blkg)
                        blkg_put(dst->bi_blkg);
                blkg_get(src->bi_blkg);
                dst->bi_blkg = src->bi_blkg;
        }
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);

#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
{
        int i;
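The helpers removed above keep their external API; this patch only moves their definitions. As a hedged illustration (not part of this patch; example_submit() and wb_css are hypothetical names), a submitter on a kernel of this era might use them roughly as follows. In these kernels bio_set_dev() already performs the default association via bio_associate_blkg(), so the css variant is only needed when the I/O should be charged to a cgroup other than the submitting task's, as writeback-style code does:

#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/cgroup.h>

/* Hypothetical example, not from this commit. */
static void example_submit(struct block_device *bdev, struct bio *bio,
                           struct cgroup_subsys_state *wb_css)
{
        /* bio_set_dev() associates the bio with the current task's blkg */
        bio_set_dev(bio, bdev);

        /* override the default when the I/O belongs to another cgroup */
        if (wb_css)
                bio_associate_blkg_from_css(bio, wb_css);

        submit_bio(bio);
}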
block/blk-cgroup.c
@@ -328,7 +328,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
-struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
                struct request_queue *q)
{
        struct blkcg_gq *blkg;
@@ -377,7 +377,7 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 * This looks up or creates the blkg representing the unique pair
 * of the blkcg and the request_queue.
 */
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                struct request_queue *q)
{
        struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
@@ -1727,6 +1727,105 @@ void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
        atomic64_add(delta, &blkg->delay_nsec);
}
/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This needs to be called rcu protected. As the failure mode here is to walk
 * up the blkg tree, this ensures that the blkg->parent pointers are always
 * valid. This returns the blkg that it ended up taking a reference on or %NULL
 * if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
{
        struct blkcg_gq *ret_blkg = NULL;

        WARN_ON_ONCE(!rcu_read_lock_held());

        while (blkg) {
                if (blkg_tryget(blkg)) {
                        ret_blkg = blkg;
                        break;
                }
                blkg = blkg->parent;
        }

        return ret_blkg;
}
/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio. An association failure is handled by walking up
 * the blkg tree. Therefore, the blkg associated can be anything between @blkg
 * and q->root_blkg. This situation only happens when a cgroup is dying and
 * then the remaining bios will spill to the closest alive blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
 * freed.
 */
void bio_associate_blkg_from_css(struct bio *bio,
                                 struct cgroup_subsys_state *css)
{
        struct request_queue *q = bio->bi_disk->queue;
        struct blkcg_gq *blkg = q->root_blkg;

        if (bio->bi_blkg)
                blkg_put(bio->bi_blkg);

        rcu_read_lock();
        if (css && css->parent)
                blkg = blkg_lookup_create(css_to_blkcg(css), q);
        bio->bi_blkg = blkg_tryget_closest(blkg);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);

/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is
 * already associated, the css is reused and association redone as the
 * request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();

        if (bio->bi_blkg)
                css = &bio_blkcg(bio)->css;
        else
                css = blkcg_css();

        bio_associate_blkg_from_css(bio, css);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
        if (src->bi_blkg) {
                if (dst->bi_blkg)
                        blkg_put(dst->bi_blkg);
                blkg_get(src->bi_blkg);
                dst->bi_blkg = src->bi_blkg;
        }
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
static int __init blkcg_init(void)
{
        blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
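As a hedged illustration of bio_clone_blkg_association() added above (not part of this patch; example_alloc_tail_clone() is a hypothetical name, and bio_clone_fast() already does this internally in kernels of this era), a driver building a clone by hand for the same device would copy the parent's association so the clone is charged to the parent's blkg rather than to whatever cgroup the allocating thread happens to run in:

#include <linux/bio.h>
#include <linux/gfp.h>

/* Hypothetical example, not from this commit. */
static struct bio *example_alloc_tail_clone(struct bio *parent)
{
        struct bio *clone = bio_alloc(GFP_NOIO, 0);

        if (!clone)
                return NULL;

        /* point the clone at the same disk/partition as the parent */
        clone->bi_disk = parent->bi_disk;
        clone->bi_partno = parent->bi_partno;
        clone->bi_opf = parent->bi_opf;

        /*
         * Share the parent's blkg; this takes an extra reference that
         * is dropped when the clone is freed.
         */
        bio_clone_blkg_association(clone, parent);
        return clone;
}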
include/linux/blk-cgroup.h
@@ -183,10 +183,6 @@ extern bool blkcg_debug_stats;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
                                      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);

int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);
@@ -480,32 +476,6 @@ static inline bool blkg_tryget(struct blkcg_gq *blkg)
        return blkg && percpu_ref_tryget(&blkg->refcnt);
}
/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This needs to be called rcu protected. As the failure mode here is to walk
 * up the blkg tree, this ensures that the blkg->parent pointers are always
 * valid. This returns the blkg that it ended up taking a reference on or %NULL
 * if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
{
        struct blkcg_gq *ret_blkg = NULL;

        WARN_ON_ONCE(!rcu_read_lock_held());

        while (blkg) {
                if (blkg_tryget(blkg)) {
                        ret_blkg = blkg;
                        break;
                }
                blkg = blkg->parent;
        }

        return ret_blkg;
}
/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
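The walk in blkg_tryget_closest() is built on the blkg_tryget()/blkg_put() pair that stays in this header. As a hedged illustration (not part of this patch; example_pin_blkg() is a hypothetical name), pinning a blkg across an RCU section looks roughly like this, with the tryget failure case being exactly what makes the walk to blkg->parent in blkg_tryget_closest() necessary:

#include <linux/blk-cgroup.h>
#include <linux/rcupdate.h>

/* Hypothetical example, not from this commit. */
static struct blkcg_gq *example_pin_blkg(struct blkcg *blkcg,
                                         struct request_queue *q)
{
        struct blkcg_gq *blkg;

        rcu_read_lock();
        blkg = blkg_lookup(blkcg, q);
        if (blkg && !blkg_tryget(blkg)) {
                /*
                 * The blkg is dying; a caller like blkg_tryget_closest()
                 * would retry on blkg->parent instead of giving up.
                 */
                blkg = NULL;
        }
        rcu_read_unlock();

        return blkg;    /* release with blkg_put() when done */
}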