blkcg: consolidate blkg creation in blkcg_bio_issue_check()
blkg (blkcg_gq) currently is created by blkcg policies invoking blkg_lookup_create() which ends up repeating about the same code in different policies. Theoretically, this can avoid the overhead of looking up and/or creating blkg's if blkcg is enabled but no policy is in use; however, the cost of blkg lookup / creation is very low, especially if only the root blkcg is in use, which is highly likely if no blkcg policy is in active use - it boils down to a single very predictable conditional and the surrounding RCU protection.

This patch consolidates blkg creation into a new function, blkcg_bio_issue_check(), which is called during bio issue from generic_make_request_checks(). blkcg_bio_issue_check() is now the only function which tries to create missing blkg's. The subsequent policy and request_list operations just perform blkg_lookup() and, if the blkg is missing, fall back to the root.

* blk_get_rl() no longer tries to create a blkg. It uses blkg_lookup() instead of blkg_lookup_create().

* blk_throtl_bio() is now called from blkcg_bio_issue_check() with the rcu read lock held and the blkg already looked up. Both throtl_lookup_tg() and throtl_lookup_create_tg() are dropped.

* cfq is similarly updated. cfq_lookup_create_cfqg() is replaced with cfq_lookup_cfqg() which uses blkg_lookup().

This consolidates blkg handling and avoids unnecessary blkg creation retries under memory pressure. In addition, it provides a common bio entry point into blkcg where things like common accounting can be performed.

v2: Build fixes for !CONFIG_CFQ_GROUP_IOSCHED and !CONFIG_BLK_DEV_THROTTLING.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent c9589f03e4
commit ae11889636
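
For quick reference, the core of the new entry point this patch introduces is condensed below from the last two hunks of the diff; the comments are editorial, not part of the patch. The blkg is looked up under RCU, created under the queue lock only when it is missing, and then handed to blk_throtl_bio() together with the bio:

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool throtl = false;

        rcu_read_lock();
        blkcg = bio_blkcg(bio);

        /* bio issue is now the only path that creates missing blkg's */
        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg)) {
                spin_lock_irq(q->queue_lock);
                blkg = blkg_lookup_create(blkcg, q);
                if (IS_ERR(blkg))
                        blkg = NULL;    /* creation failed; callers fall back to root */
                spin_unlock_irq(q->queue_lock);
        }

        /* throttle with the rcu read lock held and the blkg already resolved */
        throtl = blk_throtl_bio(q, blkg, bio);

        rcu_read_unlock();
        return !throtl;         /* false: the bio was throttled and held back */
}

generic_make_request_checks() then simply stops issuing the bio when this returns false, replacing its previous direct call to blk_throtl_bio() (see the hunk against generic_make_request_checks() below).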
@@ -153,6 +153,7 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 
         return NULL;
 }
+EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
 
 /*
  * If @new_blkg is %NULL, this function tries to allocate a new one as
@@ -295,7 +296,6 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                         return blkg;
         }
 }
-EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
@@ -1889,8 +1889,8 @@ generic_make_request_checks(struct bio *bio)
          */
         create_io_context(GFP_ATOMIC, q->node);
 
-        if (blk_throtl_bio(q, bio))
-                return false;   /* throttled, will be resubmitted later */
+        if (!blkcg_bio_issue_check(q, bio))
+                return false;
 
         trace_block_bio_queue(q, bio);
         return true;
@@ -182,11 +182,6 @@ static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
         return pd_to_blkg(&tg->pd);
 }
 
-static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
-{
-        return blkg_to_tg(td->queue->root_blkg);
-}
-
 /**
  * sq_to_tg - return the throl_grp the specified service queue belongs to
  * @sq: the throtl_service_queue of interest
@@ -449,39 +444,6 @@ static void throtl_pd_reset_stats(struct blkg_policy_data *pd)
         }
 }
 
-static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
-                                           struct blkcg *blkcg)
-{
-        return blkg_to_tg(blkg_lookup(blkcg, td->queue));
-}
-
-static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
-                                                  struct blkcg *blkcg)
-{
-        struct request_queue *q = td->queue;
-        struct throtl_grp *tg = NULL;
-
-        /*
-         * This is the common case when there are no blkcgs. Avoid lookup
-         * in this case
-         */
-        if (blkcg == &blkcg_root) {
-                tg = td_root_tg(td);
-        } else {
-                struct blkcg_gq *blkg;
-
-                blkg = blkg_lookup_create(blkcg, q);
-
-                /* if %NULL and @q is alive, fall back to root_tg */
-                if (!IS_ERR(blkg))
-                        tg = blkg_to_tg(blkg);
-                else
-                        tg = td_root_tg(td);
-        }
-
-        return tg;
-}
-
 static struct throtl_grp *
 throtl_rb_first(struct throtl_service_queue *parent_sq)
 {
@@ -1403,46 +1365,26 @@ static struct blkcg_policy blkcg_policy_throtl = {
         .pd_reset_stats_fn = throtl_pd_reset_stats,
 };
 
-bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
+bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+                    struct bio *bio)
 {
-        struct throtl_data *td = q->td;
         struct throtl_qnode *qn = NULL;
-        struct throtl_grp *tg;
+        struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
         struct throtl_service_queue *sq;
         bool rw = bio_data_dir(bio);
-        struct blkcg *blkcg;
         bool throttled = false;
 
+        WARN_ON_ONCE(!rcu_read_lock_held());
+
         /* see throtl_charge_bio() */
-        if (bio->bi_rw & REQ_THROTTLED)
+        if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
                 goto out;
 
-        /*
-         * A throtl_grp pointer retrieved under rcu can be used to access
-         * basic fields like stats and io rates. If a group has no rules,
-         * just update the dispatch stats in lockless manner and return.
-         */
-        rcu_read_lock();
-        blkcg = bio_blkcg(bio);
-        tg = throtl_lookup_tg(td, blkcg);
-        if (tg) {
-                if (!tg->has_rules[rw]) {
-                        throtl_update_dispatch_stats(tg_to_blkg(tg),
-                                        bio->bi_iter.bi_size, bio->bi_rw);
-                        goto out_unlock_rcu;
-                }
-        }
-
-        /*
-         * Either group has not been allocated yet or it is not an unlimited
-         * IO group
-         */
         spin_lock_irq(q->queue_lock);
 
         if (unlikely(blk_queue_bypass(q)))
                 goto out_unlock;
 
-        tg = throtl_lookup_create_tg(td, blkcg);
         sq = &tg->service_queue;
 
         while (true) {
@@ -1507,8 +1449,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 
 out_unlock:
         spin_unlock_irq(q->queue_lock);
-out_unlock_rcu:
-        rcu_read_unlock();
 out:
         /*
          * As multiple blk-throtls may stack in the same issue path, we
@@ -266,15 +266,10 @@ static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
  * Internal throttling interface
  */
 #ifdef CONFIG_BLK_DEV_THROTTLING
-extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
-static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
-{
-        return false;
-}
 static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
@@ -1683,28 +1683,15 @@ static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
         cfqg_stats_reset(&cfqg->dead_stats);
 }
 
-/*
- * Search for the cfq group current task belongs to. request_queue lock must
- * be held.
- */
-static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
-                                                struct blkcg *blkcg)
+static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
+                                         struct blkcg *blkcg)
 {
-        struct request_queue *q = cfqd->queue;
-        struct cfq_group *cfqg = NULL;
-
-        /* avoid lookup for the common case where there's no blkcg */
-        if (blkcg == &blkcg_root) {
-                cfqg = cfqd->root_group;
-        } else {
-                struct blkcg_gq *blkg;
-
-                blkg = blkg_lookup_create(blkcg, q);
-                if (!IS_ERR(blkg))
-                        cfqg = blkg_to_cfqg(blkg);
-        }
+        struct blkcg_gq *blkg;
 
-        return cfqg;
+        blkg = blkg_lookup(blkcg, cfqd->queue);
+        if (likely(blkg))
+                return blkg_to_cfqg(blkg);
+        return NULL;
 }
 
 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
|
|||||||
{ } /* terminate */
|
{ } /* terminate */
|
||||||
};
|
};
|
||||||
#else /* GROUP_IOSCHED */
|
#else /* GROUP_IOSCHED */
|
||||||
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
|
static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
|
||||||
struct blkcg *blkcg)
|
struct blkcg *blkcg)
|
||||||
{
|
{
|
||||||
return cfqd->root_group;
|
return cfqd->root_group;
|
||||||
}
|
}
|
||||||
@@ -3716,7 +3703,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
         struct cfq_group *cfqg;
 
         rcu_read_lock();
-        cfqg = cfq_lookup_create_cfqg(cfqd, bio_blkcg(bio));
+        cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
         if (!cfqg) {
                 cfqq = &cfqd->oom_cfqq;
                 goto out;
@@ -421,8 +421,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
          * or if either the blkcg or queue is going away. Fall back to
          * root_rl in such cases.
          */
-        blkg = blkg_lookup_create(blkcg, q);
-        if (unlikely(IS_ERR(blkg)))
+        blkg = blkg_lookup(blkcg, q);
+        if (unlikely(!blkg))
                 goto root_rl;
 
         blkg_get(blkg);
@@ -636,6 +636,39 @@ static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
         u64_stats_update_end(&to->syncp);
 }
 
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+                           struct bio *bio);
+#else
+static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+                                  struct bio *bio) { return false; }
+#endif
+
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+                                         struct bio *bio)
+{
+        struct blkcg *blkcg;
+        struct blkcg_gq *blkg;
+        bool throtl = false;
+
+        rcu_read_lock();
+        blkcg = bio_blkcg(bio);
+
+        blkg = blkg_lookup(blkcg, q);
+        if (unlikely(!blkg)) {
+                spin_lock_irq(q->queue_lock);
+                blkg = blkg_lookup_create(blkcg, q);
+                if (IS_ERR(blkg))
+                        blkg = NULL;
+                spin_unlock_irq(q->queue_lock);
+        }
+
+        throtl = blk_throtl_bio(q, blkg, bio);
+
+        rcu_read_unlock();
+        return !throtl;
+}
+
 #else /* CONFIG_BLK_CGROUP */
 
 struct blkcg {
@@ -689,6 +722,9 @@ static inline void blk_put_rl(struct request_list *rl) { }
 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
 static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
 
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+                                         struct bio *bio) { return true; }
+
 #define blk_queue_for_each_rl(rl, q) \
         for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
 