Mirror of https://github.com/FEX-Emu/linux.git (synced 2024-12-03 05:02:09 +00:00)
Merge branch 'for-3.5/core' of git://git.kernel.dk/linux-block
Merge block/IO core bits from Jens Axboe:
 "This is a bit bigger on the core side than usual, but that is purely
  because we decided to hold off on parts of Tejun's submission on 3.4
  to give it a bit more time to simmer. As a consequence, it's seen a
  long cycle in for-next.

  It contains:

   - Bug fix from Dan, wrong locking type.

   - Relax splice gifting restriction from Eric.

   - A ton of updates from Tejun, primarily for blkcg. This improves
     the code a lot, making the API nicer and cleaner, and also
     includes fixes for how we handle and tie policies and re-activate
     on switches. The changes also include generic bug fixes.

   - A simple fix from Vivek, along with a fix for doing proper
     delayed allocation of the blkcg stats."

Fix up annoying conflict just due to different merge resolution in
Documentation/feature-removal-schedule.txt

* 'for-3.5/core' of git://git.kernel.dk/linux-block: (92 commits)
  blkcg: tg_stats_alloc_lock is an irq lock
  vmsplice: relax alignement requirements for SPLICE_F_GIFT
  blkcg: use radix tree to index blkgs from blkcg
  blkcg: fix blkcg->css ref leak in __blkg_lookup_create()
  block: fix elvpriv allocation failure handling
  block: collapse blk_alloc_request() into get_request()
  blkcg: collapse blkcg_policy_ops into blkcg_policy
  blkcg: embed struct blkg_policy_data in policy specific data
  blkcg: mass rename of blkcg API
  blkcg: style cleanups for blk-cgroup.h
  blkcg: remove blkio_group->path[]
  blkcg: blkg_rwstat_read() was missing inline
  blkcg: shoot down blkgs if all policies are deactivated
  blkcg: drop stuff unused after per-queue policy activation update
  blkcg: implement per-queue policy activation
  blkcg: add request_queue->root_blkg
  blkcg: make request_queue bypassing on allocation
  blkcg: make sure blkg_lookup() returns %NULL if @q is bypassing
  blkcg: make blkg_conf_prep() take @pol and return with queue lock held
  blkcg: remove static policy ID enums
  ...
commit 0d167518e0
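
Most of the series below reworks how a blkcg policy plugs into the block core: a policy now describes itself with a struct blkcg_policy (per-blkg private-data size plus pd_init/exit/reset callbacks), registers it once with blkcg_policy_register(), and enables it per request_queue with blkcg_activate_policy(). As a rough orientation before the diffs, here is a minimal sketch of that flow built only from the declarations visible in the blk-cgroup.h hunks below; the "example" names, the callback body and the init/enable helpers are invented for illustration and are not part of this merge.

/* Hypothetical skeleton of a policy against the reworked blkcg API. */
struct example_grp {
	struct blkg_policy_data pd;	/* generic part, must come first */
	/* policy-private per-(blkcg, request_queue) state would go here */
};

static void example_pd_init(struct blkcg_gq *blkg)
{
	/* initialize the per-blkg area blkcg core allocated (pd_size bytes) */
}

static struct blkcg_policy blkcg_policy_example = {
	.pd_size	= sizeof(struct example_grp),
	.pd_init_fn	= example_pd_init,
};

static int __init example_init(void)
{
	/* make the policy known to blkcg core */
	return blkcg_policy_register(&blkcg_policy_example);
}

/* later, switch the policy on for one specific queue */
static int example_enable_on(struct request_queue *q)
{
	return blkcg_activate_policy(q, &blkcg_policy_example);
}
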
@@ -23,8 +23,6 @@ config IOSCHED_DEADLINE

config IOSCHED_CFQ
	tristate "CFQ I/O scheduler"
	# If BLK_CGROUP is a module, CFQ has to be built as module.
	depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
	default y
	---help---
	  The CFQ I/O scheduler tries to distribute bandwidth equally

@@ -34,8 +32,6 @@ config IOSCHED_CFQ

	  This is the default I/O scheduler.

	  Note: If BLK_CGROUP=m, then CFQ can be built only as module.

config CFQ_GROUP_IOSCHED
	bool "CFQ Group Scheduling support"
	depends on IOSCHED_CFQ && BLK_CGROUP
block/blk-cgroup.c (2248 lines changed): file diff suppressed because it is too large
@ -15,350 +15,371 @@
|
||||
|
||||
#include <linux/cgroup.h>
|
||||
#include <linux/u64_stats_sync.h>
|
||||
|
||||
enum blkio_policy_id {
|
||||
BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */
|
||||
BLKIO_POLICY_THROTL, /* Throttling */
|
||||
};
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/radix-tree.h>
|
||||
|
||||
/* Max limits for throttle policy */
|
||||
#define THROTL_IOPS_MAX UINT_MAX
|
||||
|
||||
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
|
||||
/* CFQ specific, out here for blkcg->cfq_weight */
|
||||
#define CFQ_WEIGHT_MIN 10
|
||||
#define CFQ_WEIGHT_MAX 1000
|
||||
#define CFQ_WEIGHT_DEFAULT 500
|
||||
|
||||
#ifndef CONFIG_BLK_CGROUP
|
||||
/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
|
||||
extern struct cgroup_subsys blkio_subsys;
|
||||
#define blkio_subsys_id blkio_subsys.subsys_id
|
||||
#endif
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
|
||||
enum stat_type {
|
||||
/* Total time spent (in ns) between request dispatch to the driver and
|
||||
* request completion for IOs doen by this cgroup. This may not be
|
||||
* accurate when NCQ is turned on. */
|
||||
BLKIO_STAT_SERVICE_TIME = 0,
|
||||
/* Total time spent waiting in scheduler queue in ns */
|
||||
BLKIO_STAT_WAIT_TIME,
|
||||
/* Number of IOs queued up */
|
||||
BLKIO_STAT_QUEUED,
|
||||
/* All the single valued stats go below this */
|
||||
BLKIO_STAT_TIME,
|
||||
#ifdef CONFIG_DEBUG_BLK_CGROUP
|
||||
/* Time not charged to this cgroup */
|
||||
BLKIO_STAT_UNACCOUNTED_TIME,
|
||||
BLKIO_STAT_AVG_QUEUE_SIZE,
|
||||
BLKIO_STAT_IDLE_TIME,
|
||||
BLKIO_STAT_EMPTY_TIME,
|
||||
BLKIO_STAT_GROUP_WAIT_TIME,
|
||||
BLKIO_STAT_DEQUEUE
|
||||
#endif
|
||||
enum blkg_rwstat_type {
|
||||
BLKG_RWSTAT_READ,
|
||||
BLKG_RWSTAT_WRITE,
|
||||
BLKG_RWSTAT_SYNC,
|
||||
BLKG_RWSTAT_ASYNC,
|
||||
|
||||
BLKG_RWSTAT_NR,
|
||||
BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
|
||||
};
|
||||
|
||||
/* Per cpu stats */
|
||||
enum stat_type_cpu {
|
||||
BLKIO_STAT_CPU_SECTORS,
|
||||
/* Total bytes transferred */
|
||||
BLKIO_STAT_CPU_SERVICE_BYTES,
|
||||
/* Total IOs serviced, post merge */
|
||||
BLKIO_STAT_CPU_SERVICED,
|
||||
/* Number of IOs merged */
|
||||
BLKIO_STAT_CPU_MERGED,
|
||||
BLKIO_STAT_CPU_NR
|
||||
struct blkcg_gq;
|
||||
|
||||
struct blkcg {
|
||||
struct cgroup_subsys_state css;
|
||||
spinlock_t lock;
|
||||
|
||||
struct radix_tree_root blkg_tree;
|
||||
struct blkcg_gq *blkg_hint;
|
||||
struct hlist_head blkg_list;
|
||||
|
||||
/* for policies to test whether associated blkcg has changed */
|
||||
uint64_t id;
|
||||
|
||||
/* TODO: per-policy storage in blkcg */
|
||||
unsigned int cfq_weight; /* belongs to cfq */
|
||||
};
|
||||
|
||||
enum stat_sub_type {
|
||||
BLKIO_STAT_READ = 0,
|
||||
BLKIO_STAT_WRITE,
|
||||
BLKIO_STAT_SYNC,
|
||||
BLKIO_STAT_ASYNC,
|
||||
BLKIO_STAT_TOTAL
|
||||
struct blkg_stat {
|
||||
struct u64_stats_sync syncp;
|
||||
uint64_t cnt;
|
||||
};
|
||||
|
||||
/* blkg state flags */
|
||||
enum blkg_state_flags {
|
||||
BLKG_waiting = 0,
|
||||
BLKG_idling,
|
||||
BLKG_empty,
|
||||
struct blkg_rwstat {
|
||||
struct u64_stats_sync syncp;
|
||||
uint64_t cnt[BLKG_RWSTAT_NR];
|
||||
};
|
||||
|
||||
/* cgroup files owned by proportional weight policy */
|
||||
enum blkcg_file_name_prop {
|
||||
BLKIO_PROP_weight = 1,
|
||||
BLKIO_PROP_weight_device,
|
||||
BLKIO_PROP_io_service_bytes,
|
||||
BLKIO_PROP_io_serviced,
|
||||
BLKIO_PROP_time,
|
||||
BLKIO_PROP_sectors,
|
||||
BLKIO_PROP_unaccounted_time,
|
||||
BLKIO_PROP_io_service_time,
|
||||
BLKIO_PROP_io_wait_time,
|
||||
BLKIO_PROP_io_merged,
|
||||
BLKIO_PROP_io_queued,
|
||||
BLKIO_PROP_avg_queue_size,
|
||||
BLKIO_PROP_group_wait_time,
|
||||
BLKIO_PROP_idle_time,
|
||||
BLKIO_PROP_empty_time,
|
||||
BLKIO_PROP_dequeue,
|
||||
/*
 * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
 * request_queue (q). This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size. blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkcg_gq		*blkg;

	/* used during policy activation */
	struct list_head	alloc_node;
};
||||
|
||||
/* cgroup files owned by throttle policy */
|
||||
enum blkcg_file_name_throtl {
|
||||
BLKIO_THROTL_read_bps_device,
|
||||
BLKIO_THROTL_write_bps_device,
|
||||
BLKIO_THROTL_read_iops_device,
|
||||
BLKIO_THROTL_write_iops_device,
|
||||
BLKIO_THROTL_io_service_bytes,
|
||||
BLKIO_THROTL_io_serviced,
|
||||
/* association between a blk cgroup and a request queue */
|
||||
struct blkcg_gq {
|
||||
/* Pointer to the associated request_queue */
|
||||
struct request_queue *q;
|
||||
struct list_head q_node;
|
||||
struct hlist_node blkcg_node;
|
||||
struct blkcg *blkcg;
|
||||
/* reference count */
|
||||
int refcnt;
|
||||
|
||||
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
|
||||
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
||||
struct blkio_cgroup {
|
||||
struct cgroup_subsys_state css;
|
||||
unsigned int weight;
|
||||
spinlock_t lock;
|
||||
struct hlist_head blkg_list;
|
||||
struct list_head policy_list; /* list of blkio_policy_node */
|
||||
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
|
||||
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
|
||||
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
|
||||
|
||||
struct blkcg_policy {
|
||||
int plid;
|
||||
/* policy specific private data size */
|
||||
size_t pd_size;
|
||||
/* cgroup files for the policy */
|
||||
struct cftype *cftypes;
|
||||
|
||||
/* operations */
|
||||
blkcg_pol_init_pd_fn *pd_init_fn;
|
||||
blkcg_pol_exit_pd_fn *pd_exit_fn;
|
||||
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
|
||||
};
|
||||
|
||||
struct blkio_group_stats {
|
||||
/* total disk time and nr sectors dispatched by this group */
|
||||
uint64_t time;
|
||||
uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
|
||||
#ifdef CONFIG_DEBUG_BLK_CGROUP
|
||||
/* Time not charged to this cgroup */
|
||||
uint64_t unaccounted_time;
|
||||
extern struct blkcg blkcg_root;
|
||||
|
||||
/* Sum of number of IOs queued across all samples */
|
||||
uint64_t avg_queue_size_sum;
|
||||
/* Count of samples taken for average */
|
||||
uint64_t avg_queue_size_samples;
|
||||
/* How many times this group has been removed from service tree */
|
||||
unsigned long dequeue;
|
||||
|
||||
/* Total time spent waiting for it to be assigned a timeslice. */
|
||||
uint64_t group_wait_time;
|
||||
uint64_t start_group_wait_time;
|
||||
|
||||
/* Time spent idling for this blkio_group */
|
||||
uint64_t idle_time;
|
||||
uint64_t start_idle_time;
|
||||
/*
|
||||
* Total time when we have requests queued and do not contain the
|
||||
* current active queue.
|
||||
*/
|
||||
uint64_t empty_time;
|
||||
uint64_t start_empty_time;
|
||||
uint16_t flags;
|
||||
#endif
|
||||
};
|
||||
|
||||
/* Per cpu blkio group stats */
|
||||
struct blkio_group_stats_cpu {
|
||||
uint64_t sectors;
|
||||
uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
|
||||
struct u64_stats_sync syncp;
|
||||
};
|
||||
|
||||
struct blkio_group {
|
||||
/* An rcu protected unique identifier for the group */
|
||||
void *key;
|
||||
struct hlist_node blkcg_node;
|
||||
unsigned short blkcg_id;
|
||||
/* Store cgroup path */
|
||||
char path[128];
|
||||
/* The device MKDEV(major, minor), this group has been created for */
|
||||
dev_t dev;
|
||||
/* policy which owns this blk group */
|
||||
enum blkio_policy_id plid;
|
||||
|
||||
/* Need to serialize the stats in the case of reset/update */
|
||||
spinlock_t stats_lock;
|
||||
struct blkio_group_stats stats;
|
||||
/* Per cpu stats pointer */
|
||||
struct blkio_group_stats_cpu __percpu *stats_cpu;
|
||||
};
|
||||
|
||||
struct blkio_policy_node {
|
||||
struct list_head node;
|
||||
dev_t dev;
|
||||
/* This node belongs to max bw policy or porportional weight policy */
|
||||
enum blkio_policy_id plid;
|
||||
/* cgroup file to which this rule belongs to */
|
||||
int fileid;
|
||||
|
||||
union {
|
||||
unsigned int weight;
|
||||
/*
|
||||
* Rate read/write in terms of bytes per second
|
||||
* Whether this rate represents read or write is determined
|
||||
* by file type "fileid".
|
||||
*/
|
||||
u64 bps;
|
||||
unsigned int iops;
|
||||
} val;
|
||||
};
|
||||
|
||||
extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
|
||||
dev_t dev);
|
||||
extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
|
||||
dev_t dev);
|
||||
extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
|
||||
dev_t dev);
|
||||
extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
|
||||
dev_t dev);
|
||||
extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
|
||||
dev_t dev);
|
||||
|
||||
typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
|
||||
|
||||
typedef void (blkio_update_group_weight_fn) (void *key,
|
||||
struct blkio_group *blkg, unsigned int weight);
|
||||
typedef void (blkio_update_group_read_bps_fn) (void * key,
|
||||
struct blkio_group *blkg, u64 read_bps);
|
||||
typedef void (blkio_update_group_write_bps_fn) (void *key,
|
||||
struct blkio_group *blkg, u64 write_bps);
|
||||
typedef void (blkio_update_group_read_iops_fn) (void *key,
|
||||
struct blkio_group *blkg, unsigned int read_iops);
|
||||
typedef void (blkio_update_group_write_iops_fn) (void *key,
|
||||
struct blkio_group *blkg, unsigned int write_iops);
|
||||
|
||||
struct blkio_policy_ops {
|
||||
blkio_unlink_group_fn *blkio_unlink_group_fn;
|
||||
blkio_update_group_weight_fn *blkio_update_group_weight_fn;
|
||||
blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
|
||||
blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
|
||||
blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
|
||||
blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
|
||||
};
|
||||
|
||||
struct blkio_policy_type {
|
||||
struct list_head list;
|
||||
struct blkio_policy_ops ops;
|
||||
enum blkio_policy_id plid;
|
||||
};
|
||||
struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
|
||||
struct blkcg *bio_blkcg(struct bio *bio);
|
||||
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
|
||||
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
|
||||
struct request_queue *q);
|
||||
int blkcg_init_queue(struct request_queue *q);
|
||||
void blkcg_drain_queue(struct request_queue *q);
|
||||
void blkcg_exit_queue(struct request_queue *q);
|
||||
|
||||
/* Blkio controller policy registration */
|
||||
extern void blkio_policy_register(struct blkio_policy_type *);
|
||||
extern void blkio_policy_unregister(struct blkio_policy_type *);
|
||||
int blkcg_policy_register(struct blkcg_policy *pol);
|
||||
void blkcg_policy_unregister(struct blkcg_policy *pol);
|
||||
int blkcg_activate_policy(struct request_queue *q,
|
||||
const struct blkcg_policy *pol);
|
||||
void blkcg_deactivate_policy(struct request_queue *q,
|
||||
const struct blkcg_policy *pol);
|
||||
|
||||
static inline char *blkg_path(struct blkio_group *blkg)
|
||||
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
|
||||
u64 (*prfill)(struct seq_file *,
|
||||
struct blkg_policy_data *, int),
|
||||
const struct blkcg_policy *pol, int data,
|
||||
bool show_total);
|
||||
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
|
||||
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
|
||||
const struct blkg_rwstat *rwstat);
|
||||
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
|
||||
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
|
||||
int off);
|
||||
|
||||
struct blkg_conf_ctx {
|
||||
struct gendisk *disk;
|
||||
struct blkcg_gq *blkg;
|
||||
u64 v;
|
||||
};
|
||||
|
||||
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
|
||||
const char *input, struct blkg_conf_ctx *ctx);
|
||||
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
|
||||
|
||||
|
||||
/**
|
||||
* blkg_to_pdata - get policy private data
|
||||
* @blkg: blkg of interest
|
||||
* @pol: policy of interest
|
||||
*
|
||||
* Return pointer to private data associated with the @blkg-@pol pair.
|
||||
*/
|
||||
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
|
||||
struct blkcg_policy *pol)
|
||||
{
|
||||
return blkg->path;
|
||||
return blkg ? blkg->pd[pol->plid] : NULL;
|
||||
}
|
||||
|
||||
#else
|
||||
/**
|
||||
* pdata_to_blkg - get blkg associated with policy private data
|
||||
* @pd: policy private data of interest
|
||||
*
|
||||
* @pd is policy private data. Determine the blkg it's associated with.
|
||||
*/
|
||||
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
|
||||
{
|
||||
return pd ? pd->blkg : NULL;
|
||||
}
|
||||
|
||||
struct blkio_group {
|
||||
};
|
||||
/**
|
||||
* blkg_path - format cgroup path of blkg
|
||||
* @blkg: blkg of interest
|
||||
* @buf: target buffer
|
||||
* @buflen: target buffer length
|
||||
*
|
||||
* Format the path of the cgroup of @blkg into @buf.
|
||||
*/
|
||||
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
|
||||
{
|
||||
int ret;
|
||||
|
||||
struct blkio_policy_type {
|
||||
};
|
||||
rcu_read_lock();
|
||||
ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
|
||||
rcu_read_unlock();
|
||||
if (ret)
|
||||
strncpy(buf, "<unavailable>", buflen);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
|
||||
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
|
||||
/**
|
||||
* blkg_get - get a blkg reference
|
||||
* @blkg: blkg to get
|
||||
*
|
||||
* The caller should be holding queue_lock and an existing reference.
|
||||
*/
|
||||
static inline void blkg_get(struct blkcg_gq *blkg)
|
||||
{
|
||||
lockdep_assert_held(blkg->q->queue_lock);
|
||||
WARN_ON_ONCE(!blkg->refcnt);
|
||||
blkg->refcnt++;
|
||||
}
|
||||
|
||||
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
|
||||
void __blkg_release(struct blkcg_gq *blkg);
|
||||
|
||||
#endif
|
||||
/**
|
||||
* blkg_put - put a blkg reference
|
||||
* @blkg: blkg to put
|
||||
*
|
||||
* The caller should be holding queue_lock.
|
||||
*/
|
||||
static inline void blkg_put(struct blkcg_gq *blkg)
|
||||
{
|
||||
lockdep_assert_held(blkg->q->queue_lock);
|
||||
WARN_ON_ONCE(blkg->refcnt <= 0);
|
||||
if (!--blkg->refcnt)
|
||||
__blkg_release(blkg);
|
||||
}
|
||||
|
||||
#define BLKIO_WEIGHT_MIN 10
|
||||
#define BLKIO_WEIGHT_MAX 1000
|
||||
#define BLKIO_WEIGHT_DEFAULT 500
|
||||
/**
|
||||
* blkg_stat_add - add a value to a blkg_stat
|
||||
* @stat: target blkg_stat
|
||||
* @val: value to add
|
||||
*
|
||||
* Add @val to @stat. The caller is responsible for synchronizing calls to
|
||||
* this function.
|
||||
*/
|
||||
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
|
||||
{
|
||||
u64_stats_update_begin(&stat->syncp);
|
||||
stat->cnt += val;
|
||||
u64_stats_update_end(&stat->syncp);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_BLK_CGROUP
|
||||
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
|
||||
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
|
||||
unsigned long dequeue);
|
||||
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
|
||||
void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
|
||||
void blkiocg_set_start_empty_time(struct blkio_group *blkg);
|
||||
/**
|
||||
* blkg_stat_read - read the current value of a blkg_stat
|
||||
* @stat: blkg_stat to read
|
||||
*
|
||||
* Read the current value of @stat. This function can be called without
|
||||
* synchroniztion and takes care of u64 atomicity.
|
||||
*/
|
||||
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
|
||||
{
|
||||
unsigned int start;
|
||||
uint64_t v;
|
||||
|
||||
#define BLKG_FLAG_FNS(name) \
|
||||
static inline void blkio_mark_blkg_##name( \
|
||||
struct blkio_group_stats *stats) \
|
||||
{ \
|
||||
stats->flags |= (1 << BLKG_##name); \
|
||||
} \
|
||||
static inline void blkio_clear_blkg_##name( \
|
||||
struct blkio_group_stats *stats) \
|
||||
{ \
|
||||
stats->flags &= ~(1 << BLKG_##name); \
|
||||
} \
|
||||
static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
|
||||
{ \
|
||||
return (stats->flags & (1 << BLKG_##name)) != 0; \
|
||||
} \
|
||||
do {
|
||||
start = u64_stats_fetch_begin(&stat->syncp);
|
||||
v = stat->cnt;
|
||||
} while (u64_stats_fetch_retry(&stat->syncp, start));
|
||||
|
||||
BLKG_FLAG_FNS(waiting)
|
||||
BLKG_FLAG_FNS(idling)
|
||||
BLKG_FLAG_FNS(empty)
|
||||
#undef BLKG_FLAG_FNS
|
||||
#else
|
||||
static inline void blkiocg_update_avg_queue_size_stats(
|
||||
struct blkio_group *blkg) {}
|
||||
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
|
||||
unsigned long dequeue) {}
|
||||
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
|
||||
{}
|
||||
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
|
||||
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
|
||||
#endif
|
||||
return v;
|
||||
}
|
||||
|
||||
/**
|
||||
* blkg_stat_reset - reset a blkg_stat
|
||||
* @stat: blkg_stat to reset
|
||||
*/
|
||||
static inline void blkg_stat_reset(struct blkg_stat *stat)
|
||||
{
|
||||
stat->cnt = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* blkg_rwstat_add - add a value to a blkg_rwstat
|
||||
* @rwstat: target blkg_rwstat
|
||||
* @rw: mask of REQ_{WRITE|SYNC}
|
||||
* @val: value to add
|
||||
*
|
||||
* Add @val to @rwstat. The counters are chosen according to @rw. The
|
||||
* caller is responsible for synchronizing calls to this function.
|
||||
*/
|
||||
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
|
||||
int rw, uint64_t val)
|
||||
{
|
||||
u64_stats_update_begin(&rwstat->syncp);
|
||||
|
||||
if (rw & REQ_WRITE)
|
||||
rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
|
||||
else
|
||||
rwstat->cnt[BLKG_RWSTAT_READ] += val;
|
||||
if (rw & REQ_SYNC)
|
||||
rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
|
||||
else
|
||||
rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
|
||||
|
||||
u64_stats_update_end(&rwstat->syncp);
|
||||
}
|
||||
|
||||
/**
|
||||
* blkg_rwstat_read - read the current values of a blkg_rwstat
|
||||
* @rwstat: blkg_rwstat to read
|
||||
*
|
||||
* Read the current snapshot of @rwstat and return it as the return value.
|
||||
* This function can be called without synchronization and takes care of
|
||||
* u64 atomicity.
|
||||
*/
|
||||
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
|
||||
{
|
||||
unsigned int start;
|
||||
struct blkg_rwstat tmp;
|
||||
|
||||
do {
|
||||
start = u64_stats_fetch_begin(&rwstat->syncp);
|
||||
tmp = *rwstat;
|
||||
} while (u64_stats_fetch_retry(&rwstat->syncp, start));
|
||||
|
||||
return tmp;
|
||||
}
|
||||
|
||||
/**
|
||||
* blkg_rwstat_sum - read the total count of a blkg_rwstat
|
||||
* @rwstat: blkg_rwstat to read
|
||||
*
|
||||
* Return the total count of @rwstat regardless of the IO direction. This
|
||||
* function can be called without synchronization and takes care of u64
|
||||
* atomicity.
|
||||
*/
|
||||
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
|
||||
{
|
||||
struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
|
||||
|
||||
return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
|
||||
}
|
||||
|
||||
/**
|
||||
* blkg_rwstat_reset - reset a blkg_rwstat
|
||||
* @rwstat: blkg_rwstat to reset
|
||||
*/
|
||||
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
|
||||
{
|
||||
memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
|
||||
}
|
||||
|
||||
#else /* CONFIG_BLK_CGROUP */
|
||||
|
||||
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
|
||||
extern struct blkio_cgroup blkio_root_cgroup;
|
||||
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
|
||||
extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
|
||||
extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
|
||||
struct blkio_group *blkg, void *key, dev_t dev,
|
||||
enum blkio_policy_id plid);
|
||||
extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
|
||||
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
|
||||
extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
|
||||
void *key);
|
||||
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
|
||||
unsigned long time,
|
||||
unsigned long unaccounted_time);
|
||||
void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
|
||||
bool direction, bool sync);
|
||||
void blkiocg_update_completion_stats(struct blkio_group *blkg,
|
||||
uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
|
||||
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
|
||||
bool sync);
|
||||
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
|
||||
struct blkio_group *curr_blkg, bool direction, bool sync);
|
||||
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
|
||||
bool direction, bool sync);
|
||||
#else
|
||||
struct cgroup;
|
||||
static inline struct blkio_cgroup *
|
||||
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
|
||||
static inline struct blkio_cgroup *
|
||||
task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
|
||||
|
||||
static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
|
||||
struct blkio_group *blkg, void *key, dev_t dev,
|
||||
enum blkio_policy_id plid) {}
|
||||
struct blkg_policy_data {
|
||||
};
|
||||
|
||||
static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
|
||||
struct blkcg_gq {
|
||||
};
|
||||
|
||||
static inline int
|
||||
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
|
||||
struct blkcg_policy {
|
||||
};
|
||||
|
||||
static inline struct blkio_group *
|
||||
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
|
||||
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
|
||||
unsigned long time,
|
||||
unsigned long unaccounted_time)
|
||||
{}
|
||||
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
|
||||
uint64_t bytes, bool direction, bool sync) {}
|
||||
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
|
||||
uint64_t start_time, uint64_t io_start_time, bool direction,
|
||||
bool sync) {}
|
||||
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
|
||||
bool direction, bool sync) {}
|
||||
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
|
||||
struct blkio_group *curr_blkg, bool direction, bool sync) {}
|
||||
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
|
||||
bool direction, bool sync) {}
|
||||
#endif
|
||||
#endif /* _BLK_CGROUP_H */
|
||||
static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
|
||||
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
|
||||
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
|
||||
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
|
||||
static inline void blkcg_drain_queue(struct request_queue *q) { }
|
||||
static inline void blkcg_exit_queue(struct request_queue *q) { }
|
||||
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
|
||||
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
|
||||
static inline int blkcg_activate_policy(struct request_queue *q,
|
||||
const struct blkcg_policy *pol) { return 0; }
|
||||
static inline void blkcg_deactivate_policy(struct request_queue *q,
|
||||
const struct blkcg_policy *pol) { }
|
||||
|
||||
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
|
||||
struct blkcg_policy *pol) { return NULL; }
|
||||
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
|
||||
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
|
||||
static inline void blkg_get(struct blkcg_gq *blkg) { }
|
||||
static inline void blkg_put(struct blkcg_gq *blkg) { }
|
||||
|
||||
#endif /* CONFIG_BLK_CGROUP */
|
||||
#endif /* _BLK_CGROUP_H */
|
||||
|
block/blk-core.c (287 lines changed)
@ -29,11 +29,13 @@
|
||||
#include <linux/fault-inject.h>
|
||||
#include <linux/list_sort.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/ratelimit.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/block.h>
|
||||
|
||||
#include "blk.h"
|
||||
#include "blk-cgroup.h"
|
||||
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
|
||||
@ -280,7 +282,7 @@ EXPORT_SYMBOL(blk_stop_queue);
|
||||
*
|
||||
* This function does not cancel any asynchronous activity arising
|
||||
* out of elevator or throttling code. That would require elevaotor_exit()
|
||||
* and blk_throtl_exit() to be called with queue lock initialized.
|
||||
* and blkcg_exit_queue() to be called with queue lock initialized.
|
||||
*
|
||||
*/
|
||||
void blk_sync_queue(struct request_queue *q)
|
||||
@ -365,17 +367,23 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
|
||||
elv_drain_elevator(q);
|
||||
if (drain_all)
|
||||
blk_throtl_drain(q);
|
||||
/*
|
||||
* The caller might be trying to drain @q before its
|
||||
* elevator is initialized.
|
||||
*/
|
||||
if (q->elevator)
|
||||
elv_drain_elevator(q);
|
||||
|
||||
blkcg_drain_queue(q);
|
||||
|
||||
/*
|
||||
* This function might be called on a queue which failed
|
||||
* driver init after queue creation. Some drivers
|
||||
* (e.g. fd) get unhappy in such cases. Kick queue iff
|
||||
* dispatch queue has something on it.
|
||||
* driver init after queue creation or is not yet fully
|
||||
* active yet. Some drivers (e.g. fd and loop) get unhappy
|
||||
* in such cases. Kick queue iff dispatch queue has
|
||||
* something on it and @q has request_fn set.
|
||||
*/
|
||||
if (!list_empty(&q->queue_head))
|
||||
if (!list_empty(&q->queue_head) && q->request_fn)
|
||||
__blk_run_queue(q);
|
||||
|
||||
drain |= q->rq.elvpriv;
|
||||
@ -402,6 +410,49 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used. This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before. On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	bool drain;

	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	if (drain) {
		blk_drain_queue(q, false);
		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
||||
/**
|
||||
* blk_cleanup_queue - shutdown a request queue
|
||||
* @q: request queue to shutdown
|
||||
@ -418,6 +469,19 @@ void blk_cleanup_queue(struct request_queue *q)
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
|
||||
|
||||
spin_lock_irq(lock);
|
||||
|
||||
/*
|
||||
* Dead queue is permanently in bypass mode till released. Note
|
||||
* that, unlike blk_queue_bypass_start(), we aren't performing
|
||||
* synchronize_rcu() after entering bypass mode to avoid the delay
|
||||
* as some drivers create and destroy a lot of queues while
|
||||
* probing. This is still safe because blk_release_queue() will be
|
||||
* called only after the queue refcnt drops to zero and nothing,
|
||||
* RCU or not, would be traversing the queue by then.
|
||||
*/
|
||||
q->bypass_depth++;
|
||||
queue_flag_set(QUEUE_FLAG_BYPASS, q);
|
||||
|
||||
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
|
||||
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
|
||||
queue_flag_set(QUEUE_FLAG_DEAD, q);
|
||||
@ -428,13 +492,8 @@ void blk_cleanup_queue(struct request_queue *q)
|
||||
spin_unlock_irq(lock);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
|
||||
/*
|
||||
* Drain all requests queued before DEAD marking. The caller might
|
||||
* be trying to tear down @q before its elevator is initialized, in
|
||||
* which case we don't want to call into draining.
|
||||
*/
|
||||
if (q->elevator)
|
||||
blk_drain_queue(q, true);
|
||||
/* drain all requests queued before DEAD marking */
|
||||
blk_drain_queue(q, true);
|
||||
|
||||
/* @q won't process any more request, flush async actions */
|
||||
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
|
||||
@ -498,14 +557,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
|
||||
if (err)
|
||||
goto fail_id;
|
||||
|
||||
if (blk_throtl_init(q))
|
||||
goto fail_id;
|
||||
|
||||
setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
|
||||
laptop_mode_timer_fn, (unsigned long) q);
|
||||
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
|
||||
INIT_LIST_HEAD(&q->queue_head);
|
||||
INIT_LIST_HEAD(&q->timeout_list);
|
||||
INIT_LIST_HEAD(&q->icq_list);
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
INIT_LIST_HEAD(&q->blkg_list);
|
||||
#endif
|
||||
INIT_LIST_HEAD(&q->flush_queue[0]);
|
||||
INIT_LIST_HEAD(&q->flush_queue[1]);
|
||||
INIT_LIST_HEAD(&q->flush_data_in_flight);
|
||||
@ -522,6 +582,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
|
||||
*/
|
||||
q->queue_lock = &q->__queue_lock;
|
||||
|
||||
/*
|
||||
* A queue starts its life with bypass turned on to avoid
|
||||
* unnecessary bypass on/off overhead and nasty surprises during
|
||||
* init. The initial bypass will be finished at the end of
|
||||
* blk_init_allocated_queue().
|
||||
*/
|
||||
q->bypass_depth = 1;
|
||||
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
|
||||
|
||||
if (blkcg_init_queue(q))
|
||||
goto fail_id;
|
||||
|
||||
return q;
|
||||
|
||||
fail_id:
|
||||
@ -614,15 +686,15 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
|
||||
|
||||
q->sg_reserved_size = INT_MAX;
|
||||
|
||||
/*
|
||||
* all done
|
||||
*/
|
||||
if (!elevator_init(q, NULL)) {
|
||||
blk_queue_congestion_threshold(q);
|
||||
return q;
|
||||
}
|
||||
/* init elevator */
|
||||
if (elevator_init(q, NULL))
|
||||
return NULL;
|
||||
|
||||
return NULL;
|
||||
blk_queue_congestion_threshold(q);
|
||||
|
||||
/* all done, end the initial bypass */
|
||||
blk_queue_bypass_end(q);
|
||||
return q;
|
||||
}
|
||||
EXPORT_SYMBOL(blk_init_allocated_queue);
|
||||
|
||||
@ -648,33 +720,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
|
||||
mempool_free(rq, q->rq.rq_pool);
|
||||
}
|
||||
|
||||
static struct request *
|
||||
blk_alloc_request(struct request_queue *q, struct io_cq *icq,
|
||||
unsigned int flags, gfp_t gfp_mask)
|
||||
{
|
||||
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
|
||||
|
||||
if (!rq)
|
||||
return NULL;
|
||||
|
||||
blk_rq_init(q, rq);
|
||||
|
||||
rq->cmd_flags = flags | REQ_ALLOCED;
|
||||
|
||||
if (flags & REQ_ELVPRIV) {
|
||||
rq->elv.icq = icq;
|
||||
if (unlikely(elv_set_request(q, rq, gfp_mask))) {
|
||||
mempool_free(rq, q->rq.rq_pool);
|
||||
return NULL;
|
||||
}
|
||||
/* @rq->elv.icq holds on to io_context until @rq is freed */
|
||||
if (icq)
|
||||
get_io_context(icq->ioc);
|
||||
}
|
||||
|
||||
return rq;
|
||||
}
|
||||
|
||||
/*
|
||||
* ioc_batching returns true if the ioc is a valid batching request and
|
||||
* should be given priority access to a request.
|
||||
@ -762,6 +807,22 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* rq_ioc - determine io_context for request allocation
|
||||
* @bio: request being allocated is for this bio (can be %NULL)
|
||||
*
|
||||
* Determine io_context to use for request allocation for @bio. May return
|
||||
* %NULL if %current->io_context doesn't exist.
|
||||
*/
|
||||
static struct io_context *rq_ioc(struct bio *bio)
|
||||
{
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
if (bio && bio->bi_ioc)
|
||||
return bio->bi_ioc;
|
||||
#endif
|
||||
return current->io_context;
|
||||
}
|
||||
|
||||
/**
|
||||
* get_request - get a free request
|
||||
* @q: request_queue to allocate request from
|
||||
@ -779,7 +840,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
|
||||
static struct request *get_request(struct request_queue *q, int rw_flags,
|
||||
struct bio *bio, gfp_t gfp_mask)
|
||||
{
|
||||
struct request *rq = NULL;
|
||||
struct request *rq;
|
||||
struct request_list *rl = &q->rq;
|
||||
struct elevator_type *et;
|
||||
struct io_context *ioc;
|
||||
@ -789,7 +850,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
|
||||
int may_queue;
|
||||
retry:
|
||||
et = q->elevator->type;
|
||||
ioc = current->io_context;
|
||||
ioc = rq_ioc(bio);
|
||||
|
||||
if (unlikely(blk_queue_dead(q)))
|
||||
return NULL;
|
||||
@ -808,7 +869,7 @@ retry:
|
||||
*/
|
||||
if (!ioc && !retried) {
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
create_io_context(current, gfp_mask, q->node);
|
||||
create_io_context(gfp_mask, q->node);
|
||||
spin_lock_irq(q->queue_lock);
|
||||
retried = true;
|
||||
goto retry;
|
||||
@ -831,7 +892,7 @@ retry:
|
||||
* process is not a "batcher", and not
|
||||
* exempted by the IO scheduler
|
||||
*/
|
||||
goto out;
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -844,7 +905,7 @@ retry:
|
||||
* allocated with any setting of ->nr_requests
|
||||
*/
|
||||
if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
|
||||
goto out;
|
||||
return NULL;
|
||||
|
||||
rl->count[is_sync]++;
|
||||
rl->starved[is_sync] = 0;
|
||||
@ -859,8 +920,7 @@ retry:
|
||||
* Also, lookup icq while holding queue_lock. If it doesn't exist,
|
||||
* it will be created after releasing queue_lock.
|
||||
*/
|
||||
if (blk_rq_should_init_elevator(bio) &&
|
||||
!test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
|
||||
if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
|
||||
rw_flags |= REQ_ELVPRIV;
|
||||
rl->elvpriv++;
|
||||
if (et->icq_cache && ioc)
|
||||
@ -871,41 +931,36 @@ retry:
|
||||
rw_flags |= REQ_IO_STAT;
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
|
||||
/* create icq if missing */
|
||||
if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
|
||||
icq = ioc_create_icq(q, gfp_mask);
|
||||
if (!icq)
|
||||
goto fail_icq;
|
||||
/* allocate and init request */
|
||||
rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
|
||||
if (!rq)
|
||||
goto fail_alloc;
|
||||
|
||||
blk_rq_init(q, rq);
|
||||
rq->cmd_flags = rw_flags | REQ_ALLOCED;
|
||||
|
||||
/* init elvpriv */
|
||||
if (rw_flags & REQ_ELVPRIV) {
|
||||
if (unlikely(et->icq_cache && !icq)) {
|
||||
create_io_context(gfp_mask, q->node);
|
||||
ioc = rq_ioc(bio);
|
||||
if (!ioc)
|
||||
goto fail_elvpriv;
|
||||
|
||||
icq = ioc_create_icq(ioc, q, gfp_mask);
|
||||
if (!icq)
|
||||
goto fail_elvpriv;
|
||||
}
|
||||
|
||||
rq->elv.icq = icq;
|
||||
if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
|
||||
goto fail_elvpriv;
|
||||
|
||||
/* @rq->elv.icq holds io_context until @rq is freed */
|
||||
if (icq)
|
||||
get_io_context(icq->ioc);
|
||||
}
|
||||
|
||||
rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
|
||||
|
||||
fail_icq:
|
||||
if (unlikely(!rq)) {
|
||||
/*
|
||||
* Allocation failed presumably due to memory. Undo anything
|
||||
* we might have messed up.
|
||||
*
|
||||
* Allocating task should really be put onto the front of the
|
||||
* wait queue, but this is pretty rare.
|
||||
*/
|
||||
spin_lock_irq(q->queue_lock);
|
||||
freed_request(q, rw_flags);
|
||||
|
||||
/*
|
||||
* in the very unlikely event that allocation failed and no
|
||||
* requests for this direction was pending, mark us starved
|
||||
* so that freeing of a request in the other direction will
|
||||
* notice us. another possible fix would be to split the
|
||||
* rq mempool into READ and WRITE
|
||||
*/
|
||||
rq_starved:
|
||||
if (unlikely(rl->count[is_sync] == 0))
|
||||
rl->starved[is_sync] = 1;
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
/*
|
||||
* ioc may be NULL here, and ioc_batching will be false. That's
|
||||
* OK, if the queue is under the request limit then requests need
|
||||
@ -916,8 +971,48 @@ rq_starved:
|
||||
ioc->nr_batch_requests--;
|
||||
|
||||
trace_block_getrq(q, bio, rw_flags & 1);
|
||||
out:
|
||||
return rq;
|
||||
|
||||
fail_elvpriv:
|
||||
/*
|
||||
* elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
|
||||
* and may fail indefinitely under memory pressure and thus
|
||||
* shouldn't stall IO. Treat this request as !elvpriv. This will
|
||||
* disturb iosched and blkcg but weird is bettern than dead.
|
||||
*/
|
||||
printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
|
||||
dev_name(q->backing_dev_info.dev));
|
||||
|
||||
rq->cmd_flags &= ~REQ_ELVPRIV;
|
||||
rq->elv.icq = NULL;
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
rl->elvpriv--;
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
goto out;
|
||||
|
||||
fail_alloc:
|
||||
/*
|
||||
* Allocation failed presumably due to memory. Undo anything we
|
||||
* might have messed up.
|
||||
*
|
||||
* Allocating task should really be put onto the front of the wait
|
||||
* queue, but this is pretty rare.
|
||||
*/
|
||||
spin_lock_irq(q->queue_lock);
|
||||
freed_request(q, rw_flags);
|
||||
|
||||
/*
|
||||
* in the very unlikely event that allocation failed and no
|
||||
* requests for this direction was pending, mark us starved so that
|
||||
* freeing of a request in the other direction will notice
|
||||
* us. another possible fix would be to split the rq mempool into
|
||||
* READ and WRITE
|
||||
*/
|
||||
rq_starved:
|
||||
if (unlikely(rl->count[is_sync] == 0))
|
||||
rl->starved[is_sync] = 1;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -961,7 +1056,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
|
||||
* up to a big batch of them for a small period time.
|
||||
* See ioc_batching, ioc_set_batching
|
||||
*/
|
||||
create_io_context(current, GFP_NOIO, q->node);
|
||||
create_io_context(GFP_NOIO, q->node);
|
||||
ioc_set_batching(q, current->io_context);
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
|
block/blk-ioc.c (126 lines changed)
@ -155,20 +155,20 @@ void put_io_context(struct io_context *ioc)
|
||||
}
|
||||
EXPORT_SYMBOL(put_io_context);
|
||||
|
||||
/* Called by the exiting task */
|
||||
void exit_io_context(struct task_struct *task)
|
||||
/**
|
||||
* put_io_context_active - put active reference on ioc
|
||||
* @ioc: ioc of interest
|
||||
*
|
||||
* Undo get_io_context_active(). If active reference reaches zero after
|
||||
* put, @ioc can never issue further IOs and ioscheds are notified.
|
||||
*/
|
||||
void put_io_context_active(struct io_context *ioc)
|
||||
{
|
||||
struct io_context *ioc;
|
||||
struct io_cq *icq;
|
||||
struct hlist_node *n;
|
||||
unsigned long flags;
|
||||
struct io_cq *icq;
|
||||
|
||||
task_lock(task);
|
||||
ioc = task->io_context;
|
||||
task->io_context = NULL;
|
||||
task_unlock(task);
|
||||
|
||||
if (!atomic_dec_and_test(&ioc->nr_tasks)) {
|
||||
if (!atomic_dec_and_test(&ioc->active_ref)) {
|
||||
put_io_context(ioc);
|
||||
return;
|
||||
}
|
||||
@ -197,6 +197,20 @@ retry:
|
||||
put_io_context(ioc);
|
||||
}
|
||||
|
||||
/* Called by the exiting task */
|
||||
void exit_io_context(struct task_struct *task)
|
||||
{
|
||||
struct io_context *ioc;
|
||||
|
||||
task_lock(task);
|
||||
ioc = task->io_context;
|
||||
task->io_context = NULL;
|
||||
task_unlock(task);
|
||||
|
||||
atomic_dec(&ioc->nr_tasks);
|
||||
put_io_context_active(ioc);
|
||||
}
|
||||
|
||||
/**
|
||||
* ioc_clear_queue - break any ioc association with the specified queue
|
||||
* @q: request_queue being cleared
|
||||
@ -218,19 +232,18 @@ void ioc_clear_queue(struct request_queue *q)
|
||||
}
|
||||
}
|
||||
|
||||
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
|
||||
int node)
|
||||
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
|
||||
{
|
||||
struct io_context *ioc;
|
||||
|
||||
ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
|
||||
node);
|
||||
if (unlikely(!ioc))
|
||||
return;
|
||||
return -ENOMEM;
|
||||
|
||||
/* initialize */
|
||||
atomic_long_set(&ioc->refcount, 1);
|
||||
atomic_set(&ioc->nr_tasks, 1);
|
||||
atomic_set(&ioc->active_ref, 1);
|
||||
spin_lock_init(&ioc->lock);
|
||||
INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
|
||||
INIT_HLIST_HEAD(&ioc->icq_list);
|
||||
@ -250,6 +263,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
|
||||
else
|
||||
kmem_cache_free(iocontext_cachep, ioc);
|
||||
task_unlock(task);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -281,7 +296,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
|
||||
return ioc;
|
||||
}
|
||||
task_unlock(task);
|
||||
} while (create_io_context(task, gfp_flags, node));
|
||||
} while (!create_task_io_context(task, gfp_flags, node));
|
||||
|
||||
return NULL;
|
||||
}
|
||||
@ -325,26 +340,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);
|
||||
|
||||
/**
|
||||
* ioc_create_icq - create and link io_cq
|
||||
* @ioc: io_context of interest
|
||||
* @q: request_queue of interest
|
||||
* @gfp_mask: allocation mask
|
||||
*
|
||||
* Make sure io_cq linking %current->io_context and @q exists. If either
|
||||
* io_context and/or icq don't exist, they will be created using @gfp_mask.
|
||||
* Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they
|
||||
* will be created using @gfp_mask.
|
||||
*
|
||||
* The caller is responsible for ensuring @ioc won't go away and @q is
|
||||
* alive and will stay alive until this function returns.
|
||||
*/
|
||||
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
|
||||
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
struct elevator_type *et = q->elevator->type;
|
||||
struct io_context *ioc;
|
||||
struct io_cq *icq;
|
||||
|
||||
/* allocate stuff */
|
||||
ioc = create_io_context(current, gfp_mask, q->node);
|
||||
if (!ioc)
|
||||
return NULL;
|
||||
|
||||
icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
|
||||
q->node);
|
||||
if (!icq)
|
||||
@ -382,74 +394,6 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
|
||||
return icq;
|
||||
}
|
||||
|
||||
void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
|
||||
{
|
||||
struct io_cq *icq;
|
||||
struct hlist_node *n;
|
||||
|
||||
hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
|
||||
icq->flags |= flags;
|
||||
}
|
||||
|
||||
/**
|
||||
* ioc_ioprio_changed - notify ioprio change
|
||||
* @ioc: io_context of interest
|
||||
* @ioprio: new ioprio
|
||||
*
|
||||
* @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
|
||||
* icq's. iosched is responsible for checking the bit and applying it on
|
||||
* request issue path.
|
||||
*/
|
||||
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ioc->lock, flags);
|
||||
ioc->ioprio = ioprio;
|
||||
ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* ioc_cgroup_changed - notify cgroup change
|
||||
* @ioc: io_context of interest
|
||||
*
|
||||
* @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
|
||||
* iosched is responsible for checking the bit and applying it on request
|
||||
* issue path.
|
||||
*/
|
||||
void ioc_cgroup_changed(struct io_context *ioc)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ioc->lock, flags);
|
||||
ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(ioc_cgroup_changed);
|
||||
|
||||
/**
|
||||
* icq_get_changed - fetch and clear icq changed mask
|
||||
* @icq: icq of interest
|
||||
*
|
||||
* Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
|
||||
* @icq->ioc->lock.
|
||||
*/
|
||||
unsigned icq_get_changed(struct io_cq *icq)
|
||||
{
|
||||
unsigned int changed = 0;
|
||||
unsigned long flags;
|
||||
|
||||
if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
|
||||
spin_lock_irqsave(&icq->ioc->lock, flags);
|
||||
changed = icq->flags & ICQ_CHANGED_MASK;
|
||||
icq->flags &= ~ICQ_CHANGED_MASK;
|
||||
spin_unlock_irqrestore(&icq->ioc->lock, flags);
|
||||
}
|
||||
return changed;
|
||||
}
|
||||
EXPORT_SYMBOL(icq_get_changed);
|
||||
|
||||
static int __init blk_ioc_init(void)
|
||||
{
|
||||
iocontext_cachep = kmem_cache_create("blkdev_ioc",
|
||||
|
@@ -9,6 +9,7 @@
#include <linux/blktrace_api.h>

#include "blk.h"
#include "blk-cgroup.h"

struct queue_sysfs_entry {
	struct attribute attr;

@@ -479,6 +480,8 @@ static void blk_release_queue(struct kobject *kobj)

	blk_sync_queue(q);

	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);

@@ -486,15 +489,12 @@ static void blk_release_queue(struct kobject *kobj)
		elevator_exit(q->elevator);
	}

	blk_throtl_exit(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_throtl_release(q);
	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
@ -21,6 +21,8 @@ static int throtl_quantum = 32;
|
||||
/* Throttling is performed over 100ms slice and after that slice is renewed */
|
||||
static unsigned long throtl_slice = HZ/10; /* 100 ms */
|
||||
|
||||
static struct blkcg_policy blkcg_policy_throtl;
|
||||
|
||||
/* A workqueue to queue throttle related work */
|
||||
static struct workqueue_struct *kthrotld_workqueue;
|
||||
static void throtl_schedule_delayed_work(struct throtl_data *td,
|
||||
@ -38,9 +40,17 @@ struct throtl_rb_root {
|
||||
|
||||
#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
|
||||
|
||||
/* Per-cpu group stats */
|
||||
struct tg_stats_cpu {
|
||||
/* total bytes transferred */
|
||||
struct blkg_rwstat service_bytes;
|
||||
/* total IOs serviced, post merge */
|
||||
struct blkg_rwstat serviced;
|
||||
};
|
||||
|
||||
struct throtl_grp {
|
||||
/* List of throtl groups on the request queue*/
|
||||
struct hlist_node tg_node;
|
||||
/* must be the first member */
|
||||
struct blkg_policy_data pd;
|
||||
|
||||
/* active throtl group service_tree member */
|
||||
struct rb_node rb_node;
|
||||
@ -52,8 +62,6 @@ struct throtl_grp {
|
||||
*/
|
||||
unsigned long disptime;
|
||||
|
||||
struct blkio_group blkg;
|
||||
atomic_t ref;
|
||||
unsigned int flags;
|
||||
|
||||
/* Two lists for READ and WRITE */
|
||||
@ -80,18 +88,18 @@ struct throtl_grp {
|
||||
/* Some throttle limits got updated for the group */
|
||||
int limits_changed;
|
||||
|
||||
struct rcu_head rcu_head;
|
||||
/* Per cpu stats pointer */
|
||||
struct tg_stats_cpu __percpu *stats_cpu;
|
||||
|
||||
/* List of tgs waiting for per cpu stats memory to be allocated */
|
||||
struct list_head stats_alloc_node;
|
||||
};
|
||||
|
||||
struct throtl_data
|
||||
{
|
||||
/* List of throtl groups */
|
||||
struct hlist_head tg_list;
|
||||
|
||||
/* service tree for active throtl groups */
|
||||
struct throtl_rb_root tg_service_tree;
|
||||
|
||||
struct throtl_grp *root_tg;
|
||||
struct request_queue *queue;
|
||||
|
||||
/* Total Number of queued bios on READ and WRITE lists */
|
||||
@ -108,6 +116,33 @@ struct throtl_data
|
||||
int limits_changed;
|
||||
};
|
||||
|
||||
/* list and work item to allocate percpu group stats */
|
||||
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
|
||||
static LIST_HEAD(tg_stats_alloc_list);
|
||||
|
||||
static void tg_stats_alloc_fn(struct work_struct *);
|
||||
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
|
||||
|
||||
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
|
||||
{
|
||||
return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
|
||||
}
|
||||
|
||||
static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
|
||||
{
|
||||
return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
|
||||
}
|
||||
|
||||
static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
|
||||
{
|
||||
return pd_to_blkg(&tg->pd);
|
||||
}
|
||||
|
||||
static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
|
||||
{
|
||||
return blkg_to_tg(td->queue->root_blkg);
|
||||
}
|
||||
|
||||
enum tg_state_flags {
|
||||
THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
|
||||
};
|
||||
@ -128,244 +163,150 @@ static inline int throtl_tg_##name(const struct throtl_grp *tg) \
|
||||
|
||||
THROTL_TG_FNS(on_rr);
|
||||
|
||||
#define throtl_log_tg(td, tg, fmt, args...) \
|
||||
blk_add_trace_msg((td)->queue, "throtl %s " fmt, \
|
||||
blkg_path(&(tg)->blkg), ##args); \
|
||||
#define throtl_log_tg(td, tg, fmt, args...) do { \
|
||||
char __pbuf[128]; \
|
||||
\
|
||||
blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf)); \
|
||||
blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
|
||||
} while (0)
|
||||
|
||||
#define throtl_log(td, fmt, args...) \
|
||||
blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
|
||||
|
||||
static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
|
||||
{
|
||||
if (blkg)
|
||||
return container_of(blkg, struct throtl_grp, blkg);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline unsigned int total_nr_queued(struct throtl_data *td)
|
||||
{
|
||||
return td->nr_queued[0] + td->nr_queued[1];
|
||||
}
|
||||
|
||||
static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
|
||||
/*
|
||||
* Worker for allocating per cpu stat for tgs. This is scheduled on the
|
||||
* system_nrt_wq once there are some groups on the alloc_list waiting for
|
||||
* allocation.
|
||||
*/
|
||||
static void tg_stats_alloc_fn(struct work_struct *work)
|
||||
{
|
||||
atomic_inc(&tg->ref);
|
||||
return tg;
|
||||
static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */
|
||||
struct delayed_work *dwork = to_delayed_work(work);
|
||||
bool empty = false;
|
||||
|
||||
alloc_stats:
|
||||
if (!stats_cpu) {
|
||||
stats_cpu = alloc_percpu(struct tg_stats_cpu);
|
||||
if (!stats_cpu) {
|
||||
/* allocation failed, try again after some time */
|
||||
queue_delayed_work(system_nrt_wq, dwork,
|
||||
msecs_to_jiffies(10));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock_irq(&tg_stats_alloc_lock);
|
||||
|
||||
if (!list_empty(&tg_stats_alloc_list)) {
|
||||
struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
|
||||
struct throtl_grp,
|
||||
stats_alloc_node);
|
||||
swap(tg->stats_cpu, stats_cpu);
|
||||
list_del_init(&tg->stats_alloc_node);
|
||||
}
|
||||
|
||||
empty = list_empty(&tg_stats_alloc_list);
|
||||
spin_unlock_irq(&tg_stats_alloc_lock);
|
||||
if (!empty)
|
||||
goto alloc_stats;
|
||||
}
|
||||
|
||||
static void throtl_free_tg(struct rcu_head *head)
|
||||
static void throtl_pd_init(struct blkcg_gq *blkg)
|
||||
{
|
||||
struct throtl_grp *tg;
|
||||
struct throtl_grp *tg = blkg_to_tg(blkg);
|
||||
unsigned long flags;
|
||||
|
||||
tg = container_of(head, struct throtl_grp, rcu_head);
|
||||
free_percpu(tg->blkg.stats_cpu);
|
||||
kfree(tg);
|
||||
}
|
||||
|
||||
static void throtl_put_tg(struct throtl_grp *tg)
|
||||
{
|
||||
BUG_ON(atomic_read(&tg->ref) <= 0);
|
||||
if (!atomic_dec_and_test(&tg->ref))
|
||||
return;
|
||||
|
||||
/*
|
||||
* A group is freed in rcu manner. But having an rcu lock does not
|
||||
* mean that one can access all the fields of blkg and assume these
|
||||
* are valid. For example, don't try to follow throtl_data and
|
||||
* request queue links.
|
||||
*
|
||||
* Having a reference to blkg under an rcu allows acess to only
|
||||
* values local to groups like group stats and group rate limits
|
||||
*/
|
||||
call_rcu(&tg->rcu_head, throtl_free_tg);
|
||||
}
|
||||
|
||||
static void throtl_init_group(struct throtl_grp *tg)
|
||||
{
|
||||
INIT_HLIST_NODE(&tg->tg_node);
|
||||
RB_CLEAR_NODE(&tg->rb_node);
|
||||
bio_list_init(&tg->bio_lists[0]);
|
||||
bio_list_init(&tg->bio_lists[1]);
|
||||
tg->limits_changed = false;
|
||||
|
||||
/* Practically unlimited BW */
|
||||
tg->bps[0] = tg->bps[1] = -1;
|
||||
tg->iops[0] = tg->iops[1] = -1;
|
||||
tg->bps[READ] = -1;
|
||||
tg->bps[WRITE] = -1;
|
||||
tg->iops[READ] = -1;
|
||||
tg->iops[WRITE] = -1;
|
||||
|
||||
/*
|
||||
* Take the initial reference that will be released on destroy
|
||||
* This can be thought of a joint reference by cgroup and
|
||||
* request queue which will be dropped by either request queue
|
||||
* exit or cgroup deletion path depending on who is exiting first.
|
||||
* Ugh... We need to perform per-cpu allocation for tg->stats_cpu
|
||||
* but percpu allocator can't be called from IO path. Queue tg on
|
||||
* tg_stats_alloc_list and allocate from work item.
|
||||
*/
|
||||
atomic_set(&tg->ref, 1);
|
||||
spin_lock_irqsave(&tg_stats_alloc_lock, flags);
|
||||
list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
|
||||
queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
|
||||
spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
|
||||
}
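The hunk above defers the per-cpu stats allocation out of the IO path: throtl_pd_init() parks the new group on tg_stats_alloc_list and tg_stats_alloc_fn() retries the allocation from a work item until the list is drained. A rough userspace sketch of that defer-and-retry pattern follows; struct grp, pending_list and alloc_worker are invented names standing in for the kernel structures, not kernel APIs.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct grp {
	struct grp *next;
	long *stats;			/* stands in for tg->stats_cpu */
};

static struct grp *pending_list;	/* stands in for tg_stats_alloc_list */
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: keep allocating and handing buffers over until the list drains. */
static void *alloc_worker(void *arg)
{
	(void)arg;
	for (;;) {
		long *stats = calloc(16, sizeof(*stats));
		struct grp *g;

		if (!stats) {			/* allocation failed, retry later */
			usleep(10 * 1000);
			continue;
		}

		pthread_mutex_lock(&pending_lock);
		g = pending_list;
		if (g) {
			pending_list = g->next;
			g->stats = stats;	/* hand the buffer to the group */
			stats = NULL;
		}
		pthread_mutex_unlock(&pending_lock);

		free(stats);			/* nobody was waiting for it */
		if (!g)
			return NULL;		/* list drained, stop */
	}
}

int main(void)
{
	struct grp g = { .next = NULL, .stats = NULL };
	pthread_t worker;

	/* "IO path": only queue the group, never allocate here. */
	pthread_mutex_lock(&pending_lock);
	g.next = pending_list;
	pending_list = &g;
	pthread_mutex_unlock(&pending_lock);

	pthread_create(&worker, NULL, alloc_worker, NULL);
	pthread_join(worker, NULL);
	printf("stats allocated: %s\n", g.stats ? "yes" : "no");
	free(g.stats);
	return 0;
}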
|
||||
|
||||
/* Should be called with rcu read lock held (needed for blkcg) */
|
||||
static void
|
||||
throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
|
||||
static void throtl_pd_exit(struct blkcg_gq *blkg)
|
||||
{
|
||||
hlist_add_head(&tg->tg_node, &td->tg_list);
|
||||
td->nr_undestroyed_grps++;
|
||||
struct throtl_grp *tg = blkg_to_tg(blkg);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&tg_stats_alloc_lock, flags);
|
||||
list_del_init(&tg->stats_alloc_node);
|
||||
spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
|
||||
|
||||
free_percpu(tg->stats_cpu);
|
||||
}
|
||||
|
||||
static void
|
||||
__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
|
||||
static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
|
||||
{
|
||||
struct backing_dev_info *bdi = &td->queue->backing_dev_info;
|
||||
unsigned int major, minor;
|
||||
struct throtl_grp *tg = blkg_to_tg(blkg);
|
||||
int cpu;
|
||||
|
||||
if (!tg || tg->blkg.dev)
|
||||
if (tg->stats_cpu == NULL)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Fill in device details for a group which might not have been
|
||||
* filled at group creation time as queue was being instantiated
|
||||
* and driver had not attached a device yet
|
||||
*/
|
||||
if (bdi->dev && dev_name(bdi->dev)) {
|
||||
sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
|
||||
tg->blkg.dev = MKDEV(major, minor);
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
|
||||
|
||||
blkg_rwstat_reset(&sc->service_bytes);
|
||||
blkg_rwstat_reset(&sc->serviced);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Should be called without queue lock held. Here queue lock will be
|
||||
* taken rarely. It will be taken only once during life time of a group
|
||||
* if need be
|
||||
*/
|
||||
static void
|
||||
throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
|
||||
static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
|
||||
struct blkcg *blkcg)
|
||||
{
|
||||
if (!tg || tg->blkg.dev)
|
||||
return;
|
||||
|
||||
spin_lock_irq(td->queue->queue_lock);
|
||||
__throtl_tg_fill_dev_details(td, tg);
|
||||
spin_unlock_irq(td->queue->queue_lock);
|
||||
}
|
||||
|
||||
static void throtl_init_add_tg_lists(struct throtl_data *td,
|
||||
struct throtl_grp *tg, struct blkio_cgroup *blkcg)
|
||||
{
|
||||
__throtl_tg_fill_dev_details(td, tg);
|
||||
|
||||
/* Add group onto cgroup list */
|
||||
blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
|
||||
tg->blkg.dev, BLKIO_POLICY_THROTL);
|
||||
|
||||
tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
|
||||
tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
|
||||
tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
|
||||
tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
|
||||
|
||||
throtl_add_group_to_td_list(td, tg);
|
||||
}
|
||||
|
||||
/* Should be called without queue lock and outside of rcu period */
|
||||
static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
|
||||
{
|
||||
struct throtl_grp *tg = NULL;
|
||||
int ret;
|
||||
|
||||
tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
|
||||
if (!tg)
|
||||
return NULL;
|
||||
|
||||
ret = blkio_alloc_blkg_stats(&tg->blkg);
|
||||
|
||||
if (ret) {
|
||||
kfree(tg);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
throtl_init_group(tg);
|
||||
return tg;
|
||||
}
|
||||
|
||||
static struct
|
||||
throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
|
||||
{
|
||||
struct throtl_grp *tg = NULL;
|
||||
void *key = td;
|
||||
|
||||
/*
|
||||
* This is the common case when there are no blkio cgroups.
|
||||
* Avoid lookup in this case
|
||||
*/
|
||||
if (blkcg == &blkio_root_cgroup)
|
||||
tg = td->root_tg;
|
||||
else
|
||||
tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
|
||||
* This is the common case when there are no blkcgs. Avoid lookup
|
||||
* in this case
|
||||
*/
|
||||
if (blkcg == &blkcg_root)
|
||||
return td_root_tg(td);
|
||||
|
||||
__throtl_tg_fill_dev_details(td, tg);
|
||||
return tg;
|
||||
return blkg_to_tg(blkg_lookup(blkcg, td->queue));
|
||||
}
|
||||
|
||||
static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
|
||||
static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
|
||||
struct blkcg *blkcg)
|
||||
{
|
||||
struct throtl_grp *tg = NULL, *__tg = NULL;
|
||||
struct blkio_cgroup *blkcg;
|
||||
struct request_queue *q = td->queue;
|
||||
|
||||
/* no throttling for dead queue */
|
||||
if (unlikely(blk_queue_dead(q)))
|
||||
return NULL;
|
||||
|
||||
rcu_read_lock();
|
||||
blkcg = task_blkio_cgroup(current);
|
||||
tg = throtl_find_tg(td, blkcg);
|
||||
if (tg) {
|
||||
rcu_read_unlock();
|
||||
return tg;
|
||||
}
|
||||
struct throtl_grp *tg = NULL;
|
||||
|
||||
/*
|
||||
* Need to allocate a group. Allocation of group also needs allocation
|
||||
* of per cpu stats which in-turn takes a mutex() and can block. Hence
|
||||
* we need to drop rcu lock and queue_lock before we call alloc.
|
||||
* This is the common case when there are no blkcgs. Avoid lookup
|
||||
* in this case
|
||||
*/
|
||||
rcu_read_unlock();
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
if (blkcg == &blkcg_root) {
|
||||
tg = td_root_tg(td);
|
||||
} else {
|
||||
struct blkcg_gq *blkg;
|
||||
|
||||
tg = throtl_alloc_tg(td);
|
||||
blkg = blkg_lookup_create(blkcg, q);
|
||||
|
||||
/* Group allocated and queue is still alive. take the lock */
|
||||
spin_lock_irq(q->queue_lock);
|
||||
|
||||
/* Make sure @q is still alive */
|
||||
if (unlikely(blk_queue_dead(q))) {
|
||||
kfree(tg);
|
||||
return NULL;
|
||||
/* if %NULL and @q is alive, fall back to root_tg */
|
||||
if (!IS_ERR(blkg))
|
||||
tg = blkg_to_tg(blkg);
|
||||
else if (!blk_queue_dead(q))
|
||||
tg = td_root_tg(td);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the new group. After sleeping, read the blkcg again.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
blkcg = task_blkio_cgroup(current);
|
||||
|
||||
/*
|
||||
* If some other thread already allocated the group while we were
|
||||
* not holding queue lock, free up the group
|
||||
*/
|
||||
__tg = throtl_find_tg(td, blkcg);
|
||||
|
||||
if (__tg) {
|
||||
kfree(tg);
|
||||
rcu_read_unlock();
|
||||
return __tg;
|
||||
}
|
||||
|
||||
/* Group allocation failed. Account the IO to root group */
|
||||
if (!tg) {
|
||||
tg = td->root_tg;
|
||||
return tg;
|
||||
}
|
||||
|
||||
throtl_init_add_tg_lists(td, tg, blkcg);
|
||||
rcu_read_unlock();
|
||||
return tg;
|
||||
}
|
||||
|
||||
@ -734,16 +675,41 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
					 int rw)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}
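throtl_update_dispatch_stats() above only touches this CPU's copy of the counters with interrupts disabled, so the hot path needs no shared lock; the read side (tg_prfill_cpu_rwstat(), further down) folds the per-cpu copies into one total. A minimal standalone illustration of that write-local/sum-on-read split, using a plain array indexed by a pretend CPU id instead of real per-cpu data:

#include <stdio.h>

#define NR_FAKE_CPUS	4

/* One slot per "cpu"; the update path only ever touches its own slot. */
static unsigned long long serviced[NR_FAKE_CPUS];

static void update_stats(int cpu)
{
	serviced[cpu]++;		/* no shared lock on the hot path */
}

/* Read side: fold every per-cpu copy into one total. */
static unsigned long long read_stats(void)
{
	unsigned long long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		sum += serviced[cpu];
	return sum;
}

int main(void)
{
	update_stats(0);
	update_stats(2);
	update_stats(2);
	printf("serviced = %llu\n", read_stats());	/* prints 3 */
	return 0;
}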
|
||||
|
||||
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
|
||||
{
|
||||
bool rw = bio_data_dir(bio);
|
||||
bool sync = rw_is_sync(bio->bi_rw);
|
||||
|
||||
/* Charge the bio to the group */
|
||||
tg->bytes_disp[rw] += bio->bi_size;
|
||||
tg->io_disp[rw]++;
|
||||
|
||||
blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
|
||||
throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
|
||||
}
|
||||
|
||||
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
|
||||
@ -753,7 +719,7 @@ static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
|
||||
|
||||
bio_list_add(&tg->bio_lists[rw], bio);
|
||||
/* Take a bio reference on tg */
|
||||
throtl_ref_get_tg(tg);
|
||||
blkg_get(tg_to_blkg(tg));
|
||||
tg->nr_queued[rw]++;
|
||||
td->nr_queued[rw]++;
|
||||
throtl_enqueue_tg(td, tg);
|
||||
@ -786,8 +752,8 @@ static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
|
||||
|
||||
bio = bio_list_pop(&tg->bio_lists[rw]);
|
||||
tg->nr_queued[rw]--;
|
||||
/* Drop bio reference on tg */
|
||||
throtl_put_tg(tg);
|
||||
/* Drop bio reference on blkg */
|
||||
blkg_put(tg_to_blkg(tg));
|
||||
|
||||
BUG_ON(td->nr_queued[rw] <= 0);
|
||||
td->nr_queued[rw]--;
|
||||
@ -865,8 +831,8 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
|
||||
|
||||
static void throtl_process_limit_change(struct throtl_data *td)
|
||||
{
|
||||
struct throtl_grp *tg;
|
||||
struct hlist_node *pos, *n;
|
||||
struct request_queue *q = td->queue;
|
||||
struct blkcg_gq *blkg, *n;
|
||||
|
||||
if (!td->limits_changed)
|
||||
return;
|
||||
@ -875,7 +841,9 @@ static void throtl_process_limit_change(struct throtl_data *td)
|
||||
|
||||
throtl_log(td, "limits changed");
|
||||
|
||||
hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
|
||||
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
|
||||
struct throtl_grp *tg = blkg_to_tg(blkg);
|
||||
|
||||
if (!tg->limits_changed)
|
||||
continue;
|
||||
|
||||
@ -973,119 +941,158 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
|
||||
static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
|
||||
struct blkg_policy_data *pd, int off)
|
||||
{
|
||||
/* Something wrong if we are trying to remove same group twice */
|
||||
BUG_ON(hlist_unhashed(&tg->tg_node));
|
||||
struct throtl_grp *tg = pd_to_tg(pd);
|
||||
struct blkg_rwstat rwstat = { }, tmp;
|
||||
int i, cpu;
|
||||
|
||||
hlist_del_init(&tg->tg_node);
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
|
||||
|
||||
/*
|
||||
* Put the reference taken at the time of creation so that when all
|
||||
* queues are gone, group can be destroyed.
|
||||
*/
|
||||
throtl_put_tg(tg);
|
||||
td->nr_undestroyed_grps--;
|
||||
}
|
||||
|
||||
static void throtl_release_tgs(struct throtl_data *td)
|
||||
{
|
||||
struct hlist_node *pos, *n;
|
||||
struct throtl_grp *tg;
|
||||
|
||||
hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
|
||||
/*
|
||||
* If cgroup removal path got to blk_group first and removed
|
||||
* it from cgroup list, then it will take care of destroying
|
||||
* cfqg also.
|
||||
*/
|
||||
if (!blkiocg_del_blkio_group(&tg->blkg))
|
||||
throtl_destroy_tg(td, tg);
|
||||
tmp = blkg_rwstat_read((void *)sc + off);
|
||||
for (i = 0; i < BLKG_RWSTAT_NR; i++)
|
||||
rwstat.cnt[i] += tmp.cnt[i];
|
||||
}
|
||||
|
||||
return __blkg_prfill_rwstat(sf, pd, &rwstat);
|
||||
}
|
||||
|
||||
/*
|
||||
* Blk cgroup controller notification saying that blkio_group object is being
|
||||
* delinked as associated cgroup object is going away. That also means that
|
||||
* no new IO will come in this group. So get rid of this group as soon as
|
||||
* any pending IO in the group is finished.
|
||||
*
|
||||
* This function is called under rcu_read_lock(). key is the rcu protected
|
||||
* pointer. That means "key" is a valid throtl_data pointer as long as we are
|
||||
* rcu read lock.
|
||||
*
|
||||
* "key" was fetched from blkio_group under blkio_cgroup->lock. That means
|
||||
* it should not be NULL as even if the queue was going away, cgroup deletion
|
||||
* path got to it first.
|
||||
*/
|
||||
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
|
||||
static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct throtl_data *td = key;
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
|
||||
spin_lock_irqsave(td->queue->queue_lock, flags);
|
||||
throtl_destroy_tg(td, tg_of_blkg(blkg));
|
||||
spin_unlock_irqrestore(td->queue->queue_lock, flags);
|
||||
blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
|
||||
cft->private, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void throtl_update_blkio_group_common(struct throtl_data *td,
|
||||
struct throtl_grp *tg)
|
||||
static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}
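tg_prfill_conf_u64() and tg_prfill_conf_uint() above are generic: the cftype entries in throtl_files[] further down pass offsetof(struct throtl_grp, ...) in .private, so one helper can print any of the four limit fields. A small self-contained example of the same offsetof trick; the struct and field names here are made up for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct limits {
	uint64_t read_bps;
	uint64_t write_bps;
	unsigned int read_iops;
	unsigned int write_iops;
};

/* Generic printer: the caller names the field by its byte offset. */
static void print_u64_field(const struct limits *l, size_t off)
{
	uint64_t v = *(const uint64_t *)((const char *)l + off);

	printf("%llu\n", (unsigned long long)v);
}

int main(void)
{
	struct limits l = { .read_bps = 1048576, .write_bps = 524288 };

	print_u64_field(&l, offsetof(struct limits, read_bps));	/* 1048576 */
	print_u64_field(&l, offsetof(struct limits, write_bps));	/* 524288 */
	return 0;
}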
|
||||
|
||||
static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
{
|
||||
blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
|
||||
&blkcg_policy_throtl, cft->private, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
|
||||
struct seq_file *sf)
|
||||
{
|
||||
blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
|
||||
&blkcg_policy_throtl, cft->private, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
|
||||
bool is_u64)
|
||||
{
|
||||
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
|
||||
struct blkg_conf_ctx ctx;
|
||||
struct throtl_grp *tg;
|
||||
struct throtl_data *td;
|
||||
int ret;
|
||||
|
||||
ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
tg = blkg_to_tg(ctx.blkg);
|
||||
td = ctx.blkg->q->td;
|
||||
|
||||
if (!ctx.v)
|
||||
ctx.v = -1;
|
||||
|
||||
if (is_u64)
|
||||
*(u64 *)((void *)tg + cft->private) = ctx.v;
|
||||
else
|
||||
*(unsigned int *)((void *)tg + cft->private) = ctx.v;
|
||||
|
||||
/* XXX: we don't need the following deferred processing */
|
||||
xchg(&tg->limits_changed, true);
|
||||
xchg(&td->limits_changed, true);
|
||||
/* Schedule a work now to process the limit change */
|
||||
throtl_schedule_delayed_work(td, 0);
|
||||
|
||||
blkg_conf_finish(&ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* For all update functions, key should be a valid pointer because these
|
||||
* update functions are called under blkcg_lock, that means, blkg is
|
||||
* valid and in turn key is valid. queue exit path can not race because
|
||||
* of blkcg_lock
|
||||
*
|
||||
* Can not take queue lock in update functions as queue lock under blkcg_lock
|
||||
* is not allowed. Under other paths we take blkcg_lock under queue_lock.
|
||||
*/
|
||||
static void throtl_update_blkio_group_read_bps(void *key,
|
||||
struct blkio_group *blkg, u64 read_bps)
|
||||
static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
|
||||
const char *buf)
|
||||
{
|
||||
struct throtl_data *td = key;
|
||||
struct throtl_grp *tg = tg_of_blkg(blkg);
|
||||
|
||||
tg->bps[READ] = read_bps;
|
||||
throtl_update_blkio_group_common(td, tg);
|
||||
return tg_set_conf(cgrp, cft, buf, true);
|
||||
}
|
||||
|
||||
static void throtl_update_blkio_group_write_bps(void *key,
|
||||
struct blkio_group *blkg, u64 write_bps)
|
||||
static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
|
||||
const char *buf)
|
||||
{
|
||||
struct throtl_data *td = key;
|
||||
struct throtl_grp *tg = tg_of_blkg(blkg);
|
||||
|
||||
tg->bps[WRITE] = write_bps;
|
||||
throtl_update_blkio_group_common(td, tg);
|
||||
return tg_set_conf(cgrp, cft, buf, false);
|
||||
}
|
||||
|
||||
static void throtl_update_blkio_group_read_iops(void *key,
|
||||
struct blkio_group *blkg, unsigned int read_iops)
|
||||
{
|
||||
struct throtl_data *td = key;
|
||||
struct throtl_grp *tg = tg_of_blkg(blkg);
|
||||
|
||||
tg->iops[READ] = read_iops;
|
||||
throtl_update_blkio_group_common(td, tg);
|
||||
}
|
||||
|
||||
static void throtl_update_blkio_group_write_iops(void *key,
|
||||
struct blkio_group *blkg, unsigned int write_iops)
|
||||
{
|
||||
struct throtl_data *td = key;
|
||||
struct throtl_grp *tg = tg_of_blkg(blkg);
|
||||
|
||||
tg->iops[WRITE] = write_iops;
|
||||
throtl_update_blkio_group_common(td, tg);
|
||||
}
|
||||
static struct cftype throtl_files[] = {
|
||||
{
|
||||
.name = "throttle.read_bps_device",
|
||||
.private = offsetof(struct throtl_grp, bps[READ]),
|
||||
.read_seq_string = tg_print_conf_u64,
|
||||
.write_string = tg_set_conf_u64,
|
||||
.max_write_len = 256,
|
||||
},
|
||||
{
|
||||
.name = "throttle.write_bps_device",
|
||||
.private = offsetof(struct throtl_grp, bps[WRITE]),
|
||||
.read_seq_string = tg_print_conf_u64,
|
||||
.write_string = tg_set_conf_u64,
|
||||
.max_write_len = 256,
|
||||
},
|
||||
{
|
||||
.name = "throttle.read_iops_device",
|
||||
.private = offsetof(struct throtl_grp, iops[READ]),
|
||||
.read_seq_string = tg_print_conf_uint,
|
||||
.write_string = tg_set_conf_uint,
|
||||
.max_write_len = 256,
|
||||
},
|
||||
{
|
||||
.name = "throttle.write_iops_device",
|
||||
.private = offsetof(struct throtl_grp, iops[WRITE]),
|
||||
.read_seq_string = tg_print_conf_uint,
|
||||
.write_string = tg_set_conf_uint,
|
||||
.max_write_len = 256,
|
||||
},
|
||||
{
|
||||
.name = "throttle.io_service_bytes",
|
||||
.private = offsetof(struct tg_stats_cpu, service_bytes),
|
||||
.read_seq_string = tg_print_cpu_rwstat,
|
||||
},
|
||||
{
|
||||
.name = "throttle.io_serviced",
|
||||
.private = offsetof(struct tg_stats_cpu, serviced),
|
||||
.read_seq_string = tg_print_cpu_rwstat,
|
||||
},
|
||||
{ } /* terminate */
|
||||
};
|
||||
|
||||
static void throtl_shutdown_wq(struct request_queue *q)
|
||||
{
|
||||
@ -1094,19 +1101,13 @@ static void throtl_shutdown_wq(struct request_queue *q)
|
||||
cancel_delayed_work_sync(&td->throtl_work);
|
||||
}
|
||||
|
||||
static struct blkio_policy_type blkio_policy_throtl = {
|
||||
.ops = {
|
||||
.blkio_unlink_group_fn = throtl_unlink_blkio_group,
|
||||
.blkio_update_group_read_bps_fn =
|
||||
throtl_update_blkio_group_read_bps,
|
||||
.blkio_update_group_write_bps_fn =
|
||||
throtl_update_blkio_group_write_bps,
|
||||
.blkio_update_group_read_iops_fn =
|
||||
throtl_update_blkio_group_read_iops,
|
||||
.blkio_update_group_write_iops_fn =
|
||||
throtl_update_blkio_group_write_iops,
|
||||
},
|
||||
.plid = BLKIO_POLICY_THROTL,
|
||||
static struct blkcg_policy blkcg_policy_throtl = {
|
||||
.pd_size = sizeof(struct throtl_grp),
|
||||
.cftypes = throtl_files,
|
||||
|
||||
.pd_init_fn = throtl_pd_init,
|
||||
.pd_exit_fn = throtl_pd_exit,
|
||||
.pd_reset_stats_fn = throtl_pd_reset_stats,
|
||||
};
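blkcg_policy_throtl above replaces the old blkio_policy_type + ops pair with a single flat descriptor: per-group data size, cftypes and callbacks. The registration shape itself — a static descriptor slotted into a small fixed table, which is presumably what blkcg_policy_register() does up to BLKCG_MAX_POLS entries — can be sketched in plain C. io_policy and register_policy below are invented names, not kernel APIs:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MAX_POLS 2			/* mirrors BLKCG_MAX_POLS */

struct io_policy {
	const char *name;
	size_t pd_size;			/* per-group private data size */
	void (*pd_init)(void *pd);	/* optional callback */
};

static struct io_policy *policies[MAX_POLS];

/* Register: find a free slot; the slot index plays the role of a plid. */
static int register_policy(struct io_policy *pol)
{
	int i;

	for (i = 0; i < MAX_POLS; i++) {
		if (!policies[i]) {
			policies[i] = pol;
			return 0;
		}
	}
	return -1;			/* table full */
}

static void throttle_pd_init(void *pd)
{
	memset(pd, 0, sizeof(int));
}

static struct io_policy throttle_policy = {
	.name	 = "throttle",
	.pd_size = sizeof(int),
	.pd_init = throttle_pd_init,
};

int main(void)
{
	printf("register: %d\n", register_policy(&throttle_policy));
	return 0;
}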
|
||||
|
||||
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
|
||||
@ -1114,7 +1115,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
|
||||
struct throtl_data *td = q->td;
|
||||
struct throtl_grp *tg;
|
||||
bool rw = bio_data_dir(bio), update_disptime = true;
|
||||
struct blkio_cgroup *blkcg;
|
||||
struct blkcg *blkcg;
|
||||
bool throttled = false;
|
||||
|
||||
if (bio->bi_rw & REQ_THROTTLED) {
|
||||
@ -1122,33 +1123,31 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* bio_associate_current() needs ioc, try creating */
|
||||
create_io_context(GFP_ATOMIC, q->node);
|
||||
|
||||
/*
|
||||
* A throtl_grp pointer retrieved under rcu can be used to access
|
||||
* basic fields like stats and io rates. If a group has no rules,
|
||||
* just update the dispatch stats in lockless manner and return.
|
||||
*/
|
||||
|
||||
rcu_read_lock();
|
||||
blkcg = task_blkio_cgroup(current);
|
||||
tg = throtl_find_tg(td, blkcg);
|
||||
blkcg = bio_blkcg(bio);
|
||||
tg = throtl_lookup_tg(td, blkcg);
|
||||
if (tg) {
|
||||
throtl_tg_fill_dev_details(td, tg);
|
||||
|
||||
if (tg_no_rule_group(tg, rw)) {
|
||||
blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
|
||||
rw, rw_is_sync(bio->bi_rw));
|
||||
rcu_read_unlock();
|
||||
goto out;
|
||||
throtl_update_dispatch_stats(tg_to_blkg(tg),
|
||||
bio->bi_size, bio->bi_rw);
|
||||
goto out_unlock_rcu;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
/*
|
||||
* Either group has not been allocated yet or it is not an unlimited
|
||||
* IO group
|
||||
*/
|
||||
spin_lock_irq(q->queue_lock);
|
||||
tg = throtl_get_tg(td);
|
||||
tg = throtl_lookup_create_tg(td, blkcg);
|
||||
if (unlikely(!tg))
|
||||
goto out_unlock;
|
||||
|
||||
@ -1189,6 +1188,7 @@ queue_bio:
|
||||
tg->io_disp[rw], tg->iops[rw],
|
||||
tg->nr_queued[READ], tg->nr_queued[WRITE]);
|
||||
|
||||
bio_associate_current(bio);
|
||||
throtl_add_bio_tg(q->td, tg, bio);
|
||||
throttled = true;
|
||||
|
||||
@ -1199,6 +1199,8 @@ queue_bio:
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
out_unlock_rcu:
|
||||
rcu_read_unlock();
|
||||
out:
|
||||
return throttled;
|
||||
}
|
||||
@ -1241,79 +1243,31 @@ void blk_throtl_drain(struct request_queue *q)
|
||||
int blk_throtl_init(struct request_queue *q)
|
||||
{
|
||||
struct throtl_data *td;
|
||||
struct throtl_grp *tg;
|
||||
int ret;
|
||||
|
||||
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
|
||||
if (!td)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_HLIST_HEAD(&td->tg_list);
|
||||
td->tg_service_tree = THROTL_RB_ROOT;
|
||||
td->limits_changed = false;
|
||||
INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
|
||||
|
||||
/* alloc and Init root group. */
|
||||
td->queue = q;
|
||||
tg = throtl_alloc_tg(td);
|
||||
|
||||
if (!tg) {
|
||||
kfree(td);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
td->root_tg = tg;
|
||||
|
||||
rcu_read_lock();
|
||||
throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
|
||||
rcu_read_unlock();
|
||||
|
||||
/* Attach throtl data to request queue */
|
||||
q->td = td;
|
||||
return 0;
|
||||
td->queue = q;
|
||||
|
||||
/* activate policy */
|
||||
ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
|
||||
if (ret)
|
||||
kfree(td);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void blk_throtl_exit(struct request_queue *q)
|
||||
{
|
||||
struct throtl_data *td = q->td;
|
||||
bool wait = false;
|
||||
|
||||
BUG_ON(!td);
|
||||
|
||||
BUG_ON(!q->td);
|
||||
throtl_shutdown_wq(q);
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
throtl_release_tgs(td);
|
||||
|
||||
/* If there are other groups */
|
||||
if (td->nr_undestroyed_grps > 0)
|
||||
wait = true;
|
||||
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
|
||||
/*
|
||||
* Wait for tg->blkg->key accessors to exit their grace periods.
|
||||
* Do this wait only if there are other undestroyed groups out
|
||||
* there (other than root group). This can happen if cgroup deletion
|
||||
* path claimed the responsibility of cleaning up a group before
|
||||
* queue cleanup code gets to the group.
|
||||
*
|
||||
* Do not call synchronize_rcu() unconditionally as there are drivers
|
||||
* which create/delete request queue hundreds of times during scan/boot
|
||||
* and synchronize_rcu() can take significant time and slow down boot.
|
||||
*/
|
||||
if (wait)
|
||||
synchronize_rcu();
|
||||
|
||||
/*
|
||||
	 * Just to be safe: if somebody updated limits through cgroup after
	 * the previous flush and another work item got queued, cancel it.
|
||||
*/
|
||||
throtl_shutdown_wq(q);
|
||||
}
|
||||
|
||||
void blk_throtl_release(struct request_queue *q)
|
||||
{
|
||||
blkcg_deactivate_policy(q, &blkcg_policy_throtl);
|
||||
kfree(q->td);
|
||||
}
|
||||
|
||||
@ -1323,8 +1277,7 @@ static int __init throtl_init(void)
|
||||
if (!kthrotld_workqueue)
|
||||
panic("Failed to create kthrotld\n");
|
||||
|
||||
blkio_policy_register(&blkio_policy_throtl);
|
||||
return 0;
|
||||
return blkcg_policy_register(&blkcg_policy_throtl);
|
||||
}
|
||||
|
||||
module_init(throtl_init);
|
||||
|
block/blk.h (32 lines changed)
@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
|
||||
struct bio *bio);
|
||||
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
|
||||
struct bio *bio);
|
||||
void blk_drain_queue(struct request_queue *q, bool drain_all);
|
||||
void blk_queue_bypass_start(struct request_queue *q);
|
||||
void blk_queue_bypass_end(struct request_queue *q);
|
||||
void blk_dequeue_request(struct request *rq);
|
||||
void __blk_queue_free_tags(struct request_queue *q);
|
||||
bool __blk_end_bidi_request(struct request *rq, int error,
|
||||
@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
|
||||
|
||||
int blk_dev_init(void);
|
||||
|
||||
void elv_quiesce_start(struct request_queue *q);
|
||||
void elv_quiesce_end(struct request_queue *q);
|
||||
|
||||
|
||||
/*
|
||||
* Return the threshold (number of used requests) at which the queue is
|
||||
@ -186,32 +184,30 @@ static inline int blk_do_io_stat(struct request *rq)
|
||||
*/
|
||||
void get_io_context(struct io_context *ioc);
|
||||
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
|
||||
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
|
||||
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
|
||||
gfp_t gfp_mask);
|
||||
void ioc_clear_queue(struct request_queue *q);
|
||||
|
||||
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
|
||||
int node);
|
||||
int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
|
||||
|
||||
/**
|
||||
* create_io_context - try to create task->io_context
|
||||
* @task: target task
|
||||
* @gfp_mask: allocation mask
|
||||
* @node: allocation node
|
||||
*
|
||||
* If @task->io_context is %NULL, allocate a new io_context and install it.
|
||||
* Returns the current @task->io_context which may be %NULL if allocation
|
||||
* failed.
|
||||
* If %current->io_context is %NULL, allocate a new io_context and install
|
||||
* it. Returns the current %current->io_context which may be %NULL if
|
||||
* allocation failed.
|
||||
*
|
||||
* Note that this function can't be called with IRQ disabled because
|
||||
* task_lock which protects @task->io_context is IRQ-unsafe.
|
||||
* task_lock which protects %current->io_context is IRQ-unsafe.
|
||||
*/
|
||||
static inline struct io_context *create_io_context(struct task_struct *task,
|
||||
gfp_t gfp_mask, int node)
|
||||
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
|
||||
{
|
||||
WARN_ON_ONCE(irqs_disabled());
|
||||
if (unlikely(!task->io_context))
|
||||
create_io_context_slowpath(task, gfp_mask, node);
|
||||
return task->io_context;
|
||||
if (unlikely(!current->io_context))
|
||||
create_task_io_context(current, gfp_mask, node);
|
||||
return current->io_context;
|
||||
}
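create_io_context() above is a lazy-init helper: if %current has no io_context it calls the slow path to allocate one, then returns whatever is installed, which may still be NULL if the allocation failed. The same shape in a standalone sketch with invented names:

#include <stdio.h>
#include <stdlib.h>

struct io_context {
	int ioprio;
};

static struct io_context *current_ioc;	/* stands in for current->io_context */

static void create_ioc_slowpath(void)
{
	current_ioc = calloc(1, sizeof(*current_ioc));	/* may fail */
}

/* Fast path is a NULL check; callers must still handle a NULL return. */
static struct io_context *get_or_create_ioc(void)
{
	if (!current_ioc)
		create_ioc_slowpath();
	return current_ioc;
}

int main(void)
{
	printf("ioc: %p\n", (void *)get_or_create_ioc());
	free(current_ioc);
	return 0;
}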
|
||||
|
||||
/*
|
||||
@ -222,7 +218,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
|
||||
extern void blk_throtl_drain(struct request_queue *q);
|
||||
extern int blk_throtl_init(struct request_queue *q);
|
||||
extern void blk_throtl_exit(struct request_queue *q);
|
||||
extern void blk_throtl_release(struct request_queue *q);
|
||||
#else /* CONFIG_BLK_DEV_THROTTLING */
|
||||
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
|
||||
{
|
||||
@ -231,7 +226,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
|
||||
static inline void blk_throtl_drain(struct request_queue *q) { }
|
||||
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
|
||||
static inline void blk_throtl_exit(struct request_queue *q) { }
|
||||
static inline void blk_throtl_release(struct request_queue *q) { }
|
||||
#endif /* CONFIG_BLK_DEV_THROTTLING */
|
||||
|
||||
#endif /* BLK_INTERNAL_H */
|
||||
|
block/cfq-iosched.c (1082 lines changed; diff not shown because it is too large)
block/cfq.h (115 lines changed)
@ -1,115 +0,0 @@
|
||||
#ifndef _CFQ_H
|
||||
#define _CFQ_H
|
||||
#include "blk-cgroup.h"
|
||||
|
||||
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
||||
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
|
||||
struct blkio_group *curr_blkg, bool direction, bool sync)
|
||||
{
|
||||
blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
|
||||
}
|
||||
|
||||
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
|
||||
unsigned long dequeue)
|
||||
{
|
||||
blkiocg_update_dequeue_stats(blkg, dequeue);
|
||||
}
|
||||
|
||||
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
|
||||
unsigned long time, unsigned long unaccounted_time)
|
||||
{
|
||||
blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
|
||||
}
|
||||
|
||||
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
|
||||
{
|
||||
blkiocg_set_start_empty_time(blkg);
|
||||
}
|
||||
|
||||
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
|
||||
bool direction, bool sync)
|
||||
{
|
||||
blkiocg_update_io_remove_stats(blkg, direction, sync);
|
||||
}
|
||||
|
||||
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
|
||||
bool direction, bool sync)
|
||||
{
|
||||
blkiocg_update_io_merged_stats(blkg, direction, sync);
|
||||
}
|
||||
|
||||
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
|
||||
{
|
||||
blkiocg_update_idle_time_stats(blkg);
|
||||
}
|
||||
|
||||
static inline void
|
||||
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
|
||||
{
|
||||
blkiocg_update_avg_queue_size_stats(blkg);
|
||||
}
|
||||
|
||||
static inline void
|
||||
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
|
||||
{
|
||||
blkiocg_update_set_idle_time_stats(blkg);
|
||||
}
|
||||
|
||||
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
|
||||
uint64_t bytes, bool direction, bool sync)
|
||||
{
|
||||
blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
|
||||
}
|
||||
|
||||
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
|
||||
{
|
||||
blkiocg_update_completion_stats(blkg, start_time, io_start_time,
|
||||
direction, sync);
|
||||
}
|
||||
|
||||
static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
|
||||
struct blkio_group *blkg, void *key, dev_t dev) {
|
||||
blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
|
||||
}
|
||||
|
||||
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
|
||||
{
|
||||
return blkiocg_del_blkio_group(blkg);
|
||||
}
|
||||
|
||||
#else /* CFQ_GROUP_IOSCHED */
|
||||
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
|
||||
struct blkio_group *curr_blkg, bool direction, bool sync) {}
|
||||
|
||||
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
|
||||
unsigned long dequeue) {}
|
||||
|
||||
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
|
||||
unsigned long time, unsigned long unaccounted_time) {}
|
||||
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
|
||||
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
|
||||
bool direction, bool sync) {}
|
||||
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
|
||||
bool direction, bool sync) {}
|
||||
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
|
||||
{
|
||||
}
|
||||
static inline void
|
||||
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
|
||||
|
||||
static inline void
|
||||
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
|
||||
|
||||
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
|
||||
uint64_t bytes, bool direction, bool sync) {}
|
||||
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
|
||||
|
||||
static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
|
||||
struct blkio_group *blkg, void *key, dev_t dev) {}
|
||||
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CFQ_GROUP_IOSCHED */
|
||||
#endif
|
@ -337,13 +337,13 @@ static void deadline_exit_queue(struct elevator_queue *e)
|
||||
/*
|
||||
* initialize elevator private data (deadline_data).
|
||||
*/
|
||||
static void *deadline_init_queue(struct request_queue *q)
|
||||
static int deadline_init_queue(struct request_queue *q)
|
||||
{
|
||||
struct deadline_data *dd;
|
||||
|
||||
dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
|
||||
if (!dd)
|
||||
return NULL;
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&dd->fifo_list[READ]);
|
||||
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
|
||||
@ -354,7 +354,9 @@ static void *deadline_init_queue(struct request_queue *q)
|
||||
dd->writes_starved = writes_starved;
|
||||
dd->front_merges = 1;
|
||||
dd->fifo_batch = fifo_batch;
|
||||
return dd;
|
||||
|
||||
q->elevator->elevator_data = dd;
|
||||
return 0;
|
||||
}
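After this change an elevator's init callback returns an int and publishes its private data through q->elevator->elevator_data itself, rather than returning the pointer for the core to store. A hedged sketch of that contract; the queue and scheduler types below are stand-ins, not the kernel's:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct elevator {
	void *elevator_data;
};

struct queue {
	struct elevator *elevator;
};

/* New-style init: report errors via the return value, publish data directly. */
static int sched_init_queue(struct queue *q)
{
	int *priv = calloc(1, sizeof(*priv));

	if (!priv)
		return -ENOMEM;

	q->elevator->elevator_data = priv;
	return 0;
}

int main(void)
{
	struct elevator e = { .elevator_data = NULL };
	struct queue q = { .elevator = &e };

	printf("init: %d\n", sched_init_queue(&q));
	free(e.elevator_data);
	return 0;
}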
|
||||
|
||||
/*
|
||||
|
block/elevator.c (121 lines changed)
@ -38,6 +38,7 @@
|
||||
#include <trace/events/block.h>
|
||||
|
||||
#include "blk.h"
|
||||
#include "blk-cgroup.h"
|
||||
|
||||
static DEFINE_SPINLOCK(elv_list_lock);
|
||||
static LIST_HEAD(elv_list);
|
||||
@ -121,15 +122,6 @@ static struct elevator_type *elevator_get(const char *name)
|
||||
return e;
|
||||
}
|
||||
|
||||
static int elevator_init_queue(struct request_queue *q,
|
||||
struct elevator_queue *eq)
|
||||
{
|
||||
eq->elevator_data = eq->type->ops.elevator_init_fn(q);
|
||||
if (eq->elevator_data)
|
||||
return 0;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static char chosen_elevator[ELV_NAME_MAX];
|
||||
|
||||
static int __init elevator_setup(char *str)
|
||||
@ -188,7 +180,6 @@ static void elevator_release(struct kobject *kobj)
|
||||
int elevator_init(struct request_queue *q, char *name)
|
||||
{
|
||||
struct elevator_type *e = NULL;
|
||||
struct elevator_queue *eq;
|
||||
int err;
|
||||
|
||||
if (unlikely(q->elevator))
|
||||
@ -222,17 +213,16 @@ int elevator_init(struct request_queue *q, char *name)
|
||||
}
|
||||
}
|
||||
|
||||
eq = elevator_alloc(q, e);
|
||||
if (!eq)
|
||||
q->elevator = elevator_alloc(q, e);
|
||||
if (!q->elevator)
|
||||
return -ENOMEM;
|
||||
|
||||
err = elevator_init_queue(q, eq);
|
||||
err = e->ops.elevator_init_fn(q);
|
||||
if (err) {
|
||||
kobject_put(&eq->kobj);
|
||||
kobject_put(&q->elevator->kobj);
|
||||
return err;
|
||||
}
|
||||
|
||||
q->elevator = eq;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(elevator_init);
|
||||
@ -564,25 +554,6 @@ void elv_drain_elevator(struct request_queue *q)
|
||||
}
|
||||
}
|
||||
|
||||
void elv_quiesce_start(struct request_queue *q)
|
||||
{
|
||||
if (!q->elevator)
|
||||
return;
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
|
||||
blk_drain_queue(q, false);
|
||||
}
|
||||
|
||||
void elv_quiesce_end(struct request_queue *q)
|
||||
{
|
||||
spin_lock_irq(q->queue_lock);
|
||||
queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
}
|
||||
|
||||
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
|
||||
{
|
||||
trace_block_rq_insert(q, rq);
|
||||
@ -692,12 +663,13 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
|
||||
int elv_set_request(struct request_queue *q, struct request *rq,
|
||||
struct bio *bio, gfp_t gfp_mask)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (e->type->ops.elevator_set_req_fn)
|
||||
return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
|
||||
return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -801,8 +773,9 @@ static struct kobj_type elv_ktype = {
|
||||
.release = elevator_release,
|
||||
};
|
||||
|
||||
int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
|
||||
int elv_register_queue(struct request_queue *q)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
int error;
|
||||
|
||||
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
|
||||
@ -820,11 +793,6 @@ int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
int elv_register_queue(struct request_queue *q)
|
||||
{
|
||||
return __elv_register_queue(q, q->elevator);
|
||||
}
|
||||
EXPORT_SYMBOL(elv_register_queue);
|
||||
|
||||
void elv_unregister_queue(struct request_queue *q)
|
||||
@ -907,53 +875,60 @@ EXPORT_SYMBOL_GPL(elv_unregister);
|
||||
*/
|
||||
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
|
||||
{
|
||||
struct elevator_queue *old_elevator, *e;
|
||||
struct elevator_queue *old = q->elevator;
|
||||
bool registered = old->registered;
|
||||
int err;
|
||||
|
||||
/* allocate new elevator */
|
||||
e = elevator_alloc(q, new_e);
|
||||
if (!e)
|
||||
return -ENOMEM;
|
||||
/*
|
||||
* Turn on BYPASS and drain all requests w/ elevator private data.
|
||||
* Block layer doesn't call into a quiesced elevator - all requests
|
||||
* are directly put on the dispatch list without elevator data
|
||||
* using INSERT_BACK. All requests have SOFTBARRIER set and no
|
||||
* merge happens either.
|
||||
*/
|
||||
blk_queue_bypass_start(q);
|
||||
|
||||
err = elevator_init_queue(q, e);
|
||||
/* unregister and clear all auxiliary data of the old elevator */
|
||||
if (registered)
|
||||
elv_unregister_queue(q);
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
ioc_clear_queue(q);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
|
||||
/* allocate, init and register new elevator */
|
||||
err = -ENOMEM;
|
||||
q->elevator = elevator_alloc(q, new_e);
|
||||
if (!q->elevator)
|
||||
goto fail_init;
|
||||
|
||||
err = new_e->ops.elevator_init_fn(q);
|
||||
if (err) {
|
||||
kobject_put(&e->kobj);
|
||||
return err;
|
||||
kobject_put(&q->elevator->kobj);
|
||||
goto fail_init;
|
||||
}
|
||||
|
||||
/* turn on BYPASS and drain all requests w/ elevator private data */
|
||||
elv_quiesce_start(q);
|
||||
|
||||
/* unregister old queue, register new one and kill old elevator */
|
||||
if (q->elevator->registered) {
|
||||
elv_unregister_queue(q);
|
||||
err = __elv_register_queue(q, e);
|
||||
if (registered) {
|
||||
err = elv_register_queue(q);
|
||||
if (err)
|
||||
goto fail_register;
|
||||
}
|
||||
|
||||
/* done, clear io_cq's, switch elevators and turn off BYPASS */
|
||||
spin_lock_irq(q->queue_lock);
|
||||
ioc_clear_queue(q);
|
||||
old_elevator = q->elevator;
|
||||
q->elevator = e;
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
/* done, kill the old one and finish */
|
||||
elevator_exit(old);
|
||||
blk_queue_bypass_end(q);
|
||||
|
||||
elevator_exit(old_elevator);
|
||||
elv_quiesce_end(q);
|
||||
|
||||
blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);
|
||||
blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
|
||||
|
||||
return 0;
|
||||
|
||||
fail_register:
|
||||
/*
|
||||
* switch failed, exit the new io scheduler and reattach the old
|
||||
* one again (along with re-adding the sysfs dir)
|
||||
*/
|
||||
elevator_exit(e);
|
||||
elevator_exit(q->elevator);
|
||||
fail_init:
|
||||
/* switch failed, restore and re-register old elevator */
|
||||
q->elevator = old;
|
||||
elv_register_queue(q);
|
||||
elv_quiesce_end(q);
|
||||
blk_queue_bypass_end(q);
|
||||
|
||||
return err;
|
||||
}
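The reworked elevator_switch() above follows a replace-with-rollback shape: unregister the old instance, try to allocate, init and register the new one, and on any failure restore the old elevator before ending bypass. A compact standalone illustration of that control flow (not the kernel code itself):

#include <stdio.h>

static int try_build_new(int should_fail)
{
	return should_fail ? -1 : 0;
}

/* Commit the new engine on success, or restore the old one on error. */
static int switch_engine(int *active, int new_engine, int should_fail)
{
	int old = *active;
	int err;

	*active = -1;			/* 1. quiesce / unregister the old one */

	err = try_build_new(should_fail);	/* 2. build and register the new one */
	if (err) {
		*active = old;		/* rollback on failure */
		return err;
	}

	*active = new_engine;		/* 3. commit; the old one is torn down */
	return 0;
}

int main(void)
{
	int active = 1;

	printf("ok:   %d (active=%d)\n", switch_engine(&active, 2, 0), active);
	printf("fail: %d (active=%d)\n", switch_engine(&active, 3, 1), active);
	return 0;
}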
|
||||
|
@ -59,15 +59,17 @@ noop_latter_request(struct request_queue *q, struct request *rq)
|
||||
return list_entry(rq->queuelist.next, struct request, queuelist);
|
||||
}
|
||||
|
||||
static void *noop_init_queue(struct request_queue *q)
|
||||
static int noop_init_queue(struct request_queue *q)
|
||||
{
|
||||
struct noop_data *nd;
|
||||
|
||||
nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
|
||||
if (!nd)
|
||||
return NULL;
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&nd->queue);
|
||||
return nd;
|
||||
q->elevator->elevator_data = nd;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void noop_exit_queue(struct elevator_queue *e)
|
||||
|
fs/bio.c (61 lines changed)
@ -19,12 +19,14 @@
|
||||
#include <linux/swap.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/iocontext.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/cgroup.h>
|
||||
#include <scsi/sg.h> /* for struct sg_iovec */
|
||||
|
||||
#include <trace/events/block.h>
|
||||
@ -418,6 +420,7 @@ void bio_put(struct bio *bio)
|
||||
* last put frees it
|
||||
*/
|
||||
if (atomic_dec_and_test(&bio->bi_cnt)) {
|
||||
bio_disassociate_task(bio);
|
||||
bio->bi_next = NULL;
|
||||
bio->bi_destructor(bio);
|
||||
}
|
||||
@ -1646,6 +1649,64 @@ bad:
|
||||
}
|
||||
EXPORT_SYMBOL(bioset_create);
|
||||
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
/**
|
||||
* bio_associate_current - associate a bio with %current
|
||||
* @bio: target bio
|
||||
*
|
||||
* Associate @bio with %current if it hasn't been associated yet. Block
|
||||
* layer will treat @bio as if it were issued by %current no matter which
|
||||
* task actually issues it.
|
||||
*
|
||||
* This function takes an extra reference of @task's io_context and blkcg
|
||||
* which will be put when @bio is released. The caller must own @bio,
|
||||
* ensure %current->io_context exists, and is responsible for synchronizing
|
||||
* calls to this function.
|
||||
*/
|
||||
int bio_associate_current(struct bio *bio)
|
||||
{
|
||||
struct io_context *ioc;
|
||||
struct cgroup_subsys_state *css;
|
||||
|
||||
if (bio->bi_ioc)
|
||||
return -EBUSY;
|
||||
|
||||
ioc = current->io_context;
|
||||
if (!ioc)
|
||||
return -ENOENT;
|
||||
|
||||
/* acquire active ref on @ioc and associate */
|
||||
get_io_context_active(ioc);
|
||||
bio->bi_ioc = ioc;
|
||||
|
||||
/* associate blkcg if exists */
|
||||
rcu_read_lock();
|
||||
css = task_subsys_state(current, blkio_subsys_id);
|
||||
if (css && css_tryget(css))
|
||||
bio->bi_css = css;
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* bio_disassociate_task - undo bio_associate_current()
|
||||
* @bio: target bio
|
||||
*/
|
||||
void bio_disassociate_task(struct bio *bio)
|
||||
{
|
||||
if (bio->bi_ioc) {
|
||||
put_io_context(bio->bi_ioc);
|
||||
bio->bi_ioc = NULL;
|
||||
}
|
||||
if (bio->bi_css) {
|
||||
css_put(bio->bi_css);
|
||||
bio->bi_css = NULL;
|
||||
}
|
||||
}
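bio_associate_current() and bio_disassociate_task() above pin the submitter's io_context (and blkcg css, when present) for the life of the bio: a reference is taken at association time and dropped when the bio is released. A minimal refcount sketch of that pairing; the types and helpers below are invented for the example:

#include <stdio.h>

struct ctx {
	int refs;
};

static void ctx_get(struct ctx *c) { c->refs++; }
static void ctx_put(struct ctx *c) { c->refs--; }

struct bio_like {
	struct ctx *ioc;	/* set once at association time */
};

static int associate(struct bio_like *b, struct ctx *submitter_ioc)
{
	if (b->ioc)
		return -1;		/* already associated, like -EBUSY */
	ctx_get(submitter_ioc);		/* reference lives as long as the bio */
	b->ioc = submitter_ioc;
	return 0;
}

static void disassociate(struct bio_like *b)
{
	if (b->ioc) {
		ctx_put(b->ioc);	/* dropped when the bio is released */
		b->ioc = NULL;
	}
}

int main(void)
{
	struct ctx ioc = { .refs = 1 };
	struct bio_like b = { .ioc = NULL };

	associate(&b, &ioc);
	printf("refs after associate: %d\n", ioc.refs);	/* 2 */
	disassociate(&b);
	printf("refs after release: %d\n", ioc.refs);	/* 1 */
	return 0;
}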
|
||||
|
||||
#endif /* CONFIG_BLK_CGROUP */
|
||||
|
||||
static void __init biovec_init_slabs(void)
|
||||
{
|
||||
int i;
|
||||
|
@ -50,7 +50,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
|
||||
|
||||
ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
|
||||
if (ioc) {
|
||||
ioc_ioprio_changed(ioc, ioprio);
|
||||
ioc->ioprio = ioprio;
|
||||
put_io_context(ioc);
|
||||
}
|
||||
|
||||
|
@ -1388,7 +1388,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
|
||||
*/
|
||||
static int get_iovec_page_array(const struct iovec __user *iov,
|
||||
unsigned int nr_vecs, struct page **pages,
|
||||
struct partial_page *partial, int aligned,
|
||||
struct partial_page *partial, bool aligned,
|
||||
unsigned int pipe_buffers)
|
||||
{
|
||||
int buffers = 0, error = 0;
|
||||
@ -1626,7 +1626,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
|
||||
return -ENOMEM;
|
||||
|
||||
spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
|
||||
spd.partial, flags & SPLICE_F_GIFT,
|
||||
spd.partial, false,
|
||||
pipe->buffers);
|
||||
if (spd.nr_pages <= 0)
|
||||
ret = spd.nr_pages;
|
||||
|
@ -269,6 +269,14 @@ extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set
|
||||
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
|
||||
extern unsigned int bvec_nr_vecs(unsigned short idx);
|
||||
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
int bio_associate_current(struct bio *bio);
|
||||
void bio_disassociate_task(struct bio *bio);
|
||||
#else /* CONFIG_BLK_CGROUP */
|
||||
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
|
||||
static inline void bio_disassociate_task(struct bio *bio) { }
|
||||
#endif /* CONFIG_BLK_CGROUP */
|
||||
|
||||
/*
|
||||
* bio_set is used to allow other portions of the IO system to
|
||||
* allocate their own private memory pools for bio and iovec structures.
|
||||
|
@ -14,6 +14,8 @@ struct bio;
|
||||
struct bio_integrity_payload;
|
||||
struct page;
|
||||
struct block_device;
|
||||
struct io_context;
|
||||
struct cgroup_subsys_state;
|
||||
typedef void (bio_end_io_t) (struct bio *, int);
|
||||
typedef void (bio_destructor_t) (struct bio *);
|
||||
|
||||
@ -66,6 +68,14 @@ struct bio {
|
||||
bio_end_io_t *bi_end_io;
|
||||
|
||||
void *bi_private;
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
/*
|
||||
* Optional ioc and css associated with this bio. Put on bio
|
||||
* release. Read comment on top of bio_associate_current().
|
||||
*/
|
||||
struct io_context *bi_ioc;
|
||||
struct cgroup_subsys_state *bi_css;
|
||||
#endif
|
||||
#if defined(CONFIG_BLK_DEV_INTEGRITY)
|
||||
struct bio_integrity_payload *bi_integrity; /* data integrity */
|
||||
#endif
|
||||
|
@ -32,10 +32,17 @@ struct blk_trace;
|
||||
struct request;
|
||||
struct sg_io_hdr;
|
||||
struct bsg_job;
|
||||
struct blkcg_gq;
|
||||
|
||||
#define BLKDEV_MIN_RQ 4
|
||||
#define BLKDEV_MAX_RQ 128 /* Default maximum */
|
||||
|
||||
/*
|
||||
* Maximum number of blkcg policies allowed to be registered concurrently.
|
||||
* Defined here to simplify include dependency.
|
||||
*/
|
||||
#define BLKCG_MAX_POLS 2
|
||||
|
||||
struct request;
|
||||
typedef void (rq_end_io_fn)(struct request *, int);
|
||||
|
||||
@ -363,6 +370,11 @@ struct request_queue {
|
||||
struct list_head timeout_list;
|
||||
|
||||
struct list_head icq_list;
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
|
||||
struct blkcg_gq *root_blkg;
|
||||
struct list_head blkg_list;
|
||||
#endif
|
||||
|
||||
struct queue_limits limits;
|
||||
|
||||
@ -390,12 +402,17 @@ struct request_queue {
|
||||
|
||||
struct mutex sysfs_lock;
|
||||
|
||||
int bypass_depth;
|
||||
|
||||
#if defined(CONFIG_BLK_DEV_BSG)
|
||||
bsg_job_fn *bsg_job_fn;
|
||||
int bsg_job_size;
|
||||
struct bsg_class_device bsg_dev;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
struct list_head all_q_node;
|
||||
#endif
|
||||
#ifdef CONFIG_BLK_DEV_THROTTLING
|
||||
/* Throttle data */
|
||||
struct throtl_data *td;
|
||||
@ -407,7 +424,7 @@ struct request_queue {
|
||||
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
|
||||
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
|
||||
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
|
||||
#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
|
||||
#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
|
||||
#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
|
||||
#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
|
||||
#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
|
||||
@ -491,6 +508,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
|
||||
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
|
||||
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
|
||||
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
|
||||
#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
|
||||
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
|
||||
#define blk_queue_noxmerges(q) \
|
||||
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
|
||||
|
@ -28,12 +28,13 @@ typedef int (elevator_may_queue_fn) (struct request_queue *, int);
|
||||
|
||||
typedef void (elevator_init_icq_fn) (struct io_cq *);
|
||||
typedef void (elevator_exit_icq_fn) (struct io_cq *);
|
||||
typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
|
||||
typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
|
||||
struct bio *, gfp_t);
|
||||
typedef void (elevator_put_req_fn) (struct request *);
|
||||
typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
|
||||
typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
|
||||
|
||||
typedef void *(elevator_init_fn) (struct request_queue *);
|
||||
typedef int (elevator_init_fn) (struct request_queue *);
|
||||
typedef void (elevator_exit_fn) (struct elevator_queue *);
|
||||
|
||||
struct elevator_ops
|
||||
@ -129,7 +130,8 @@ extern void elv_unregister_queue(struct request_queue *q);
|
||||
extern int elv_may_queue(struct request_queue *, int);
|
||||
extern void elv_abort_queue(struct request_queue *);
|
||||
extern void elv_completed_request(struct request_queue *, struct request *);
|
||||
extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
|
||||
extern int elv_set_request(struct request_queue *q, struct request *rq,
|
||||
struct bio *bio, gfp_t gfp_mask);
|
||||
extern void elv_put_request(struct request_queue *, struct request *);
|
||||
extern void elv_drain_elevator(struct request_queue *);
|
||||
|
||||
|
@ -6,11 +6,7 @@
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
enum {
|
||||
ICQ_IOPRIO_CHANGED = 1 << 0,
|
||||
ICQ_CGROUP_CHANGED = 1 << 1,
|
||||
ICQ_EXITED = 1 << 2,
|
||||
|
||||
ICQ_CHANGED_MASK = ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
|
||||
};
|
||||
|
||||
/*
|
||||
@ -100,6 +96,7 @@ struct io_cq {
|
||||
*/
|
||||
struct io_context {
|
||||
atomic_long_t refcount;
|
||||
atomic_t active_ref;
|
||||
atomic_t nr_tasks;
|
||||
|
||||
/* all the fields below are protected by this lock */
|
||||
@ -120,29 +117,37 @@ struct io_context {
|
||||
struct work_struct release_work;
|
||||
};
|
||||
|
||||
static inline struct io_context *ioc_task_link(struct io_context *ioc)
|
||||
/**
|
||||
* get_io_context_active - get active reference on ioc
|
||||
* @ioc: ioc of interest
|
||||
*
|
||||
* Only iocs with active reference can issue new IOs. This function
|
||||
* acquires an active reference on @ioc. The caller must already have an
|
||||
* active reference on @ioc.
|
||||
*/
|
||||
static inline void get_io_context_active(struct io_context *ioc)
|
||||
{
|
||||
/*
|
||||
* if ref count is zero, don't allow sharing (ioc is going away, it's
|
||||
* a race).
|
||||
*/
|
||||
if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
|
||||
atomic_inc(&ioc->nr_tasks);
|
||||
return ioc;
|
||||
}
|
||||
WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
|
||||
WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
|
||||
atomic_long_inc(&ioc->refcount);
|
||||
atomic_inc(&ioc->active_ref);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
static inline void ioc_task_link(struct io_context *ioc)
|
||||
{
|
||||
get_io_context_active(ioc);
|
||||
|
||||
WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
|
||||
atomic_inc(&ioc->nr_tasks);
|
||||
}
|
||||
|
||||
struct task_struct;
|
||||
#ifdef CONFIG_BLOCK
|
||||
void put_io_context(struct io_context *ioc);
|
||||
void put_io_context_active(struct io_context *ioc);
|
||||
void exit_io_context(struct task_struct *task);
|
||||
struct io_context *get_task_io_context(struct task_struct *task,
|
||||
gfp_t gfp_flags, int node);
|
||||
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
|
||||
void ioc_cgroup_changed(struct io_context *ioc);
|
||||
unsigned int icq_get_changed(struct io_cq *icq);
|
||||
#else
|
||||
struct io_context;
|
||||
static inline void put_io_context(struct io_context *ioc) { }
|
||||
|
@ -41,27 +41,15 @@ enum {
|
||||
IOPRIO_WHO_USER,
|
||||
};
|
||||
|
||||
/*
|
||||
* Fallback BE priority
|
||||
*/
|
||||
#define IOPRIO_NORM (4)
|
||||
|
||||
/*
|
||||
* if process has set io priority explicitly, use that. if not, convert
|
||||
* the cpu scheduler nice value to an io priority
|
||||
*/
|
||||
#define IOPRIO_NORM (4)
|
||||
static inline int task_ioprio(struct io_context *ioc)
|
||||
{
|
||||
if (ioprio_valid(ioc->ioprio))
|
||||
return IOPRIO_PRIO_DATA(ioc->ioprio);
|
||||
|
||||
return IOPRIO_NORM;
|
||||
}
|
||||
|
||||
static inline int task_ioprio_class(struct io_context *ioc)
|
||||
{
|
||||
if (ioprio_valid(ioc->ioprio))
|
||||
return IOPRIO_PRIO_CLASS(ioc->ioprio);
|
||||
|
||||
return IOPRIO_CLASS_BE;
|
||||
}
|
||||
|
||||
static inline int task_nice_ioprio(struct task_struct *task)
|
||||
{
|
||||
return (task_nice(task) + 20) / 5;
|
||||
|
@ -803,7 +803,7 @@ config RT_GROUP_SCHED
|
||||
endif #CGROUP_SCHED
|
||||
|
||||
config BLK_CGROUP
|
||||
tristate "Block IO controller"
|
||||
bool "Block IO controller"
|
||||
depends on BLOCK
|
||||
default n
|
||||
---help---
|
||||
|
@ -976,9 +976,8 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
|
||||
* Share io context with parent, if CLONE_IO is set
|
||||
*/
|
||||
if (clone_flags & CLONE_IO) {
|
||||
tsk->io_context = ioc_task_link(ioc);
|
||||
if (unlikely(!tsk->io_context))
|
||||
return -ENOMEM;
|
||||
ioc_task_link(ioc);
|
||||
tsk->io_context = ioc;
|
||||
} else if (ioprio_valid(ioc->ioprio)) {
|
||||
new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
|
||||
if (unlikely(!new_ioc))
|
||||
|