/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>
#include <linux/llist.h>

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
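
/*
 * Illustrative sketch (not part of this header): BLKG_STAT_CPU_BATCH is
 * meant to be passed as the batch argument of percpu_counter_add_batch()
 * so per-cpu deltas are folded into the global count only rarely.  The
 * counter and helper below are hypothetical.
 */
#if 0	/* example only */
#include <linux/percpu_counter.h>

static struct percpu_counter example_rw_stat;

static void example_account_bytes(s64 bytes)
{
	/* huge batch: per-cpu drift is acceptable for these stats */
	percpu_counter_add_batch(&example_rw_stat, bytes, BLKG_STAT_CPU_BATCH);
}
#endif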

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkcg_gq		       *blkg;
	struct llist_node		lnode;
	int				lqueued;	/* queued in llist */
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};
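
/*
 * Illustrative sketch (not part of this header): readers of a
 * blkg_iostat_set use the u64_stats_sync seqcount so the u64 counters are
 * read consistently on 32-bit machines.  "bis" is assumed to point to a
 * valid set.
 */
#if 0	/* example only */
static u64 example_read_bytes(struct blkg_iostat_set *bis,
			      enum blkg_iostat_type t)
{
	unsigned int seq;
	u64 v;

	do {
		seq = u64_stats_fetch_begin(&bis->sync);
		v = bis->cur.bytes[t];
	} while (u64_stats_fetch_retry(&bis->sync, seq));

	return v;
}
#endif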

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;

	/*
	 * List of updated percpu blkg_iostat_set's since the last flush.
	 */
	struct llist_head __percpu	*lhead;

#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char				fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
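
/*
 * Illustrative sketch (not part of this header): a policy embeds
 * blkg_policy_data at the start of its own per-blkg structure and uses
 * container_of() to convert between the two.  "example_blkg_data" and its
 * fields are hypothetical.
 */
#if 0	/* example only */
struct example_blkg_data {
	struct blkg_policy_data	pd;	/* must be the first member */
	u64			budget;
};

static inline struct example_blkg_data *pd_to_example(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct example_blkg_data, pd) : NULL;
}
#endif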

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
				struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};
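
/*
 * Illustrative sketch (not part of this header): a minimal policy fills in
 * only the pd hooks it needs.  "example_pd_alloc"/"example_pd_free" and
 * "blkcg_policy_example" are hypothetical; the example reuses the
 * hypothetical example_blkg_data above.
 */
#if 0	/* example only */
static struct blkg_policy_data *example_pd_alloc(gfp_t gfp,
		struct request_queue *q, struct blkcg *blkcg)
{
	struct example_blkg_data *ed;

	ed = kzalloc_node(sizeof(*ed), gfp, q->node);
	return ed ? &ed->pd : NULL;
}

static void example_pd_free(struct blkg_policy_data *pd)
{
	kfree(pd_to_example(pd));
}

static struct blkcg_policy blkcg_policy_example = {
	.pd_alloc_fn	= example_pd_alloc,
	.pd_free_fn	= example_pd_free,
};
#endif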

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
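
/*
 * Illustrative sketch (not part of this header): a policy is registered
 * once at init time and then activated on each request_queue that should
 * carry per-blkg data for it.  Error handling is elided; names reuse the
 * hypothetical policy above.
 */
#if 0	/* example only */
static int __init example_init(void)
{
	return blkcg_policy_register(&blkcg_policy_example);
}

static int example_enable(struct request_queue *q)
{
	/* allocates pd's for all existing blkgs of @q */
	return blkcg_activate_policy(q, &blkcg_policy_example);
}
#endif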

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
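
/*
 * Illustrative sketch (not part of this header): a cgroup file write
 * handler parses "MAJ:MIN ..." input with blkg_conf_prep(), configures its
 * pd, and releases the pinned blkg with blkg_conf_finish().  Parsing of
 * ctx.body is left out; "example_conf_write" is hypothetical.
 */
#if 0	/* example only */
static ssize_t example_conf_write(struct kernfs_open_file *of, char *buf,
				  size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_example, buf, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg is pinned here; parse ctx.body and update its pd */

	blkg_conf_finish(&ctx);
	return nbytes;
}
#endif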

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: the target bio
 *
 * Returns true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg_css() to look up the actual context for
 * the bio and attach the appropriate blkg to the bio.  Then we call this
 * helper and, if it returns true, run with the root blkg for that queue and
 * do any backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.
 *
 * Must be called in an RCU critical section.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q != q)
		blkg = NULL;
	return blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
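
/*
 * Illustrative sketch (not part of this header): the usual pattern is to
 * look a blkg up under rcu_read_lock() and pin it with blkg_tryget()
 * before leaving the RCU section.  "example_get_blkg" is hypothetical and
 * @q is assumed to be a live request_queue.
 */
#if 0	/* example only */
static struct blkcg_gq *example_get_blkg(struct blkcg *blkcg,
					 struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_lookup(blkcg, q);
	if (!blkg_tryget(blkg))
		blkg = NULL;
	rcu_read_unlock();

	return blkg;	/* caller does blkg_put() when done */
}
#endif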

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))
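
/*
 * Illustrative sketch (not part of this header): walking all online
 * descendants of a queue's root blkg in pre-order.  The per-blkg work is
 * left as a comment; "example_walk" is hypothetical.
 */
#if 0	/* example only */
static void example_walk(struct request_queue *q)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) {
		/* operate on blkg; it is only pinned by RCU here */
	}
	rcu_read_unlock();
}
#endif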

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
		;

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}
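
/*
 * Illustrative sketch (not part of this header): blkcg_use_delay() and
 * blkcg_unuse_delay() are paired around a period during which a policy
 * wants allocator throttling applied to the cgroup.  "example_throttle_window"
 * is hypothetical.
 */
#if 0	/* example only */
static void example_throttle_window(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	blkcg_use_delay(blkg);		/* bumps congestion count on 0 -> 1 */
	blkcg_add_delay(blkg, now, delta);
	/* ... later, when pressure subsides ... */
	blkcg_unuse_delay(blkg);	/* drops congestion count on 1 -> 0 */
}
#endif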

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
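
/*
 * Illustrative sketch (not part of this header): the set/clear variant
 * installs a fixed, non-decaying delay (use_delay goes to -1) and must be
 * torn down explicitly.  "example_fixed_delay" is hypothetical.
 */
#if 0	/* example only */
static void example_fixed_delay(struct blkcg_gq *blkg)
{
	blkcg_set_delay(blkg, NSEC_PER_MSEC);	/* fixed 1ms delay */
	/* ... once the condition clears ... */
	blkcg_clear_delay(blkg);
}
#endif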

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match.  The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif	/* _BLK_CGROUP_PRIVATE_H */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)

#ifndef CONFIG_BLK_CGROUP
/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
extern struct cgroup_subsys blkio_subsys;
#define blkio_subsys_id blkio_subsys.subsys_id
#endif

enum stat_type {
	/* Total time spent (in ns) between request dispatch to the driver and
	 * request completion for IOs done by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME = 0,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,
	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	BLKIO_STAT_UNACCOUNTED_TIME,
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};

/* Per cpu stats */
enum stat_type_cpu {
	BLKIO_STAT_CPU_SECTORS,
	/* Total bytes transferred */
	BLKIO_STAT_CPU_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_CPU_SERVICED,
	/* Number of IOs merged */
	BLKIO_STAT_CPU_MERGED,
	BLKIO_STAT_CPU_NR
};

enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL
};

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

/* cgroup files owned by proportional weight policy */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_time,
	BLKIO_PROP_sectors,
	BLKIO_PROP_unaccounted_time,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_io_merged,
	BLKIO_PROP_io_queued,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_idle_time,
	BLKIO_PROP_empty_time,
	BLKIO_PROP_dequeue,
};

/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;
	struct list_head policy_list;	/* list of blkio_policy_node */
};

struct blkio_group_stats {
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	uint64_t unaccounted_time;

	/* Sum of number of IOs queued across all samples */
	uint64_t avg_queue_size_sum;
	/* Count of samples taken for average */
	uint64_t avg_queue_size_samples;
	/* How many times this group has been removed from service tree */
	unsigned long dequeue;

	/* Total time spent waiting for it to be assigned a timeslice. */
	uint64_t group_wait_time;
	uint64_t start_group_wait_time;

	/* Time spent idling for this blkio_group */
	uint64_t idle_time;
	uint64_t start_idle_time;
	/*
	 * Total time during which this group had no requests queued
	 * (i.e. was empty).
	 */
	uint64_t empty_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif
};

/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	uint64_t sectors;
	uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
	struct u64_stats_sync syncp;
};

struct blkio_group {
	/* An rcu protected unique identifier for the group */
	void *key;
	struct hlist_node blkcg_node;
	unsigned short blkcg_id;
	/* Store cgroup path */
	char path[128];
	/* The device MKDEV(major, minor) this group has been created for */
	dev_t dev;
	/* policy which owns this blk group */
	enum blkio_policy_id plid;

	/* Need to serialize the stats in the case of reset/update */
	spinlock_t stats_lock;
	struct blkio_group_stats stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu __percpu *stats_cpu;
};

struct blkio_policy_node {
	struct list_head node;
	dev_t dev;
	/* This node belongs to max bw policy or proportional weight policy */
	enum blkio_policy_id plid;
	/* cgroup file to which this rule belongs */
	int fileid;

	union {
		unsigned int weight;
		/*
		 * Rate read/write in terms of bytes per second
		 * Whether this rate represents read or write is determined
		 * by file type "fileid".
		 */
		u64 bps;
		unsigned int iops;
	} val;
};

extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
				     dev_t dev);
extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
				   dev_t dev);
extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
				    dev_t dev);
extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
					dev_t dev);
extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
					 dev_t dev);

typedef void (blkio_unlink_group_fn)(void *key, struct blkio_group *blkg);

typedef void (blkio_update_group_weight_fn)(void *key,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(void *key,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(void *key,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(void *key,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn)(void *key,
			struct blkio_group *blkg, unsigned int write_iops);

struct blkio_policy_ops {
	blkio_unlink_group_fn *blkio_unlink_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
};

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
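
/*
 * Illustrative sketch (not part of this header): in this older API a
 * policy fills in a blkio_policy_type with its callbacks and registers it.
 * "example_unlink" and "blkio_policy_example" are hypothetical.
 */
#if 0	/* example only */
static void example_unlink(void *key, struct blkio_group *blkg)
{
	/* drop the policy's references to blkg; key identifies the queue */
}

static struct blkio_policy_type blkio_policy_example = {
	.ops = {
		.blkio_unlink_group_fn	= example_unlink,
	},
	.plid = BLKIO_POLICY_PROP,
};

/* registered once at init: blkio_policy_register(&blkio_policy_example); */
#endif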

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }

static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }

#endif

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
void blkiocg_set_start_empty_time(struct blkio_group *blkg);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}									\

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
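
/*
 * Illustrative note (not part of this header): BLKG_FLAG_FNS(waiting)
 * expands to three helpers that manipulate the BLKG_waiting bit in
 * stats->flags:
 *
 *	blkio_mark_blkg_waiting(stats);
 *	blkio_clear_blkg_waiting(stats);
 *	if (blkio_blkg_waiting(stats)) ...
 */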
#else
static inline void blkiocg_update_avg_queue_size_stats(
						struct blkio_group *blkg) {}
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
						unsigned long dequeue) {}
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{}
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
#endif

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
	struct blkio_group *blkg, void *key, dev_t dev,
	enum blkio_policy_id plid);
extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
						void *key);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   unsigned long time,
				   unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
				   bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
task_blkio_cgroup(struct task_struct *tsk) { return NULL; }

static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid) {}

static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }

static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }

static inline struct blkio_group *
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
						unsigned long time,
						unsigned long unaccounted_time)
{}
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync) {}
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
		uint64_t start_time, uint64_t io_start_time, bool direction,
		bool sync) {}
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
					bool direction, bool sync) {}
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
					bool direction, bool sync) {}
#endif
#endif	/* _BLK_CGROUP_H */