#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state css;
	spinlock_t lock;

	struct radix_tree_root blkg_tree;
	struct blkcg_gq *blkg_hint;
	struct hlist_head blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t id;

	/* TODO: per-policy storage in blkcg */
	unsigned int cfq_weight;	/* belongs to cfq */
	unsigned int cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync syncp;
	uint64_t cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync syncp;
	uint64_t cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq *blkg;
	int plid;

	/* used during policy activation */
	struct list_head alloc_node;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;
	struct hlist_node blkcg_node;
	struct blkcg *blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq *parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list rl;

	/* reference count */
	int refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool online;

	struct blkg_policy_data *pd[BLKCG_MAX_POLS];

	struct rcu_head rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int plid;
	/* policy specific private data size */
	size_t pd_size;
	/* cgroup files for the policy */
	struct cftype *cftypes;

	/* operations */
	blkcg_pol_init_pd_fn *pd_init_fn;
	blkcg_pol_online_pd_fn *pd_online_fn;
	blkcg_pol_offline_pd_fn *pd_offline_fn;
	blkcg_pol_exit_pd_fn *pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};
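
/*
 * Example (illustrative sketch only, all names hypothetical): a policy
 * embeds struct blkg_policy_data at the start of its per-blkg data and
 * sizes pd_size to the full structure.
 *
 *	struct foo_grp {
 *		struct blkg_policy_data pd;	// must come first
 *		u64 bytes_dispatched;
 *	};
 *
 *	static struct blkcg_policy foo_policy;
 *
 *	static void foo_pd_init(struct blkcg_gq *blkg)
 *	{
 *		struct foo_grp *fg = container_of(blkg_to_pd(blkg,
 *					&foo_policy), struct foo_grp, pd);
 *		fg->bytes_dispatched = 0;
 *	}
 *
 *	static struct blkcg_policy foo_policy = {
 *		.pd_size	= sizeof(struct foo_grp),
 *		.pd_init_fn	= foo_pd_init,
 *	};
 *
 * The policy is then made known to blkcg core with
 * blkcg_policy_register(&foo_policy) and enabled on a queue with
 * blkcg_activate_policy().
 */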

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	u64 v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
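
/*
 * Example (hedged sketch): a cftype write handler for a per-device
 * setting typically brackets the update with blkg_conf_prep() and
 * blkg_conf_finish().  foo_policy and foo_apply_config() are
 * hypothetical.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &foo_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	// "MAJ:MIN VAL" has been parsed; ctx.blkg is pinned with the
 *	// queue lock held and ctx.v holds VAL
 *	foo_apply_config(ctx.blkg, ctx.v);	// hypothetical helper
 *
 *	blkg_conf_finish(&ctx);	// drops the lock and references
 *	return 0;
 */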

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(css_parent(&blkcg->css));
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
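
/*
 * Example (illustrative): visiting a subtree, e.g. to propagate a stats
 * reset.  Here blkg is the subtree root in scope and do_reset() is a
 * hypothetical per-blkg helper.
 *
 *	struct blkcg_gq *pos;
 *	struct cgroup_subsys_state *pos_css;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg) {
 *		// blkg itself is visited first, then each descendant
 *		do_reset(pos);
 *	}
 *	rcu_read_unlock();
 */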

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
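
/*
 * Example (simplified sketch of how the request allocation path is
 * expected to pair these helpers; locking and error handling omitted):
 *
 *	// under queue_lock, while allocating a request for @bio
 *	struct request_list *rl = blk_get_rl(q, bio);	// never NULL
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	// when the request is freed, the rl it came from gets the
 *	// reference back
 *	blk_put_rl(blk_rq_rl(rq));
 */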

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}
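
/*
 * Example (illustrative, fg is a hypothetical policy data pointer):
 * writers serialize among themselves (e.g. under the queue lock) while
 * readers need no locking.
 *
 *	blkg_stat_add(&fg->time, jiffies_delta);	// writer side
 *	u64 v = blkg_stat_read(&fg->time);		// any context
 */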

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
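
/*
 * Example (illustrative, names hypothetical): accounting an IO.  Only
 * the REQ_WRITE and REQ_SYNC bits of @rw are consulted, so a request's
 * full REQ_* mask can be passed through unmasked.
 *
 *	blkg_rwstat_add(&fg->serviced, rw_flags, 1);
 *	blkg_rwstat_add(&fg->service_bytes, rw_flags, nr_bytes);
 */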

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read and return the current snapshot of @rwstat.  This function can be
 * called without synchronization and takes care of u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>
#include <linux/llist.h>

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64 bytes[BLKG_IOSTAT_NR];
	u64 ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync sync;
	struct blkcg_gq *blkg;
	struct llist_node lnode;
	int lqueued;	/* queued in llist */
	struct blkg_iostat cur;
	struct blkg_iostat last;
};
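
/*
 * Example (hedged sketch): reading a consistent snapshot of one CPU's
 * counters with the embedded u64_stats_sync.
 *
 *	struct blkg_iostat_set *is = per_cpu_ptr(blkg->iostat_cpu, cpu);
 *	struct blkg_iostat cur;
 *	unsigned int seq;
 *
 *	do {
 *		seq = u64_stats_fetch_begin(&is->sync);
 *		cur = is->cur;
 *	} while (u64_stats_fetch_retry(&is->sync, seq));
 */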

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;
	struct hlist_node blkcg_node;
	struct blkcg *blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq *parent;

	/* reference count */
	struct percpu_ref refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool online;

	struct blkg_iostat_set __percpu *iostat_cpu;
	struct blkg_iostat_set iostat;

	struct blkg_policy_data *pd[BLKCG_MAX_POLS];
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spinlock_t async_bio_lock;
	struct bio_list async_bios;
#endif
	union {
		struct work_struct async_bio_work;
		struct work_struct free_work;
	};

	atomic_t use_delay;
	atomic64_t delay_nsec;
	atomic64_t delay_start;
	u64 last_delay;
	int last_use;

	struct rcu_head rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state css;
	spinlock_t lock;
	refcount_t online_pin;

	struct radix_tree_root blkg_tree;
	struct blkcg_gq __rcu *blkg_hint;
	struct hlist_head blkg_list;

	struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];

	struct list_head all_blkcgs_node;

	/*
	 * List of updated percpu blkg_iostat_set's since the last flush.
	 */
	struct llist_head __percpu *lhead;

#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq *blkg;
	int plid;
	bool online;
};

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg *blkcg;
	int plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
		struct seq_file *s);

struct blkcg_policy {
	int plid;
	/* cgroup files for the policy */
	struct cftype *dfl_cftypes;
	struct cftype *legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
	blkcg_pol_free_cpd_fn *cpd_free_fn;

	blkcg_pol_alloc_pd_fn *pd_alloc_fn;
	blkcg_pol_init_pd_fn *pd_init_fn;
	blkcg_pol_online_pd_fn *pd_online_fn;
	blkcg_pol_offline_pd_fn *pd_offline_fn;
	blkcg_pol_free_pd_fn *pd_free_fn;
	blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn *pd_stat_fn;
};
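
/*
 * Example (illustrative sketch, hypothetical names): with the
 * alloc/free split a policy allocates its own pd, embedding struct
 * blkg_policy_data at the start.
 *
 *	struct foo_grp {
 *		struct blkg_policy_data pd;	// must come first
 *		u64 budget;
 *	};
 *
 *	static struct blkg_policy_data *foo_pd_alloc(struct gendisk *disk,
 *						     struct blkcg *blkcg,
 *						     gfp_t gfp)
 *	{
 *		struct foo_grp *fg;
 *
 *		fg = kzalloc_node(sizeof(*fg), gfp, disk->node_id);
 *		return fg ? &fg->pd : NULL;
 *	}
 *
 *	static void foo_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(container_of(pd, struct foo_grp, pd));
 *	}
 *
 *	static struct blkcg_policy foo_policy = {
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 */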

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	char *input;
	char *body;
	struct block_device *bdev;
	struct blkcg_gq *blkg;
};

void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input);
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_exit(struct blkg_conf_ctx *ctx);
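
/*
 * Example (hedged sketch of the init/prep/exit flow; error handling
 * abbreviated, foo_policy and foo_apply() hypothetical):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, buf);
 *	ret = blkg_conf_prep(blkcg, &foo_policy, &ctx);
 *	if (!ret) {
 *		// ctx.blkg is pinned and ctx.body points past "MAJ:MIN"
 *		ret = foo_apply(ctx.blkg, ctx.body);
 *	}
 *	blkg_conf_exit(&ctx);	// cleans up whether or not prep succeeded
 *	return ret;
 */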

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg_css() to look up the actual context for
 * the bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.
 *
 * Must be called in an RCU critical section.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference_check(blkcg->blkg_hint,
				     lockdep_is_held(&q->queue_lock));
	if (blkg && blkg->q == q)
		return blkg;

	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q != q)
		blkg = NULL;
	return blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}
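
/*
 * Example (illustrative): pinning a blkg found under RCU so it can be
 * used after the read section ends.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (!blkg_tryget(blkg))
 *		blkg = NULL;	// being torn down, treat as missing
 *	rcu_read_unlock();
 *	...
 *	if (blkg)
 *		blkg_put(blkg);	// drop the pinned reference
 */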

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
		;

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
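
/*
 * Example (illustrative): the set/clear pair is for controllers that
 * manage a fixed delay themselves, as opposed to the decaying
 * use/unuse counting above.
 *
 *	blkcg_set_delay(blkg, NSEC_PER_MSEC);	// start penalizing
 *	...
 *	blkcg_clear_delay(blkg);		// condition cleared
 */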

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root
 * should match.  The latter is necessary as we don't want to throttle e.g.
 * a metadata update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
	       bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);

#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct gendisk *disk,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct gendisk *disk,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#endif	/* CONFIG_BLK_CGROUP */

#endif	/* _BLK_CGROUP_PRIVATE_H */