v3.5.6
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkcg_gq			*blkg;

	/* used during policy activation */
	struct list_head		alloc_node;
};
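
/*
 * Editor's sketch (not part of the kernel header): what a policy's
 * per-blkg private data might look like given the embedding rule in the
 * comment above.  "example_pd" and its fields are hypothetical.
 */
struct example_pd {
	struct blkg_policy_data	pd;		/* must be the first member */
	uint64_t		limit;		/* policy-specific config */
	struct blkg_rwstat	serviced;	/* policy-specific stats */
};
/* A policy sets .pd_size = sizeof(struct example_pd); blkcg core then
 * allocates that much per blkg, and a blkg_to_pd() result converts back
 * via container_of(pdata, struct example_pd, pd). */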

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* reference count */
	int				refcnt;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;

struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
struct blkcg *bio_blkcg(struct bio *bio);
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
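
/*
 * Editor's sketch: how a policy might wire itself up with the hooks and
 * registration functions above, mirroring the flow of blk-throttle and
 * cfq-iosched.  "example_pol" and "example_pd_init" are hypothetical;
 * blkg_to_pd() is defined further down in this header.
 */
static struct blkcg_policy example_pol;

static void example_pd_init(struct blkcg_gq *blkg)
{
	struct example_pd *epd =
		container_of(blkg_to_pd(blkg, &example_pol),
			     struct example_pd, pd);

	epd->limit = 0;
}

static struct blkcg_policy example_pol = {
	.pd_size	= sizeof(struct example_pd),
	.pd_init_fn	= example_pd_init,
};
/* Module init would call blkcg_policy_register(&example_pol) to get a
 * plid assigned; blkcg_activate_policy(q, &example_pol) then allocates
 * and initializes a pd on every blkg of @q. */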

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
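
/*
 * Editor's sketch: typical use of blkg_conf_prep()/blkg_conf_finish() in
 * a cgroup file write handler.  @input is parsed as "MAJ:MIN VAL"; the
 * parsed VAL is returned in ctx.v and ctx.blkg points at the blkg for the
 * named device.  The handler below is hypothetical.
 */
static int example_set_limit(struct blkcg *blkcg, const char *input)
{
	struct blkg_conf_ctx ctx;
	struct example_pd *epd;
	int ret;

	ret = blkg_conf_prep(blkcg, &example_pol, input, &ctx);
	if (ret)
		return ret;

	epd = container_of(blkg_to_pd(ctx.blkg, &example_pol),
			   struct example_pd, pd);
	epd->limit = ctx.v;

	blkg_conf_finish(&ctx);	/* drops the refs prep acquired */
	return 0;
}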

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	rcu_read_lock();
	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	rcu_read_unlock();
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
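
/*
 * Editor's sketch: the lookup/get/put pairing implied by the comments
 * above.  blkg_lookup() requires the RCU read lock; the reference
 * manipulation itself must happen under the queue lock.
 */
static void example_pin_blkg(struct request_queue *q, struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup(blkcg, q);
	if (blkg) {
		blkg_get(blkg);		/* pin while we poke at it */
		/* ... use blkg ... */
		blkg_put(blkg);		/* last put calls __blkg_release() */
	}

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
}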

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
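
/*
 * Editor's sketch: charging one bio to the hypothetical "serviced" rwstat
 * from the example_pd sketch above.  bio->bi_rw carries the REQ_WRITE and
 * REQ_SYNC bits that blkg_rwstat_add() keys off.
 */
static void example_account_bio(struct example_pd *epd, struct bio *bio)
{
	blkg_rwstat_add(&epd->serviced, bio->bi_rw, bio->bi_size);
}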

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */
v3.15
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	int				refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(css_parent(&blkcg->css));
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
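
/*
 * Editor's sketch: walking a subtree with the pre-order iterator above.
 * The RCU read lock is mandatory; also holding the blkcg or queue lock
 * makes the walk see exactly the online blkgs.
 */
static void example_walk_subtree(struct blkcg_gq *p_blkg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg) {
		/* @p_blkg itself is the first blkg visited */
	}
	rcu_read_unlock();
}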

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
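
/*
 * Editor's sketch of the request_list lifecycle around these helpers,
 * loosely following the request allocation path in block/blk-core.c.
 * Runs under queue_lock; error handling is trimmed down.
 */
static struct request *example_alloc_rq(struct request_queue *q,
					struct bio *bio, gfp_t gfp_mask)
{
	struct request_list *rl;
	struct request *rq;

	rl = blk_get_rl(q, bio);	/* takes a blkg ref unless root */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq) {
		blk_put_rl(rl);
		return NULL;
	}
	blk_rq_set_rl(rq, rl);		/* so freeing knows where to return it */
	return rq;
}
/* On completion the free path undoes the pairing:
 * blk_put_rl(blk_rq_rl(rq)); */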

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}
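
/*
 * Editor's sketch: propagating counters to the parent when a blkg goes
 * offline, the way cfq-iosched feeds its "recursive" statistics with
 * blkg_rwstat_merge().  Reuses the hypothetical example_pd/example_pol
 * names from the sketches in the v3.5.6 listing above.
 */
static void example_pd_offline(struct blkcg_gq *blkg)
{
	struct example_pd *epd =
		container_of(blkg_to_pd(blkg, &example_pol),
			     struct example_pd, pd);

	if (blkg->parent) {
		struct example_pd *ppd =
			container_of(blkg_to_pd(blkg->parent, &example_pol),
				     struct example_pd, pd);

		blkg_rwstat_merge(&ppd->serviced, &epd->serviced);
	}
}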

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */