v3.15 (block/blk-ioc.c)
 
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/* Exit an icq. Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/* Release an icq.  Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

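/*
 * Illustrative sketch (not part of blk-ioc.c): put_io_context_active() above
 * undoes get_io_context_active(), which lives in include/linux/iocontext.h.
 * Roughly, and omitting the sanity checks of the real helper, taking an
 * active reference also pins the ioc itself:
 */
static inline void get_io_context_active(struct io_context *ioc)
{
	atomic_long_inc(&ioc->refcount);	/* paired with put_io_context() */
	atomic_inc(&ioc->active_ref);		/* paired with put_io_context_active() */
}
/*
 * ioc_task_link() in the same header builds on this at fork time and
 * additionally increments nr_tasks, which exit_io_context() above decrements.
 */
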
/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find a icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);
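
The sketch below is illustrative only and is not part of blk-ioc.c: the helper name example_get_icq() and its error handling are invented for this document. It shows how an I/O scheduler's request-setup path typically combines the exported helpers above, first making sure %current has an io_context and then looking up or creating the per-(ioc, queue) icq.

/* Hypothetical sketch of a request-setup path built on the helpers above. */
static struct io_cq *example_get_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct io_context *ioc;
	struct io_cq *icq;

	/* Make sure %current has an ioc; this takes a reference on it. */
	ioc = get_task_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	/* Fast path: an icq for this (ioc, q) pair may already exist. */
	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	/* Slow path: allocate and link a new icq (takes both locks itself). */
	if (!icq)
		icq = ioc_create_icq(ioc, q, gfp_mask);

	/*
	 * Drop the reference from get_task_io_context().  A caller that
	 * stores @icq in a request would normally take its own reference
	 * with get_io_context(icq->ioc) first, much as the legacy request
	 * allocation path in blk-core.c does.
	 */
	put_io_context(ioc);
	return icq;
}
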
v4.17 (block/blk-ioc.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);

	if (q->mq_ops) {
		spin_unlock_irq(q->queue_lock);
		__ioc_clear_queue(&icq_list);
	} else {
		__ioc_clear_queue(&icq_list);
		spin_unlock_irq(q->queue_lock);
	}
}

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find a icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);
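
For context on the init_icq/exit_icq hooks that ioc_create_icq() and ioc_exit_icq() invoke in the v4.17 version, the sketch below shows how a blk-mq I/O scheduler typically opts into the icq machinery. It is a hedged illustration modelled on how in-tree schedulers such as BFQ wire themselves up; every "my_" name is hypothetical.

/* Hypothetical elevator-side wiring for the icq hooks used above. */
struct my_icq {
	struct io_cq	icq;		/* must be the first member */
	int		my_private_state;
};

static void my_init_icq(struct io_cq *icq)
{
	struct my_icq *mi = container_of(icq, struct my_icq, icq);

	mi->my_private_state = 0;	/* per-(ioc, queue) scheduler state */
}

static void my_exit_icq(struct io_cq *icq)
{
	/* tear down whatever my_init_icq() and the hot path set up */
}

static struct elevator_type my_iosched = {
	.uses_mq	= true,
	.icq_size	= sizeof(struct my_icq),
	.icq_align	= __alignof__(struct my_icq),
	.ops.mq		= {
		.init_icq	= my_init_icq,
		.exit_icq	= my_exit_icq,
		/* ...the remaining blk-mq elevator callbacks... */
	},
	.elevator_name	= "my-iosched",
	.elevator_owner	= THIS_MODULE,
};

Registering such an elevator with elv_register() creates et->icq_cache from icq_size and icq_align, which is the cache ioc_create_icq() above allocates from.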