v6.2 (block/blk-ioc.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions related to io context handling
  4 */
  5#include <linux/kernel.h>
  6#include <linux/module.h>
  7#include <linux/init.h>
  8#include <linux/bio.h>
  9#include <linux/blkdev.h>
 10#include <linux/slab.h>
 11#include <linux/security.h>
 12#include <linux/sched/task.h>
 13
 14#include "blk.h"
 15#include "blk-mq-sched.h"
 16
 17/*
 18 * For io context allocations
 19 */
 20static struct kmem_cache *iocontext_cachep;
 21
 22#ifdef CONFIG_BLK_ICQ
 23/**
 24 * get_io_context - increment reference count to io_context
 25 * @ioc: io_context to get
 26 *
 27 * Increment reference count to @ioc.
 28 */
 29static void get_io_context(struct io_context *ioc)
 30{
 31	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
 32	atomic_long_inc(&ioc->refcount);
 33}
 34
 35static void icq_free_icq_rcu(struct rcu_head *head)
 36{
 37	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
 38
 39	kmem_cache_free(icq->__rcu_icq_cache, icq);
 40}
 41
 42/*
 43 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 44 * and queue locked for legacy.
 45 */
 46static void ioc_exit_icq(struct io_cq *icq)
 47{
 48	struct elevator_type *et = icq->q->elevator->type;
 49
 50	if (icq->flags & ICQ_EXITED)
 51		return;
 52
 53	if (et->ops.exit_icq)
 54		et->ops.exit_icq(icq);
 55
 56	icq->flags |= ICQ_EXITED;
 57}
 58
 59static void ioc_exit_icqs(struct io_context *ioc)
 60{
 61	struct io_cq *icq;
 62
 63	spin_lock_irq(&ioc->lock);
 64	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
 65		ioc_exit_icq(icq);
 66	spin_unlock_irq(&ioc->lock);
 67}
 68
 69/*
 70 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 71 * and queue locked for legacy.
 72 */
 73static void ioc_destroy_icq(struct io_cq *icq)
 74{
 75	struct io_context *ioc = icq->ioc;
 76	struct request_queue *q = icq->q;
 77	struct elevator_type *et = q->elevator->type;
 78
 79	lockdep_assert_held(&ioc->lock);
 80
 81	radix_tree_delete(&ioc->icq_tree, icq->q->id);
 82	hlist_del_init(&icq->ioc_node);
 83	list_del_init(&icq->q_node);
 84
 85	/*
 86	 * Both setting lookup hint to and clearing it from @icq are done
 87	 * under queue_lock.  If it's not pointing to @icq now, it never
 88	 * will.  Hint assignment itself can race safely.
 89	 */
 90	if (rcu_access_pointer(ioc->icq_hint) == icq)
 91		rcu_assign_pointer(ioc->icq_hint, NULL);
 92
 93	ioc_exit_icq(icq);
 94
 95	/*
 96	 * @icq->q might have gone away by the time RCU callback runs
 97	 * making it impossible to determine icq_cache.  Record it in @icq.
 98	 */
 99	icq->__rcu_icq_cache = et->icq_cache;
100	icq->flags |= ICQ_DESTROYED;
101	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
102}
103
104/*
105 * Slow path for ioc release in put_io_context().  Performs double-lock
106 * dancing to unlink all icq's and then frees ioc.
107 */
108static void ioc_release_fn(struct work_struct *work)
109{
110	struct io_context *ioc = container_of(work, struct io_context,
111					      release_work);
112	spin_lock_irq(&ioc->lock);
113
114	while (!hlist_empty(&ioc->icq_list)) {
115		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
116						struct io_cq, ioc_node);
117		struct request_queue *q = icq->q;
118
119		if (spin_trylock(&q->queue_lock)) {
120			ioc_destroy_icq(icq);
121			spin_unlock(&q->queue_lock);
122		} else {
123			/* Make sure q and icq cannot be freed. */
124			rcu_read_lock();
125
126			/* Re-acquire the locks in the correct order. */
127			spin_unlock(&ioc->lock);
128			spin_lock(&q->queue_lock);
129			spin_lock(&ioc->lock);
130
131			/*
132			 * The icq may have been destroyed when the ioc lock
133			 * was released.
134			 */
135			if (!(icq->flags & ICQ_DESTROYED))
136				ioc_destroy_icq(icq);
137
138			spin_unlock(&q->queue_lock);
139			rcu_read_unlock();
140		}
141	}
142
143	spin_unlock_irq(&ioc->lock);
144
145	kmem_cache_free(iocontext_cachep, ioc);
146}
147
148/*
149 * Releasing icqs requires reverse order double locking and we may already be
150 * holding a queue_lock.  Do it asynchronously from a workqueue.
151 */
152static bool ioc_delay_free(struct io_context *ioc)
153{
154	unsigned long flags;
155
156	spin_lock_irqsave(&ioc->lock, flags);
157	if (!hlist_empty(&ioc->icq_list)) {
158		queue_work(system_power_efficient_wq, &ioc->release_work);
159		spin_unlock_irqrestore(&ioc->lock, flags);
160		return true;
161	}
162	spin_unlock_irqrestore(&ioc->lock, flags);
163	return false;
164}
165
166/**
167 * ioc_clear_queue - break any ioc association with the specified queue
168 * @q: request_queue being cleared
169 *
170 * Walk @q->icq_list and exit all io_cq's.
171 */
172void ioc_clear_queue(struct request_queue *q)
173{
174	LIST_HEAD(icq_list);
175
176	spin_lock_irq(&q->queue_lock);
177	list_splice_init(&q->icq_list, &icq_list);
178	spin_unlock_irq(&q->queue_lock);
179
180	rcu_read_lock();
181	while (!list_empty(&icq_list)) {
182		struct io_cq *icq =
183			list_entry(icq_list.next, struct io_cq, q_node);
184
185		spin_lock_irq(&icq->ioc->lock);
186		if (!(icq->flags & ICQ_DESTROYED))
187			ioc_destroy_icq(icq);
188		spin_unlock_irq(&icq->ioc->lock);
189	}
190	rcu_read_unlock();
191}
192#else /* CONFIG_BLK_ICQ */
193static inline void ioc_exit_icqs(struct io_context *ioc)
194{
195}
196static inline bool ioc_delay_free(struct io_context *ioc)
197{
198	return false;
199}
200#endif /* CONFIG_BLK_ICQ */
201
202/**
203 * put_io_context - put a reference of io_context
204 * @ioc: io_context to put
205 *
206 * Decrement reference count of @ioc and release it if the count reaches
207 * zero.
208 */
209void put_io_context(struct io_context *ioc)
210{
211	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
212	if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc))
213		kmem_cache_free(iocontext_cachep, ioc);
214}
215EXPORT_SYMBOL_GPL(put_io_context);
216
217/* Called by the exiting task */
218void exit_io_context(struct task_struct *task)
219{
220	struct io_context *ioc;
221
222	task_lock(task);
223	ioc = task->io_context;
224	task->io_context = NULL;
225	task_unlock(task);
226
227	if (atomic_dec_and_test(&ioc->active_ref)) {
228		ioc_exit_icqs(ioc);
229		put_io_context(ioc);
230	}
231}
232
233static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
234{
235	struct io_context *ioc;
236
237	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
238				    node);
239	if (unlikely(!ioc))
240		return NULL;
241
242	atomic_long_set(&ioc->refcount, 1);
243	atomic_set(&ioc->active_ref, 1);
244#ifdef CONFIG_BLK_ICQ
245	spin_lock_init(&ioc->lock);
246	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
247	INIT_HLIST_HEAD(&ioc->icq_list);
248	INIT_WORK(&ioc->release_work, ioc_release_fn);
249#endif
250	ioc->ioprio = IOPRIO_DEFAULT;
251
252	return ioc;
253}
254
255int set_task_ioprio(struct task_struct *task, int ioprio)
256{
257	int err;
258	const struct cred *cred = current_cred(), *tcred;
259
260	rcu_read_lock();
261	tcred = __task_cred(task);
262	if (!uid_eq(tcred->uid, cred->euid) &&
263	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
264		rcu_read_unlock();
265		return -EPERM;
266	}
267	rcu_read_unlock();
268
269	err = security_task_setioprio(task, ioprio);
270	if (err)
271		return err;
272
273	task_lock(task);
274	if (unlikely(!task->io_context)) {
275		struct io_context *ioc;
276
277		task_unlock(task);
278
279		ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE);
280		if (!ioc)
281			return -ENOMEM;
282
283		task_lock(task);
284		if (task->flags & PF_EXITING) {
285			kmem_cache_free(iocontext_cachep, ioc);
286			goto out;
287		}
288		if (task->io_context)
289			kmem_cache_free(iocontext_cachep, ioc);
290		else
291			task->io_context = ioc;
292	}
293	task->io_context->ioprio = ioprio;
294out:
295	task_unlock(task);
296	return 0;
297}
298EXPORT_SYMBOL_GPL(set_task_ioprio);
299
300int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
301{
302	struct io_context *ioc = current->io_context;
303
304	/*
305	 * Share io context with parent, if CLONE_IO is set
306	 */
307	if (clone_flags & CLONE_IO) {
308		atomic_inc(&ioc->active_ref);
309		tsk->io_context = ioc;
310	} else if (ioprio_valid(ioc->ioprio)) {
311		tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
312		if (!tsk->io_context)
313			return -ENOMEM;
314		tsk->io_context->ioprio = ioc->ioprio;
315	}
316
317	return 0;
318}
319
320#ifdef CONFIG_BLK_ICQ
321/**
322 * ioc_lookup_icq - lookup io_cq from ioc
323 * @q: the associated request_queue
324 *
325 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
326 * with @q->queue_lock held.
327 */
328struct io_cq *ioc_lookup_icq(struct request_queue *q)
329{
330	struct io_context *ioc = current->io_context;
331	struct io_cq *icq;
332
333	lockdep_assert_held(&q->queue_lock);
334
335	/*
336	 * icq's are indexed from @ioc using radix tree and hint pointer,
337	 * both of which are protected with RCU.  All removals are done
338	 * holding both q and ioc locks, and we're holding q lock - if we
339	 * find a icq which points to us, it's guaranteed to be valid.
340	 */
341	rcu_read_lock();
342	icq = rcu_dereference(ioc->icq_hint);
343	if (icq && icq->q == q)
344		goto out;
345
346	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
347	if (icq && icq->q == q)
348		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
349	else
350		icq = NULL;
351out:
352	rcu_read_unlock();
353	return icq;
354}
355EXPORT_SYMBOL(ioc_lookup_icq);
356
357/**
358 * ioc_create_icq - create and link io_cq
359 * @q: request_queue of interest
360 *
361 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
362 * will be created using @gfp_mask.
363 *
364 * The caller is responsible for ensuring @ioc won't go away and @q is
365 * alive and will stay alive until this function returns.
366 */
367static struct io_cq *ioc_create_icq(struct request_queue *q)
368{
369	struct io_context *ioc = current->io_context;
370	struct elevator_type *et = q->elevator->type;
371	struct io_cq *icq;
372
373	/* allocate stuff */
374	icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
375				    q->node);
376	if (!icq)
377		return NULL;
378
379	if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
380		kmem_cache_free(et->icq_cache, icq);
381		return NULL;
382	}
383
384	icq->ioc = ioc;
385	icq->q = q;
386	INIT_LIST_HEAD(&icq->q_node);
387	INIT_HLIST_NODE(&icq->ioc_node);
388
389	/* lock both q and ioc and try to link @icq */
390	spin_lock_irq(&q->queue_lock);
391	spin_lock(&ioc->lock);
392
393	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
394		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
395		list_add(&icq->q_node, &q->icq_list);
396		if (et->ops.init_icq)
397			et->ops.init_icq(icq);
398	} else {
399		kmem_cache_free(et->icq_cache, icq);
400		icq = ioc_lookup_icq(q);
401		if (!icq)
402			printk(KERN_ERR "cfq: icq link failed!\n");
403	}
404
405	spin_unlock(&ioc->lock);
406	spin_unlock_irq(&q->queue_lock);
407	radix_tree_preload_end();
408	return icq;
409}
410
411struct io_cq *ioc_find_get_icq(struct request_queue *q)
412{
413	struct io_context *ioc = current->io_context;
414	struct io_cq *icq = NULL;
415
416	if (unlikely(!ioc)) {
417		ioc = alloc_io_context(GFP_ATOMIC, q->node);
418		if (!ioc)
419			return NULL;
420
421		task_lock(current);
422		if (current->io_context) {
423			kmem_cache_free(iocontext_cachep, ioc);
424			ioc = current->io_context;
425		} else {
426			current->io_context = ioc;
427		}
428
429		get_io_context(ioc);
430		task_unlock(current);
431	} else {
432		get_io_context(ioc);
433
434		spin_lock_irq(&q->queue_lock);
435		icq = ioc_lookup_icq(q);
436		spin_unlock_irq(&q->queue_lock);
437	}
438
439	if (!icq) {
440		icq = ioc_create_icq(q);
441		if (!icq) {
442			put_io_context(ioc);
443			return NULL;
444		}
445	}
446	return icq;
447}
448EXPORT_SYMBOL_GPL(ioc_find_get_icq);
449#endif /* CONFIG_BLK_ICQ */
450
451static int __init blk_ioc_init(void)
452{
453	iocontext_cachep = kmem_cache_create("blkdev_ioc",
454			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
455	return 0;
456}
457subsys_initcall(blk_ioc_init);
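The ioprio value that set_task_ioprio() stores in task->io_context->ioprio above is the same packed class/data word userspace passes to the ioprio_set(2) system call. Below is a minimal userspace sketch, assuming the kernel's packing of a 3-bit class above a 13-bit data field; the IOPRIO_* constants are re-derived locally for illustration rather than pulled from a uapi header.

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Local re-derivation of the kernel's ioprio packing, for this example only. */
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(cl, data)	(((cl) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_CLASS_BE		2	/* best-effort scheduling class */
#define IOPRIO_WHO_PROCESS	1	/* "who" argument selects a single pid/tid */

int main(void)
{
	/* best-effort class, level 4; levels run 0 (highest) to 7 (lowest) */
	int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

	/* pid 0 means the calling thread; there is no glibc wrapper for this call */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio) < 0) {
		perror("ioprio_set");
		return 1;
	}
	printf("I/O priority set to 0x%x\n", ioprio);
	return 0;
}

On the kernel side this ends up in set_task_ioprio(), which allocates the io_context on demand and records the value in ioc->ioprio; __copy_io() then propagates it to children forked without CLONE_IO.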
v5.14.15 (block/blk-ioc.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions related to io context handling
  4 */
  5#include <linux/kernel.h>
  6#include <linux/module.h>
  7#include <linux/init.h>
  8#include <linux/bio.h>
  9#include <linux/blkdev.h>
 10#include <linux/slab.h>
 11#include <linux/sched/task.h>
 12
 13#include "blk.h"
 14
 15/*
 16 * For io context allocations
 17 */
 18static struct kmem_cache *iocontext_cachep;
 19
 20/**
 21 * get_io_context - increment reference count to io_context
 22 * @ioc: io_context to get
 23 *
 24 * Increment reference count to @ioc.
 25 */
 26void get_io_context(struct io_context *ioc)
 27{
 28	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
 29	atomic_long_inc(&ioc->refcount);
 30}
 31
 32static void icq_free_icq_rcu(struct rcu_head *head)
 33{
 34	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
 35
 36	kmem_cache_free(icq->__rcu_icq_cache, icq);
 37}
 38
 39/*
 40 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 41 * and queue locked for legacy.
 42 */
 43static void ioc_exit_icq(struct io_cq *icq)
 44{
 45	struct elevator_type *et = icq->q->elevator->type;
 46
 47	if (icq->flags & ICQ_EXITED)
 48		return;
 49
 50	if (et->ops.exit_icq)
 51		et->ops.exit_icq(icq);
 52
 53	icq->flags |= ICQ_EXITED;
 54}
 55
 56/*
 57 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 58 * and queue locked for legacy.
 59 */
 60static void ioc_destroy_icq(struct io_cq *icq)
 61{
 62	struct io_context *ioc = icq->ioc;
 63	struct request_queue *q = icq->q;
 64	struct elevator_type *et = q->elevator->type;
 65
 66	lockdep_assert_held(&ioc->lock);
 67
 68	radix_tree_delete(&ioc->icq_tree, icq->q->id);
 69	hlist_del_init(&icq->ioc_node);
 70	list_del_init(&icq->q_node);
 71
 72	/*
 73	 * Both setting lookup hint to and clearing it from @icq are done
 74	 * under queue_lock.  If it's not pointing to @icq now, it never
 75	 * will.  Hint assignment itself can race safely.
 76	 */
 77	if (rcu_access_pointer(ioc->icq_hint) == icq)
 78		rcu_assign_pointer(ioc->icq_hint, NULL);
 79
 80	ioc_exit_icq(icq);
 81
 82	/*
 83	 * @icq->q might have gone away by the time RCU callback runs
 84	 * making it impossible to determine icq_cache.  Record it in @icq.
 85	 */
 86	icq->__rcu_icq_cache = et->icq_cache;
 87	icq->flags |= ICQ_DESTROYED;
 88	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
 89}
 90
 91/*
 92 * Slow path for ioc release in put_io_context().  Performs double-lock
 93 * dancing to unlink all icq's and then frees ioc.
 94 */
 95static void ioc_release_fn(struct work_struct *work)
 96{
 97	struct io_context *ioc = container_of(work, struct io_context,
 98					      release_work);
 99	spin_lock_irq(&ioc->lock);
100
101	while (!hlist_empty(&ioc->icq_list)) {
102		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
103						struct io_cq, ioc_node);
104		struct request_queue *q = icq->q;
105
106		if (spin_trylock(&q->queue_lock)) {
107			ioc_destroy_icq(icq);
108			spin_unlock(&q->queue_lock);
109		} else {
110			/* Make sure q and icq cannot be freed. */
111			rcu_read_lock();
112
113			/* Re-acquire the locks in the correct order. */
114			spin_unlock(&ioc->lock);
115			spin_lock(&q->queue_lock);
116			spin_lock(&ioc->lock);
117
118			/*
119			 * The icq may have been destroyed when the ioc lock
120			 * was released.
121			 */
122			if (!(icq->flags & ICQ_DESTROYED))
123				ioc_destroy_icq(icq);
124
125			spin_unlock(&q->queue_lock);
126			rcu_read_unlock();
127		}
128	}
129
130	spin_unlock_irq(&ioc->lock);
131
132	kmem_cache_free(iocontext_cachep, ioc);
133}
134
135/**
136 * put_io_context - put a reference of io_context
137 * @ioc: io_context to put
138 *
139 * Decrement reference count of @ioc and release it if the count reaches
140 * zero.
141 */
142void put_io_context(struct io_context *ioc)
143{
144	unsigned long flags;
145	bool free_ioc = false;
146
147	if (ioc == NULL)
148		return;
149
150	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
151
152	/*
153	 * Releasing ioc requires reverse order double locking and we may
154	 * already be holding a queue_lock.  Do it asynchronously from wq.
155	 */
156	if (atomic_long_dec_and_test(&ioc->refcount)) {
157		spin_lock_irqsave(&ioc->lock, flags);
158		if (!hlist_empty(&ioc->icq_list))
159			queue_work(system_power_efficient_wq,
160					&ioc->release_work);
161		else
162			free_ioc = true;
163		spin_unlock_irqrestore(&ioc->lock, flags);
164	}
165
166	if (free_ioc)
167		kmem_cache_free(iocontext_cachep, ioc);
168}
169
170/**
171 * put_io_context_active - put active reference on ioc
172 * @ioc: ioc of interest
173 *
174 * Undo get_io_context_active().  If active reference reaches zero after
175 * put, @ioc can never issue further IOs and ioscheds are notified.
176 */
177void put_io_context_active(struct io_context *ioc)
178{
179	struct io_cq *icq;
180
181	if (!atomic_dec_and_test(&ioc->active_ref)) {
182		put_io_context(ioc);
183		return;
184	}
185
186	spin_lock_irq(&ioc->lock);
187	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
188		if (icq->flags & ICQ_EXITED)
189			continue;
190
191		ioc_exit_icq(icq);
192	}
193	spin_unlock_irq(&ioc->lock);
194
195	put_io_context(ioc);
196}
197
198/* Called by the exiting task */
199void exit_io_context(struct task_struct *task)
200{
201	struct io_context *ioc;
202
203	task_lock(task);
204	ioc = task->io_context;
205	task->io_context = NULL;
206	task_unlock(task);
207
208	atomic_dec(&ioc->nr_tasks);
209	put_io_context_active(ioc);
210}
211
212static void __ioc_clear_queue(struct list_head *icq_list)
213{
214	unsigned long flags;
215
216	rcu_read_lock();
217	while (!list_empty(icq_list)) {
218		struct io_cq *icq = list_entry(icq_list->next,
219						struct io_cq, q_node);
220		struct io_context *ioc = icq->ioc;
221
222		spin_lock_irqsave(&ioc->lock, flags);
223		if (icq->flags & ICQ_DESTROYED) {
224			spin_unlock_irqrestore(&ioc->lock, flags);
225			continue;
226		}
227		ioc_destroy_icq(icq);
228		spin_unlock_irqrestore(&ioc->lock, flags);
229	}
230	rcu_read_unlock();
231}
232
233/**
234 * ioc_clear_queue - break any ioc association with the specified queue
235 * @q: request_queue being cleared
236 *
237 * Walk @q->icq_list and exit all io_cq's.
238 */
239void ioc_clear_queue(struct request_queue *q)
240{
241	LIST_HEAD(icq_list);
242
243	spin_lock_irq(&q->queue_lock);
244	list_splice_init(&q->icq_list, &icq_list);
245	spin_unlock_irq(&q->queue_lock);
246
247	__ioc_clear_queue(&icq_list);
248}
249
250int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
251{
252	struct io_context *ioc;
253	int ret;
254
255	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
256				    node);
257	if (unlikely(!ioc))
258		return -ENOMEM;
259
260	/* initialize */
261	atomic_long_set(&ioc->refcount, 1);
262	atomic_set(&ioc->nr_tasks, 1);
263	atomic_set(&ioc->active_ref, 1);
264	spin_lock_init(&ioc->lock);
265	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
266	INIT_HLIST_HEAD(&ioc->icq_list);
267	INIT_WORK(&ioc->release_work, ioc_release_fn);
268
269	/*
270	 * Try to install.  ioc shouldn't be installed if someone else
271	 * already did or @task, which isn't %current, is exiting.  Note
272	 * that we need to allow ioc creation on exiting %current as exit
273	 * path may issue IOs from e.g. exit_files().  The exit path is
274	 * responsible for not issuing IO after exit_io_context().
275	 */
276	task_lock(task);
277	if (!task->io_context &&
278	    (task == current || !(task->flags & PF_EXITING)))
279		task->io_context = ioc;
280	else
281		kmem_cache_free(iocontext_cachep, ioc);
282
283	ret = task->io_context ? 0 : -EBUSY;
284
285	task_unlock(task);
286
287	return ret;
288}
289
290/**
291 * get_task_io_context - get io_context of a task
292 * @task: task of interest
293 * @gfp_flags: allocation flags, used if allocation is necessary
294 * @node: allocation node, used if allocation is necessary
295 *
296 * Return io_context of @task.  If it doesn't exist, it is created with
297 * @gfp_flags and @node.  The returned io_context has its reference count
298 * incremented.
299 *
300 * This function always goes through task_lock() and it's better to use
301 * %current->io_context + get_io_context() for %current.
302 */
303struct io_context *get_task_io_context(struct task_struct *task,
304				       gfp_t gfp_flags, int node)
305{
306	struct io_context *ioc;
307
308	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
309
310	do {
311		task_lock(task);
312		ioc = task->io_context;
313		if (likely(ioc)) {
314			get_io_context(ioc);
315			task_unlock(task);
316			return ioc;
317		}
318		task_unlock(task);
319	} while (!create_task_io_context(task, gfp_flags, node));
320
321	return NULL;
322}
323
324/**
325 * ioc_lookup_icq - lookup io_cq from ioc
326 * @ioc: the associated io_context
327 * @q: the associated request_queue
328 *
329 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
330 * with @q->queue_lock held.
331 */
332struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
333{
334	struct io_cq *icq;
335
336	lockdep_assert_held(&q->queue_lock);
337
338	/*
339	 * icq's are indexed from @ioc using radix tree and hint pointer,
340	 * both of which are protected with RCU.  All removals are done
341	 * holding both q and ioc locks, and we're holding q lock - if we
342	 * find a icq which points to us, it's guaranteed to be valid.
343	 */
344	rcu_read_lock();
345	icq = rcu_dereference(ioc->icq_hint);
346	if (icq && icq->q == q)
347		goto out;
348
349	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
350	if (icq && icq->q == q)
351		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
352	else
353		icq = NULL;
354out:
355	rcu_read_unlock();
356	return icq;
357}
358EXPORT_SYMBOL(ioc_lookup_icq);
359
360/**
361 * ioc_create_icq - create and link io_cq
362 * @ioc: io_context of interest
363 * @q: request_queue of interest
364 * @gfp_mask: allocation mask
365 *
366 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
367 * will be created using @gfp_mask.
368 *
369 * The caller is responsible for ensuring @ioc won't go away and @q is
370 * alive and will stay alive until this function returns.
371 */
372struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
373			     gfp_t gfp_mask)
374{
375	struct elevator_type *et = q->elevator->type;
376	struct io_cq *icq;
377
378	/* allocate stuff */
379	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
380				    q->node);
381	if (!icq)
382		return NULL;
383
384	if (radix_tree_maybe_preload(gfp_mask) < 0) {
385		kmem_cache_free(et->icq_cache, icq);
386		return NULL;
387	}
388
389	icq->ioc = ioc;
390	icq->q = q;
391	INIT_LIST_HEAD(&icq->q_node);
392	INIT_HLIST_NODE(&icq->ioc_node);
393
394	/* lock both q and ioc and try to link @icq */
395	spin_lock_irq(&q->queue_lock);
396	spin_lock(&ioc->lock);
397
398	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
399		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
400		list_add(&icq->q_node, &q->icq_list);
401		if (et->ops.init_icq)
402			et->ops.init_icq(icq);
403	} else {
404		kmem_cache_free(et->icq_cache, icq);
405		icq = ioc_lookup_icq(ioc, q);
406		if (!icq)
407			printk(KERN_ERR "cfq: icq link failed!\n");
408	}
409
410	spin_unlock(&ioc->lock);
411	spin_unlock_irq(&q->queue_lock);
412	radix_tree_preload_end();
413	return icq;
414}
415
416static int __init blk_ioc_init(void)
417{
418	iocontext_cachep = kmem_cache_create("blkdev_ioc",
419			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
420	return 0;
421}
422subsys_initcall(blk_ioc_init);
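Both versions allocate icq's from et->icq_cache and call the elevator's init_icq()/exit_icq() hooks, but neither listing shows the scheduler side of that contract. The sketch below is not taken from this file: it shows how an I/O scheduler would declare its per-(task, queue) state, embedding struct io_cq as the first member so the cache object can be handled as an io_cq, advertising size and alignment through struct elevator_type, and recovering the outer structure with container_of() in the callbacks. All demo_* names are hypothetical; the "elevator.h" include path follows the in-tree schedulers under block/.

#include <linux/module.h>
#include <linux/iocontext.h>

#include "elevator.h"

/* Per-(task, queue) context; io_cq must stay the first member. */
struct demo_io_cq {
	struct io_cq	icq;
	int		seek_hint;	/* example of scheduler-private state */
};

static inline struct demo_io_cq *demo_icq(struct io_cq *icq)
{
	return container_of(icq, struct demo_io_cq, icq);
}

/* Called from ioc_create_icq() with both queue_lock and ioc->lock held. */
static void demo_init_icq(struct io_cq *icq)
{
	demo_icq(icq)->seek_hint = -1;
}

/* Called at most once per icq via ioc_exit_icq(), which sets ICQ_EXITED. */
static void demo_exit_icq(struct io_cq *icq)
{
	demo_icq(icq)->seek_hint = -1;
}

static struct elevator_type demo_iosched = {
	.ops = {
		.init_icq	= demo_init_icq,
		.exit_icq	= demo_exit_icq,
		/* .insert_requests, .dispatch_request, ... omitted */
	},
	.icq_size	= sizeof(struct demo_io_cq),
	.icq_align	= __alignof__(struct demo_io_cq),
	.elevator_name	= "demo",
	.elevator_owner	= THIS_MODULE,
};

When such an elevator_type is registered with elv_register(), the elevator core creates et->icq_cache from icq_size and icq_align; that is the cache ioc_create_icq() allocates from and icq_free_icq_rcu() frees back into.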