v3.5.6 (block/blk-ioc.c)
  1/*
  2 * Functions related to io context handling
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/init.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 10#include <linux/slab.h>
 11
 12#include "blk.h"
 13
 14/*
 15 * For io context allocations
 16 */
 17static struct kmem_cache *iocontext_cachep;
 18
 19/**
 20 * get_io_context - increment reference count to io_context
 21 * @ioc: io_context to get
 22 *
 23 * Increment reference count to @ioc.
 24 */
 25void get_io_context(struct io_context *ioc)
 26{
 27	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
 28	atomic_long_inc(&ioc->refcount);
 29}
 30EXPORT_SYMBOL(get_io_context);
 31
 32static void icq_free_icq_rcu(struct rcu_head *head)
 33{
 34	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
 35
 36	kmem_cache_free(icq->__rcu_icq_cache, icq);
 37}
 38
 39/* Exit an icq. Called with both ioc and q locked. */
 40static void ioc_exit_icq(struct io_cq *icq)
 41{
 42	struct elevator_type *et = icq->q->elevator->type;
 43
 44	if (icq->flags & ICQ_EXITED)
 45		return;
 46
 47	if (et->ops.elevator_exit_icq_fn)
 48		et->ops.elevator_exit_icq_fn(icq);
 49
 50	icq->flags |= ICQ_EXITED;
 51}
 52
 53/* Release an icq.  Called with both ioc and q locked. */
 54static void ioc_destroy_icq(struct io_cq *icq)
 55{
 56	struct io_context *ioc = icq->ioc;
 57	struct request_queue *q = icq->q;
 58	struct elevator_type *et = q->elevator->type;
 59
 60	lockdep_assert_held(&ioc->lock);
 61	lockdep_assert_held(q->queue_lock);
 62
 63	radix_tree_delete(&ioc->icq_tree, icq->q->id);
 64	hlist_del_init(&icq->ioc_node);
 65	list_del_init(&icq->q_node);
 66
 67	/*
 68	 * Both setting lookup hint to and clearing it from @icq are done
 69	 * under queue_lock.  If it's not pointing to @icq now, it never
 70	 * will.  Hint assignment itself can race safely.
 71	 */
 72	if (rcu_dereference_raw(ioc->icq_hint) == icq)
 73		rcu_assign_pointer(ioc->icq_hint, NULL);
 74
 75	ioc_exit_icq(icq);
 76
 77	/*
 78	 * @icq->q might have gone away by the time RCU callback runs
 79	 * making it impossible to determine icq_cache.  Record it in @icq.
 80	 */
 81	icq->__rcu_icq_cache = et->icq_cache;
 82	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
 83}
 84
 85/*
 86 * Slow path for ioc release in put_io_context().  Performs double-lock
 87 * dancing to unlink all icq's and then frees ioc.
 88 */
 89static void ioc_release_fn(struct work_struct *work)
 90{
 91	struct io_context *ioc = container_of(work, struct io_context,
 92					      release_work);
 93	unsigned long flags;
 94
 95	/*
 96	 * Exiting icq may call into put_io_context() through elevator
 97	 * which will trigger lockdep warning.  The ioc's are guaranteed to
 98	 * be different, use a different locking subclass here.  Use
 99	 * irqsave variant as there's no spin_lock_irq_nested().
100	 */
101	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
102
103	while (!hlist_empty(&ioc->icq_list)) {
104		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
105						struct io_cq, ioc_node);
106		struct request_queue *q = icq->q;
107
108		if (spin_trylock(q->queue_lock)) {
109			ioc_destroy_icq(icq);
110			spin_unlock(q->queue_lock);
111		} else {
112			spin_unlock_irqrestore(&ioc->lock, flags);
113			cpu_relax();
114			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
115		}
116	}
117
118	spin_unlock_irqrestore(&ioc->lock, flags);
119
120	kmem_cache_free(iocontext_cachep, ioc);
121}
122
123/**
124 * put_io_context - put a reference of io_context
125 * @ioc: io_context to put
126 *
127 * Decrement reference count of @ioc and release it if the count reaches
128 * zero.
129 */
130void put_io_context(struct io_context *ioc)
131{
132	unsigned long flags;
133	bool free_ioc = false;
134
135	if (ioc == NULL)
136		return;
137
138	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
139
140	/*
141	 * Releasing ioc requires reverse order double locking and we may
142	 * already be holding a queue_lock.  Do it asynchronously from wq.
143	 */
144	if (atomic_long_dec_and_test(&ioc->refcount)) {
145		spin_lock_irqsave(&ioc->lock, flags);
146		if (!hlist_empty(&ioc->icq_list))
147			schedule_work(&ioc->release_work);
148		else
149			free_ioc = true;
150		spin_unlock_irqrestore(&ioc->lock, flags);
151	}
152
153	if (free_ioc)
154		kmem_cache_free(iocontext_cachep, ioc);
155}
156EXPORT_SYMBOL(put_io_context);
157
158/**
159 * put_io_context_active - put active reference on ioc
160 * @ioc: ioc of interest
161 *
162 * Undo get_io_context_active().  If active reference reaches zero after
163 * put, @ioc can never issue further IOs and ioscheds are notified.
164 */
165void put_io_context_active(struct io_context *ioc)
166{
167	struct hlist_node *n;
168	unsigned long flags;
169	struct io_cq *icq;
170
171	if (!atomic_dec_and_test(&ioc->active_ref)) {
172		put_io_context(ioc);
173		return;
174	}
175
176	/*
177	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
178	 * reverse double locking.  Read comment in ioc_release_fn() for
179	 * explanation on the nested locking annotation.
180	 */
181retry:
182	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
183	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
184		if (icq->flags & ICQ_EXITED)
185			continue;
186		if (spin_trylock(icq->q->queue_lock)) {
187			ioc_exit_icq(icq);
188			spin_unlock(icq->q->queue_lock);
189		} else {
190			spin_unlock_irqrestore(&ioc->lock, flags);
191			cpu_relax();
192			goto retry;
193		}
194	}
195	spin_unlock_irqrestore(&ioc->lock, flags);
196
197	put_io_context(ioc);
198}
199
200/* Called by the exiting task */
201void exit_io_context(struct task_struct *task)
202{
203	struct io_context *ioc;
204
205	task_lock(task);
206	ioc = task->io_context;
207	task->io_context = NULL;
208	task_unlock(task);
209
210	atomic_dec(&ioc->nr_tasks);
211	put_io_context_active(ioc);
212}
213
214/**
215 * ioc_clear_queue - break any ioc association with the specified queue
216 * @q: request_queue being cleared
217 *
218 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
219 */
220void ioc_clear_queue(struct request_queue *q)
221{
222	lockdep_assert_held(q->queue_lock);
223
224	while (!list_empty(&q->icq_list)) {
225		struct io_cq *icq = list_entry(q->icq_list.next,
226					       struct io_cq, q_node);
227		struct io_context *ioc = icq->ioc;
228
229		spin_lock(&ioc->lock);
230		ioc_destroy_icq(icq);
231		spin_unlock(&ioc->lock);
232	}
233}
234
235int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
236{
237	struct io_context *ioc;
238	int ret;
239
240	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
241				    node);
242	if (unlikely(!ioc))
243		return -ENOMEM;
244
245	/* initialize */
246	atomic_long_set(&ioc->refcount, 1);
247	atomic_set(&ioc->nr_tasks, 1);
248	atomic_set(&ioc->active_ref, 1);
249	spin_lock_init(&ioc->lock);
250	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
251	INIT_HLIST_HEAD(&ioc->icq_list);
252	INIT_WORK(&ioc->release_work, ioc_release_fn);
253
254	/*
255	 * Try to install.  ioc shouldn't be installed if someone else
256	 * already did or @task, which isn't %current, is exiting.  Note
257	 * that we need to allow ioc creation on exiting %current as exit
258	 * path may issue IOs from e.g. exit_files().  The exit path is
259	 * responsible for not issuing IO after exit_io_context().
260	 */
261	task_lock(task);
262	if (!task->io_context &&
263	    (task == current || !(task->flags & PF_EXITING)))
264		task->io_context = ioc;
265	else
266		kmem_cache_free(iocontext_cachep, ioc);
267
268	ret = task->io_context ? 0 : -EBUSY;
269
270	task_unlock(task);
271
272	return ret;
273}
274
275/**
276 * get_task_io_context - get io_context of a task
277 * @task: task of interest
278 * @gfp_flags: allocation flags, used if allocation is necessary
279 * @node: allocation node, used if allocation is necessary
280 *
281 * Return io_context of @task.  If it doesn't exist, it is created with
282 * @gfp_flags and @node.  The returned io_context has its reference count
283 * incremented.
284 *
285 * This function always goes through task_lock() and it's better to use
286 * %current->io_context + get_io_context() for %current.
287 */
288struct io_context *get_task_io_context(struct task_struct *task,
289				       gfp_t gfp_flags, int node)
290{
291	struct io_context *ioc;
292
293	might_sleep_if(gfp_flags & __GFP_WAIT);
294
295	do {
296		task_lock(task);
297		ioc = task->io_context;
298		if (likely(ioc)) {
299			get_io_context(ioc);
300			task_unlock(task);
301			return ioc;
302		}
303		task_unlock(task);
304	} while (!create_task_io_context(task, gfp_flags, node));
305
306	return NULL;
307}
308EXPORT_SYMBOL(get_task_io_context);
309
310/**
311 * ioc_lookup_icq - lookup io_cq from ioc
312 * @ioc: the associated io_context
313 * @q: the associated request_queue
314 *
315 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
316 * with @q->queue_lock held.
317 */
318struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
319{
320	struct io_cq *icq;
321
322	lockdep_assert_held(q->queue_lock);
323
324	/*
325	 * icq's are indexed from @ioc using radix tree and hint pointer,
326	 * both of which are protected with RCU.  All removals are done
327	 * holding both q and ioc locks, and we're holding q lock - if we
 328	 * find an icq which points to us, it's guaranteed to be valid.
329	 */
330	rcu_read_lock();
331	icq = rcu_dereference(ioc->icq_hint);
332	if (icq && icq->q == q)
333		goto out;
334
335	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
336	if (icq && icq->q == q)
337		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
338	else
339		icq = NULL;
340out:
341	rcu_read_unlock();
342	return icq;
343}
344EXPORT_SYMBOL(ioc_lookup_icq);
345
346/**
347 * ioc_create_icq - create and link io_cq
348 * @ioc: io_context of interest
349 * @q: request_queue of interest
350 * @gfp_mask: allocation mask
351 *
 352 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 353 * will be created using @gfp_mask.
354 *
355 * The caller is responsible for ensuring @ioc won't go away and @q is
356 * alive and will stay alive until this function returns.
357 */
358struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
359			     gfp_t gfp_mask)
360{
361	struct elevator_type *et = q->elevator->type;
362	struct io_cq *icq;
363
364	/* allocate stuff */
365	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
366				    q->node);
367	if (!icq)
368		return NULL;
369
370	if (radix_tree_preload(gfp_mask) < 0) {
371		kmem_cache_free(et->icq_cache, icq);
372		return NULL;
373	}
374
375	icq->ioc = ioc;
376	icq->q = q;
377	INIT_LIST_HEAD(&icq->q_node);
378	INIT_HLIST_NODE(&icq->ioc_node);
379
380	/* lock both q and ioc and try to link @icq */
381	spin_lock_irq(q->queue_lock);
382	spin_lock(&ioc->lock);
383
384	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
385		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
386		list_add(&icq->q_node, &q->icq_list);
387		if (et->ops.elevator_init_icq_fn)
388			et->ops.elevator_init_icq_fn(icq);
389	} else {
390		kmem_cache_free(et->icq_cache, icq);
391		icq = ioc_lookup_icq(ioc, q);
392		if (!icq)
393			printk(KERN_ERR "cfq: icq link failed!\n");
394	}
395
396	spin_unlock(&ioc->lock);
397	spin_unlock_irq(q->queue_lock);
398	radix_tree_preload_end();
399	return icq;
400}
401
402static int __init blk_ioc_init(void)
403{
404	iocontext_cachep = kmem_cache_create("blkdev_ioc",
405			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
406	return 0;
407}
408subsys_initcall(blk_ioc_init);
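
In this v3.5.6 code, io_context lifetime is handled purely by reference counting: get_task_io_context() hands out a counted reference (creating the context on first use), and the final put_io_context() frees the ioc directly when no io_cq's are linked, or defers the teardown to release_work because the reverse-order ioc/queue double locking cannot be done from an arbitrary caller. Below is a minimal usage sketch, not part of blk-ioc.c; the helper name example_peek_ioc() and the GFP_KERNEL / NUMA_NO_NODE arguments are illustrative assumptions.

/* Hypothetical caller of the v3.5.6 io_context API (illustration only). */
#include <linux/iocontext.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/numa.h>

static void example_peek_ioc(struct task_struct *task)
{
	struct io_context *ioc;

	/*
	 * Returns a counted reference, allocating the io_context on first
	 * use; takes task_lock() internally and may sleep with GFP_KERNEL.
	 */
	ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
	if (!ioc)
		return;

	/* ... inspect ioc, or look up an icq under q->queue_lock ... */

	/* The last put either frees ioc or schedules release_work. */
	put_io_context(ioc);
}
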
v3.1 (block/blk-ioc.c)
  1/*
  2 * Functions related to io context handling
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/init.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 10#include <linux/slab.h>
 11
 12#include "blk.h"
 13
 14/*
 15 * For io context allocations
 16 */
 17static struct kmem_cache *iocontext_cachep;
 18
 19static void cfq_dtor(struct io_context *ioc)
 20{
 21	if (!hlist_empty(&ioc->cic_list)) {
 22		struct cfq_io_context *cic;
 23
 24		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
 25								cic_list);
 26		cic->dtor(ioc);
 27	}
 28}
 29
 30/*
 31 * IO Context helper functions. put_io_context() returns 1 if there are no
 32 * more users of this io context, 0 otherwise.
 33 */
 34int put_io_context(struct io_context *ioc)
 35{
 36	if (ioc == NULL)
 37		return 1;
 38
 39	BUG_ON(atomic_long_read(&ioc->refcount) == 0);
 40
 41	if (atomic_long_dec_and_test(&ioc->refcount)) {
 42		rcu_read_lock();
 43		cfq_dtor(ioc);
 44		rcu_read_unlock();
 45
 46		kmem_cache_free(iocontext_cachep, ioc);
 47		return 1;
 48	}
 49	return 0;
 50}
 51EXPORT_SYMBOL(put_io_context);
 52
 53static void cfq_exit(struct io_context *ioc)
 54{
 55	rcu_read_lock();
 56
 57	if (!hlist_empty(&ioc->cic_list)) {
 58		struct cfq_io_context *cic;
 59
 60		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
 61								cic_list);
 62		cic->exit(ioc);
 63	}
 64	rcu_read_unlock();
 65}
 66
 67/* Called by the exiting task */
 68void exit_io_context(struct task_struct *task)
 69{
 70	struct io_context *ioc;
 71
 72	task_lock(task);
 73	ioc = task->io_context;
 74	task->io_context = NULL;
 75	task_unlock(task);
 76
 77	if (atomic_dec_and_test(&ioc->nr_tasks))
 78		cfq_exit(ioc);
 79
 80	put_io_context(ioc);
 81}
 82
 83struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 84{
 85	struct io_context *ioc;
 86
 87	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 88	if (ioc) {
 89		atomic_long_set(&ioc->refcount, 1);
 90		atomic_set(&ioc->nr_tasks, 1);
 91		spin_lock_init(&ioc->lock);
 92		ioc->ioprio_changed = 0;
 93		ioc->ioprio = 0;
 94		ioc->last_waited = 0; /* doesn't matter... */
 95		ioc->nr_batch_requests = 0; /* because this is 0 */
 96		INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
 97		INIT_HLIST_HEAD(&ioc->cic_list);
 98		ioc->ioc_data = NULL;
 99#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
100		ioc->cgroup_changed = 0;
101#endif
102	}
103
104	return ioc;
105}
106
107/*
108 * If the current task has no IO context then create one and initialise it.
109 * Otherwise, return its existing IO context.
110 *
111 * This returned IO context doesn't have a specifically elevated refcount,
112 * but since the current task itself holds a reference, the context can be
113 * used in general code, so long as it stays within `current` context.
114 */
115struct io_context *current_io_context(gfp_t gfp_flags, int node)
116{
117	struct task_struct *tsk = current;
118	struct io_context *ret;
119
120	ret = tsk->io_context;
121	if (likely(ret))
122		return ret;
123
124	ret = alloc_io_context(gfp_flags, node);
125	if (ret) {
126		/* make sure set_task_ioprio() sees the settings above */
127		smp_wmb();
128		tsk->io_context = ret;
129	}
130
131	return ret;
132}
133
134/*
135 * If the current task has no IO context then create one and initialise it.
136 * If it does have a context, take a ref on it.
137 *
138 * This is always called in the context of the task which submitted the I/O.
139 */
140struct io_context *get_io_context(gfp_t gfp_flags, int node)
141{
142	struct io_context *ioc = NULL;
143
144	/*
145	 * Check for unlikely race with exiting task. ioc ref count is
146	 * zero when ioc is being detached.
147	 */
148	do {
149		ioc = current_io_context(gfp_flags, node);
150		if (unlikely(!ioc))
151			break;
152	} while (!atomic_long_inc_not_zero(&ioc->refcount));
153
154	return ioc;
155}
156EXPORT_SYMBOL(get_io_context);
157
158static int __init blk_ioc_init(void)
159{
160	iocontext_cachep = kmem_cache_create("blkdev_ioc",
161			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
162	return 0;
163}
164subsys_initcall(blk_ioc_init);
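
The older v3.1 variant predates per-queue io_cq's: CFQ's cfq_io_context objects hang off ioc->cic_list and are torn down through their dtor/exit callbacks, get_io_context() works only on %current and takes the allocation flags and NUMA node directly, and put_io_context() reports (by returning 1) when the last reference was dropped. A hypothetical caller sketch under those assumptions follows; the helper name example_current_ioc() and the GFP_KERNEL / -1 node arguments are illustrative, not taken from this file.

/* Hypothetical caller of the v3.1 io_context API (illustration only). */
#include <linux/iocontext.h>
#include <linux/gfp.h>

static void example_current_ioc(void)
{
	struct io_context *ioc;

	/*
	 * Returns current's io_context with an elevated refcount, creating
	 * it on first use; NULL only if allocation fails.  The internal
	 * retry loop guards against a racing task exit dropping the count
	 * to zero.  Passing -1 means no NUMA node preference.
	 */
	ioc = get_io_context(GFP_KERNEL, -1);
	if (!ioc)
		return;

	/* ... use ioc, e.g. read ioc->ioprio ... */

	/* Returns 1 when this drops the last reference and frees ioc. */
	put_io_context(ioc);
}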