fs/btrfs/async-thread.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2007 Oracle.  All rights reserved.
  4 * Copyright (C) 2014 Fujitsu.  All rights reserved.
  5 */
  6
  7#include <linux/kthread.h>
  8#include <linux/slab.h>
  9#include <linux/list.h>
 10#include <linux/spinlock.h>
 11#include <linux/freezer.h>
 12#include <trace/events/btrfs.h>
 13#include "async-thread.h"
 14#include "ctree.h"
 15
 16enum {
 17	WORK_DONE_BIT,
 18	WORK_ORDER_DONE_BIT,
 19};
 20
 21#define NO_THRESHOLD (-1)
 22#define DFT_THRESHOLD (32)
 23
 24struct btrfs_workqueue {
 25	struct workqueue_struct *normal_wq;
 26
 27	/* File system this workqueue services */
 28	struct btrfs_fs_info *fs_info;
 29
 30	/* List head pointing to ordered work list */
 31	struct list_head ordered_list;
 32
 33	/* Spinlock for ordered_list */
 34	spinlock_t list_lock;
 35
 36	/* Thresholding related variants */
 37	atomic_t pending;
 38
 39	/* Up limit of concurrency workers */
 40	int limit_active;
 41
 42	/* Current number of concurrency workers */
 43	int current_active;
 44
 45	/* Threshold to change current_active */
 46	int thresh;
 47	unsigned int count;
 48	spinlock_t thres_lock;
 49};
 50
 51struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
 52{
 53	return wq->fs_info;
 54}
 55
 56struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
 57{
 58	return work->wq->fs_info;
 59}
 60
 61bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
 62{
 63	/*
 64	 * We could compare wq->pending with num_online_cpus()
 65	 * to support "thresh == NO_THRESHOLD" case, but it requires
 66	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
 67	 * postpone it until someone needs the support of that case.
 68	 */
 69	if (wq->thresh == NO_THRESHOLD)
 70		return false;
 71
 72	return atomic_read(&wq->pending) > wq->thresh * 2;
 73}
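
The check above is a coarse back-pressure signal: with thresholding enabled it reports congestion once more than 2 * thresh work items are pending. A hypothetical caller (example_queue_background() is illustrative, not part of this file) might use it to skip optional background work:

static void example_queue_background(struct btrfs_workqueue *wq,
				     struct btrfs_work *work)
{
	/* Optional background work is simply skipped while the queue is backed up. */
	if (btrfs_workqueue_normal_congested(wq))
		return;
	btrfs_queue_work(wq, work);
}
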
 74
 75static void btrfs_init_workqueue(struct btrfs_workqueue *wq,
 76				 struct btrfs_fs_info *fs_info)
 77{
 78	wq->fs_info = fs_info;
 79	atomic_set(&wq->pending, 0);
 80	INIT_LIST_HEAD(&wq->ordered_list);
 81	spin_lock_init(&wq->list_lock);
 82	spin_lock_init(&wq->thres_lock);
 83}
 84
 85struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
 86					      const char *name, unsigned int flags,
 87					      int limit_active, int thresh)
 88{
 89	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
 90
 91	if (!ret)
 92		return NULL;
 93
 94	btrfs_init_workqueue(ret, fs_info);
 95
 96	ret->limit_active = limit_active;
 97	if (thresh == 0)
 98		thresh = DFT_THRESHOLD;
 99	/* For low threshold, disabling threshold is a better choice */
100	if (thresh < DFT_THRESHOLD) {
101		ret->current_active = limit_active;
102		ret->thresh = NO_THRESHOLD;
103	} else {
104		/*
105		 * For threshold-able wq, let its concurrency grow on demand.
106		 * Use minimal max_active at alloc time to reduce resource
107		 * usage.
108		 */
109		ret->current_active = 1;
110		ret->thresh = thresh;
111	}
112
113	ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active,
114					 name);
115	if (!ret->normal_wq) {
116		kfree(ret);
117		return NULL;
118	}
119
120	trace_btrfs_workqueue_alloc(ret, name);
121	return ret;
122}
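
A hypothetical allocation sketch (the queue name "example", the WQ_FREEZABLE flag and the limit of 8 below are illustrative choices, not taken from real btrfs callers): a thresh of 0 falls back to DFT_THRESHOLD, so the queue starts with a single active worker and thresh_exec_hook() later grows max_active toward limit_active under load.

static struct btrfs_workqueue *example_alloc(struct btrfs_fs_info *fs_info)
{
	/* Self-tuning queue: max_active starts at 1 and may grow up to 8. */
	return btrfs_alloc_workqueue(fs_info, "example", WQ_FREEZABLE, 8, 0);
}

Passing a thresh between 1 and DFT_THRESHOLD - 1 instead disables the tuning entirely and pins current_active at limit_active from the start.
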
123
124struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
125				struct btrfs_fs_info *fs_info, const char *name,
126				unsigned int flags)
127{
128	struct btrfs_workqueue *ret;
129
130	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
131	if (!ret)
132		return NULL;
133
134	btrfs_init_workqueue(ret, fs_info);
135
136	/* Ordered workqueues don't allow @max_active adjustments. */
137	ret->limit_active = 1;
138	ret->current_active = 1;
139	ret->thresh = NO_THRESHOLD;
140
141	ret->normal_wq = alloc_ordered_workqueue("btrfs-%s", flags, name);
142	if (!ret->normal_wq) {
143		kfree(ret);
144		return NULL;
145	}
146
147	trace_btrfs_workqueue_alloc(ret, name);
148	return ret;
149}
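
For completeness, the ordered flavour (again with a hypothetical name and flag): because the underlying queue is created with alloc_ordered_workqueue(), items execute one at a time in submission order, and limit_active/current_active stay fixed at 1.

static struct btrfs_workqueue *example_alloc_ordered(struct btrfs_fs_info *fs_info)
{
	/* One item at a time, strictly in queueing order. */
	return btrfs_alloc_ordered_workqueue(fs_info, "example-ordered", WQ_FREEZABLE);
}
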
150
151/*
152 * Hook for threshold which will be called in btrfs_queue_work.
153 * This hook WILL be called in IRQ handler context,
154 * so workqueue_set_max_active MUST NOT be called in this hook
155 */
156static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
157{
158	if (wq->thresh == NO_THRESHOLD)
159		return;
160	atomic_inc(&wq->pending);
161}
162
163/*
164 * Hook for threshold which will be called before executing the work,
165 * This hook is called in kthread context.
166 * So workqueue_set_max_active is called here.
167 */
168static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
169{
170	int new_current_active;
171	long pending;
172	int need_change = 0;
173
174	if (wq->thresh == NO_THRESHOLD)
175		return;
176
177	atomic_dec(&wq->pending);
178	spin_lock(&wq->thres_lock);
179	/*
180	 * Use wq->count to limit the calling frequency of
181	 * workqueue_set_max_active.
182	 */
183	wq->count++;
184	wq->count %= (wq->thresh / 4);
185	if (!wq->count)
186		goto  out;
187	new_current_active = wq->current_active;
188
189	/*
190	 * pending may be changed later, but it's OK since we really
191	 * don't need it so accurate to calculate new_max_active.
192	 */
193	pending = atomic_read(&wq->pending);
194	if (pending > wq->thresh)
195		new_current_active++;
196	if (pending < wq->thresh / 2)
197		new_current_active--;
198	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
199	if (new_current_active != wq->current_active)  {
200		need_change = 1;
201		wq->current_active = new_current_active;
202	}
203out:
204	spin_unlock(&wq->thres_lock);
205
206	if (need_change) {
207		workqueue_set_max_active(wq->normal_wq, wq->current_active);
208	}
209}
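
As a concrete walk-through of the adjustment: with the default thresh of 32 and, say, limit_active of 8, a snapshot of pending = 40 (above thresh) bumps new_current_active by one, pending = 12 (below thresh / 2 = 16) lowers it by one, clamp_val() keeps the result within [1, 8], and workqueue_set_max_active() is only invoked when the value actually changed, so the underlying workqueue's concurrency tracks the backlog instead of staying at its allocation-time value.
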
210
211static void run_ordered_work(struct btrfs_workqueue *wq,
212			     struct btrfs_work *self)
213{
214	struct list_head *list = &wq->ordered_list;
215	struct btrfs_work *work;
216	spinlock_t *lock = &wq->list_lock;
217	unsigned long flags;
218	bool free_self = false;
219
220	while (1) {
221		spin_lock_irqsave(lock, flags);
222		if (list_empty(list))
223			break;
224		work = list_entry(list->next, struct btrfs_work,
225				  ordered_list);
226		if (!test_bit(WORK_DONE_BIT, &work->flags))
227			break;
228		/*
229		 * Orders all subsequent loads after reading WORK_DONE_BIT,
230		 * paired with the smp_mb__before_atomic in btrfs_work_helper
231		 * this guarantees that the ordered function will see all
232		 * updates from ordinary work function.
233		 */
234		smp_rmb();
235
236		/*
237		 * we are going to call the ordered done function, but
238		 * we leave the work item on the list as a barrier so
239		 * that later work items that are done don't have their
240		 * functions called before this one returns
241		 */
242		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
243			break;
244		trace_btrfs_ordered_sched(work);
245		spin_unlock_irqrestore(lock, flags);
246		work->ordered_func(work, false);
247
248		/* now take the lock again and drop our item from the list */
249		spin_lock_irqsave(lock, flags);
250		list_del(&work->ordered_list);
251		spin_unlock_irqrestore(lock, flags);
252
253		if (work == self) {
254			/*
255			 * This is the work item that the worker is currently
256			 * executing.
257			 *
258			 * The kernel workqueue code guarantees non-reentrancy
259			 * of work items. I.e., if a work item with the same
260			 * address and work function is queued twice, the second
261			 * execution is blocked until the first one finishes. A
262			 * work item may be freed and recycled with the same
263			 * work function; the workqueue code assumes that the
264			 * original work item cannot depend on the recycled work
265			 * item in that case (see find_worker_executing_work()).
266			 *
267			 * Note that different types of Btrfs work can depend on
268			 * each other, and one type of work on one Btrfs
269			 * filesystem may even depend on the same type of work
270			 * on another Btrfs filesystem via, e.g., a loop device.
271			 * Therefore, we must not allow the current work item to
272			 * be recycled until we are really done, otherwise we
273			 * break the above assumption and can deadlock.
274			 */
275			free_self = true;
276		} else {
277			/*
278			 * We don't want to call the ordered free functions with
279			 * the lock held.
280			 */
281			work->ordered_func(work, true);
282			/* NB: work must not be dereferenced past this point. */
283			trace_btrfs_all_work_done(wq->fs_info, work);
284		}
285	}
286	spin_unlock_irqrestore(lock, flags);
287
288	if (free_self) {
289		self->ordered_func(self, true);
290		/* NB: self must not be dereferenced past this point. */
291		trace_btrfs_all_work_done(wq->fs_info, self);
292	}
293}
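
To make the ordering guarantee concrete: if items A, B and C were queued in that order and B's normal work function happens to finish first, B only gets WORK_DONE_BIT set; the loop above stops at A, which heads ordered_list and is not yet done, so B's ordered step is deferred. Once A completes, whichever worker runs this loop next calls A's ordered_func(), unlinks A, then finds B already marked done and processes it, and so on — completion callbacks are serialized in submission order even though the normal work functions run concurrently. The free_self detour exists because the item currently being executed must not be freed (and possibly recycled) until this worker is completely done with it, as the comment above explains.
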
294
295static void btrfs_work_helper(struct work_struct *normal_work)
296{
297	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
298					       normal_work);
299	struct btrfs_workqueue *wq = work->wq;
300	int need_order = 0;
301
302	/*
303	 * We should not touch things inside work in the following cases:
304	 * 1) after work->func() if it has no ordered_func(..., true) to free
305	 *    Since the struct is freed in work->func().
306	 * 2) after setting WORK_DONE_BIT
307	 *    The work may be freed in other threads almost instantly.
308	 * So we save the needed things here.
309	 */
310	if (work->ordered_func)
311		need_order = 1;
312
313	trace_btrfs_work_sched(work);
314	thresh_exec_hook(wq);
315	work->func(work);
316	if (need_order) {
317		/*
318		 * Ensures all memory accesses done in the work function are
319		 * ordered before setting the WORK_DONE_BIT. Ensuring the thread
320		 * which is going to execute the ordered work sees them.
321		 * Pairs with the smp_rmb in run_ordered_work.
322		 */
323		smp_mb__before_atomic();
324		set_bit(WORK_DONE_BIT, &work->flags);
325		run_ordered_work(wq, work);
326	} else {
327		/* NB: work must not be dereferenced past this point. */
328		trace_btrfs_all_work_done(wq->fs_info, work);
329	}
330}
331
332void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
333		     btrfs_ordered_func_t ordered_func)
334{
335	work->func = func;
336	work->ordered_func = ordered_func;
337	INIT_WORK(&work->normal_work, btrfs_work_helper);
338	INIT_LIST_HEAD(&work->ordered_list);
339	work->flags = 0;
340}
341
342void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
343{
344	unsigned long flags;
345
346	work->wq = wq;
347	thresh_queue_hook(wq);
348	if (work->ordered_func) {
349		spin_lock_irqsave(&wq->list_lock, flags);
350		list_add_tail(&work->ordered_list, &wq->ordered_list);
351		spin_unlock_irqrestore(&wq->list_lock, flags);
352	}
353	trace_btrfs_work_queued(work);
354	queue_work(wq->normal_wq, &work->normal_work);
355}
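
Putting btrfs_init_work() and btrfs_queue_work() together, a minimal hypothetical caller might look as follows (example_job, example_worker() and example_ordered() are illustrative names, not taken from btrfs; real users embed struct btrfs_work in their own job structures the same way). Note that the ordered callback is invoked twice per item: once with do_free == false, in queueing order, and once with do_free == true when the item may be released.

struct example_job {
	struct btrfs_work work;
	int payload;
};

static void example_worker(struct btrfs_work *work)
{
	struct example_job *job = container_of(work, struct example_job, work);

	/* The heavy lifting; may run concurrently with other items. */
	job->payload *= 2;
}

static void example_ordered(struct btrfs_work *work, bool do_free)
{
	struct example_job *job = container_of(work, struct example_job, work);

	if (!do_free) {
		/* Completion step: runs strictly in queueing order. */
		return;
	}
	/* Final call: the item may be released now. */
	kfree(job);
}

static int example_submit(struct btrfs_workqueue *wq, int value)
{
	struct example_job *job = kmalloc(sizeof(*job), GFP_NOFS);

	if (!job)
		return -ENOMEM;
	job->payload = value;
	btrfs_init_work(&job->work, example_worker, example_ordered);
	btrfs_queue_work(wq, &job->work);
	return 0;
}
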
356
357void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
358{
359	if (!wq)
360		return;
361	destroy_workqueue(wq->normal_wq);
362	trace_btrfs_workqueue_destroy(wq);
363	kfree(wq);
364}
365
366void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
367{
368	if (wq)
369		wq->limit_active = limit_active;
370}
371
372void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
373{
374	flush_workqueue(wq->normal_wq);
375}
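
Finally, a hypothetical teardown sketch (illustrative only). Note that btrfs_workqueue_set_max() merely records a new limit_active ceiling; for thresholded queues the underlying max_active is still adjusted by thresh_exec_hook(), and btrfs_destroy_workqueue() drains the workqueue via destroy_workqueue() before freeing it.

static void example_shutdown(struct btrfs_workqueue *wq)
{
	/* Wait for everything queued so far, then tear the queue down. */
	btrfs_flush_workqueue(wq);
	btrfs_destroy_workqueue(wq);
}
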
fs/btrfs/async-thread.c (v4.10.11)
 
  1/*
  2 * Copyright (C) 2007 Oracle.  All rights reserved.
  3 * Copyright (C) 2014 Fujitsu.  All rights reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public
  7 * License v2 as published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 12 * General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public
 15 * License along with this program; if not, write to the
 16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 17 * Boston, MA 021110-1307, USA.
 18 */
 19
 20#include <linux/kthread.h>
 21#include <linux/slab.h>
 22#include <linux/list.h>
 23#include <linux/spinlock.h>
 24#include <linux/freezer.h>
 25#include "async-thread.h"
 26#include "ctree.h"
 27
 28#define WORK_DONE_BIT 0
 29#define WORK_ORDER_DONE_BIT 1
 30#define WORK_HIGH_PRIO_BIT 2
 31
 32#define NO_THRESHOLD (-1)
 33#define DFT_THRESHOLD (32)
 34
 35struct __btrfs_workqueue {
 36	struct workqueue_struct *normal_wq;
 37
 38	/* File system this workqueue services */
 39	struct btrfs_fs_info *fs_info;
 40
 41	/* List head pointing to ordered work list */
 42	struct list_head ordered_list;
 43
 44	/* Spinlock for ordered_list */
 45	spinlock_t list_lock;
 46
 47	/* Thresholding related variants */
 48	atomic_t pending;
 49
 50	/* Up limit of concurrency workers */
 51	int limit_active;
 52
 53	/* Current number of concurrency workers */
 54	int current_active;
 55
 56	/* Threshold to change current_active */
 57	int thresh;
 58	unsigned int count;
 59	spinlock_t thres_lock;
 60};
 61
 62struct btrfs_workqueue {
 63	struct __btrfs_workqueue *normal;
 64	struct __btrfs_workqueue *high;
 65};
 66
 67static void normal_work_helper(struct btrfs_work *work);
 68
 69#define BTRFS_WORK_HELPER(name)					\
 70void btrfs_##name(struct work_struct *arg)				\
 71{									\
 72	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
 73					       normal_work);		\
 74	normal_work_helper(work);					\
 75}
 76
 77struct btrfs_fs_info *
 78btrfs_workqueue_owner(struct __btrfs_workqueue *wq)
 79{
 80	return wq->fs_info;
 81}
 82
 83struct btrfs_fs_info *
 84btrfs_work_owner(struct btrfs_work *work)
 85{
 86	return work->wq->fs_info;
 87}
 88
 89bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
 90{
 91	/*
 92	 * We could compare wq->normal->pending with num_online_cpus()
 93	 * to support "thresh == NO_THRESHOLD" case, but it requires
 94	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
 95	 * postpone it until someone needs the support of that case.
 96	 */
 97	if (wq->normal->thresh == NO_THRESHOLD)
 98		return false;
 99
100	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
101}
102
103BTRFS_WORK_HELPER(worker_helper);
104BTRFS_WORK_HELPER(delalloc_helper);
105BTRFS_WORK_HELPER(flush_delalloc_helper);
106BTRFS_WORK_HELPER(cache_helper);
107BTRFS_WORK_HELPER(submit_helper);
108BTRFS_WORK_HELPER(fixup_helper);
109BTRFS_WORK_HELPER(endio_helper);
110BTRFS_WORK_HELPER(endio_meta_helper);
111BTRFS_WORK_HELPER(endio_meta_write_helper);
112BTRFS_WORK_HELPER(endio_raid56_helper);
113BTRFS_WORK_HELPER(endio_repair_helper);
114BTRFS_WORK_HELPER(rmw_helper);
115BTRFS_WORK_HELPER(endio_write_helper);
116BTRFS_WORK_HELPER(freespace_write_helper);
117BTRFS_WORK_HELPER(delayed_meta_helper);
118BTRFS_WORK_HELPER(readahead_helper);
119BTRFS_WORK_HELPER(qgroup_rescan_helper);
120BTRFS_WORK_HELPER(extent_refs_helper);
121BTRFS_WORK_HELPER(scrub_helper);
122BTRFS_WORK_HELPER(scrubwrc_helper);
123BTRFS_WORK_HELPER(scrubnc_helper);
124BTRFS_WORK_HELPER(scrubparity_helper);
125
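
Each BTRFS_WORK_HELPER() line above stamps out a distinct trampoline with its own symbol; for example, BTRFS_WORK_HELPER(endio_helper) expands (one-to-one with the macro) to the function below, and that symbol is what a caller later passes to btrfs_init_work() as uniq_func:

void btrfs_endio_helper(struct work_struct *arg)
{
	struct btrfs_work *work = container_of(arg, struct btrfs_work,
					       normal_work);
	normal_work_helper(work);
}
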
126static struct __btrfs_workqueue *
127__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
128			unsigned int flags, int limit_active, int thresh)
129{
130	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
131
132	if (!ret)
133		return NULL;
134
135	ret->fs_info = fs_info;
136	ret->limit_active = limit_active;
137	atomic_set(&ret->pending, 0);
138	if (thresh == 0)
139		thresh = DFT_THRESHOLD;
140	/* For low threshold, disabling threshold is a better choice */
141	if (thresh < DFT_THRESHOLD) {
142		ret->current_active = limit_active;
143		ret->thresh = NO_THRESHOLD;
144	} else {
145		/*
146		 * For threshold-able wq, let its concurrency grow on demand.
147		 * Use minimal max_active at alloc time to reduce resource
148		 * usage.
149		 */
150		ret->current_active = 1;
151		ret->thresh = thresh;
152	}
153
154	if (flags & WQ_HIGHPRI)
155		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
156						 ret->current_active, "btrfs",
157						 name);
158	else
159		ret->normal_wq = alloc_workqueue("%s-%s", flags,
160						 ret->current_active, "btrfs",
161						 name);
162	if (!ret->normal_wq) {
163		kfree(ret);
164		return NULL;
165	}
166
167	INIT_LIST_HEAD(&ret->ordered_list);
168	spin_lock_init(&ret->list_lock);
169	spin_lock_init(&ret->thres_lock);
170	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
171	return ret;
172}
173
174static inline void
175__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
176
177struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
178					      const char *name,
179					      unsigned int flags,
180					      int limit_active,
181					      int thresh)
182{
183	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
184
185	if (!ret)
186		return NULL;
187
188	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
189					      flags & ~WQ_HIGHPRI,
190					      limit_active, thresh);
191	if (!ret->normal) {
192		kfree(ret);
193		return NULL;
194	}
195
196	if (flags & WQ_HIGHPRI) {
197		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
198						    limit_active, thresh);
199		if (!ret->high) {
200			__btrfs_destroy_workqueue(ret->normal);
201			kfree(ret);
202			return NULL;
203		}
204	}
205	return ret;
206}
207
208/*
209 * Hook for threshold which will be called in btrfs_queue_work.
210 * This hook WILL be called in IRQ handler context,
211 * so workqueue_set_max_active MUST NOT be called in this hook
212 */
213static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
214{
215	if (wq->thresh == NO_THRESHOLD)
216		return;
217	atomic_inc(&wq->pending);
218}
219
220/*
221 * Hook for threshold which will be called before executing the work,
222 * This hook is called in kthread context.
223 * So workqueue_set_max_active is called here.
224 */
225static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
226{
227	int new_current_active;
228	long pending;
229	int need_change = 0;
230
231	if (wq->thresh == NO_THRESHOLD)
232		return;
233
234	atomic_dec(&wq->pending);
235	spin_lock(&wq->thres_lock);
236	/*
237	 * Use wq->count to limit the calling frequency of
238	 * workqueue_set_max_active.
239	 */
240	wq->count++;
241	wq->count %= (wq->thresh / 4);
242	if (!wq->count)
243		goto  out;
244	new_current_active = wq->current_active;
245
246	/*
247	 * pending may be changed later, but it's OK since we really
248	 * don't need it so accurate to calculate new_max_active.
249	 */
250	pending = atomic_read(&wq->pending);
251	if (pending > wq->thresh)
252		new_current_active++;
253	if (pending < wq->thresh / 2)
254		new_current_active--;
255	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
256	if (new_current_active != wq->current_active)  {
257		need_change = 1;
258		wq->current_active = new_current_active;
259	}
260out:
261	spin_unlock(&wq->thres_lock);
262
263	if (need_change) {
264		workqueue_set_max_active(wq->normal_wq, wq->current_active);
265	}
266}
267
268static void run_ordered_work(struct __btrfs_workqueue *wq)
269{
270	struct list_head *list = &wq->ordered_list;
271	struct btrfs_work *work;
272	spinlock_t *lock = &wq->list_lock;
273	unsigned long flags;
274
275	while (1) {
276		void *wtag;
277
278		spin_lock_irqsave(lock, flags);
279		if (list_empty(list))
280			break;
281		work = list_entry(list->next, struct btrfs_work,
282				  ordered_list);
283		if (!test_bit(WORK_DONE_BIT, &work->flags))
284			break;
285
286		/*
287		 * we are going to call the ordered done function, but
288		 * we leave the work item on the list as a barrier so
289		 * that later work items that are done don't have their
290		 * functions called before this one returns
291		 */
292		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
293			break;
294		trace_btrfs_ordered_sched(work);
295		spin_unlock_irqrestore(lock, flags);
296		work->ordered_func(work);
297
298		/* now take the lock again and drop our item from the list */
299		spin_lock_irqsave(lock, flags);
300		list_del(&work->ordered_list);
301		spin_unlock_irqrestore(lock, flags);
302
303		/*
304		 * We don't want to call the ordered free functions with the
305		 * lock held though. Save the work as tag for the trace event,
306		 * because the callback could free the structure.
307		 */
308		wtag = work;
309		work->ordered_free(work);
310		trace_btrfs_all_work_done(wq->fs_info, wtag);
311	}
312	spin_unlock_irqrestore(lock, flags);
313}
314
315static void normal_work_helper(struct btrfs_work *work)
316{
317	struct __btrfs_workqueue *wq;
318	void *wtag;
319	int need_order = 0;
320
321	/*
322	 * We should not touch things inside work in the following cases:
323	 * 1) after work->func() if it has no ordered_free
324	 *    Since the struct is freed in work->func().
325	 * 2) after setting WORK_DONE_BIT
326	 *    The work may be freed in other threads almost instantly.
327	 * So we save the needed things here.
328	 */
329	if (work->ordered_func)
330		need_order = 1;
331	wq = work->wq;
332	/* Safe for tracepoints in case work gets freed by the callback */
333	wtag = work;
334
335	trace_btrfs_work_sched(work);
336	thresh_exec_hook(wq);
337	work->func(work);
338	if (need_order) {
339		set_bit(WORK_DONE_BIT, &work->flags);
340		run_ordered_work(wq);
341	}
342	if (!need_order)
343		trace_btrfs_all_work_done(wq->fs_info, wtag);
344}
345
346void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
347		     btrfs_func_t func,
348		     btrfs_func_t ordered_func,
349		     btrfs_func_t ordered_free)
350{
351	work->func = func;
352	work->ordered_func = ordered_func;
353	work->ordered_free = ordered_free;
354	INIT_WORK(&work->normal_work, uniq_func);
355	INIT_LIST_HEAD(&work->ordered_list);
356	work->flags = 0;
357}
358
359static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
360				      struct btrfs_work *work)
361{
362	unsigned long flags;
363
364	work->wq = wq;
365	thresh_queue_hook(wq);
366	if (work->ordered_func) {
367		spin_lock_irqsave(&wq->list_lock, flags);
368		list_add_tail(&work->ordered_list, &wq->ordered_list);
369		spin_unlock_irqrestore(&wq->list_lock, flags);
370	}
371	trace_btrfs_work_queued(work);
372	queue_work(wq->normal_wq, &work->normal_work);
373}
374
375void btrfs_queue_work(struct btrfs_workqueue *wq,
376		      struct btrfs_work *work)
377{
378	struct __btrfs_workqueue *dest_wq;
379
380	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
381		dest_wq = wq->high;
382	else
383		dest_wq = wq->normal;
384	__btrfs_queue_work(dest_wq, work);
385}
386
387static inline void
388__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
389{
390	destroy_workqueue(wq->normal_wq);
391	trace_btrfs_workqueue_destroy(wq);
392	kfree(wq);
393}
394
395void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
396{
397	if (!wq)
398		return;
399	if (wq->high)
400		__btrfs_destroy_workqueue(wq->high);
401	__btrfs_destroy_workqueue(wq->normal);
402	kfree(wq);
403}
404
405void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
406{
407	if (!wq)
408		return;
409	wq->normal->limit_active = limit_active;
410	if (wq->high)
411		wq->high->limit_active = limit_active;
412}
413
414void btrfs_set_work_high_priority(struct btrfs_work *work)
415{
416	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
417}
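
For contrast with the v6.8 API above, a hypothetical caller of this older interface (the example_* names are illustrative, and the kfree() assumes the btrfs_work was allocated standalone): it passes one of the generated helpers as uniq_func, supplies separate ordered_func/ordered_free callbacks, and can flag an item for the high-priority queue before queueing.

static void example_func(struct btrfs_work *work)
{
	/* Main work; may run concurrently with other items. */
}

static void example_ordered_func(struct btrfs_work *work)
{
	/* Completion step; runs in queueing order. */
}

static void example_ordered_free(struct btrfs_work *work)
{
	kfree(work);
}

static void example_submit(struct btrfs_workqueue *wq, struct btrfs_work *work,
			   bool urgent)
{
	btrfs_init_work(work, btrfs_worker_helper, example_func,
			example_ordered_func, example_ordered_free);
	/* Routed to wq->high only if the queue was allocated with WQ_HIGHPRI. */
	if (urgent)
		btrfs_set_work_high_priority(work);
	btrfs_queue_work(wq, work);
}
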