fs/btrfs/async-thread.c (v3.1)
 
  1/*
  2 * Copyright (C) 2007 Oracle.  All rights reserved.
  3 *
  4 * This program is free software; you can redistribute it and/or
  5 * modify it under the terms of the GNU General Public
  6 * License v2 as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful,
  9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 11 * General Public License for more details.
 12 *
 13 * You should have received a copy of the GNU General Public
 14 * License along with this program; if not, write to the
 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 16 * Boston, MA 021110-1307, USA.
 17 */
 18
 19#include <linux/kthread.h>
 20#include <linux/slab.h>
 21#include <linux/list.h>
 22#include <linux/spinlock.h>
 23#include <linux/freezer.h>
 24#include "async-thread.h"
 25
 26#define WORK_QUEUED_BIT 0
 27#define WORK_DONE_BIT 1
 28#define WORK_ORDER_DONE_BIT 2
 29#define WORK_HIGH_PRIO_BIT 3
 30
 31/*
 32 * container for the kthread task pointer and the list of pending work
 33 * One of these is allocated per thread.
 34 */
 35struct btrfs_worker_thread {
 36	/* pool we belong to */
 37	struct btrfs_workers *workers;
 38
 39	/* list of struct btrfs_work that are waiting for service */
 40	struct list_head pending;
 41	struct list_head prio_pending;
 42
 43	/* list of worker threads from struct btrfs_workers */
 44	struct list_head worker_list;
 45
 46	/* kthread */
 47	struct task_struct *task;
 48
 49	/* number of things on the pending list */
 50	atomic_t num_pending;
 51
 52	/* reference counter for this struct */
 53	atomic_t refs;
 54
 55	unsigned long sequence;
 56
 57	/* protects the pending list. */
 58	spinlock_t lock;
 59
 60	/* set to non-zero when this thread is already awake and kicking */
 61	int working;
 62
 63	/* are we currently idle */
 64	int idle;
 65};
 66
 67/*
 68 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 69 * for a very long time.  It will actually throttle on page writeback,
 70 * and so it may not make progress until after our btrfs worker threads
 71 * process all of the pending work structs in their queue
 72 *
 73 * This means we can't use btrfs_start_workers from inside a btrfs worker
 74 * thread that is used as part of cleaning dirty memory, which pretty much
 75 * involves all of the worker threads.
 76 *
 77 * Instead we have a helper queue that never has more than one thread,
 78 * where we schedule thread start operations.  This worker_start struct
 79 * is used to contain the work and hold a pointer to the queue that needs
 80 * another worker.
 81 */
 82struct worker_start {
 83	struct btrfs_work work;
 84	struct btrfs_workers *queue;
 85};
 86
 87static void start_new_worker_func(struct btrfs_work *work)
 88{
 89	struct worker_start *start;
 90	start = container_of(work, struct worker_start, work);
 91	btrfs_start_workers(start->queue, 1);
 92	kfree(start);
 93}
 94
 95static int start_new_worker(struct btrfs_workers *queue)
 96{
 97	struct worker_start *start;
 98	int ret;
 99
100	start = kzalloc(sizeof(*start), GFP_NOFS);
101	if (!start)
102		return -ENOMEM;
103
104	start->work.func = start_new_worker_func;
105	start->queue = queue;
106	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
107	if (ret)
108		kfree(start);
109	return ret;
110}
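/*
 * Editorial sketch, not part of the original file: roughly how the helper
 * queue described above is expected to be wired up by a caller.  The pool
 * names ("generic_worker", "workers") and the thread counts are
 * hypothetical; only the btrfs_* calls come from this API.
 *
 *	struct btrfs_workers generic_worker;
 *	struct btrfs_workers workers;
 *
 *	// the helper pool itself has no async helper and keeps one thread
 *	btrfs_init_workers(&generic_worker, "genwork", 1, NULL);
 *	// other pools hand it in as their atomic_worker_start queue
 *	btrfs_init_workers(&workers, "worker", 8, &generic_worker);
 *	btrfs_start_workers(&generic_worker, 1);
 *	btrfs_start_workers(&workers, 1);
 */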
111
112/*
113 * helper function to move a thread onto the idle list after it
114 * has finished some requests.
115 */
116static void check_idle_worker(struct btrfs_worker_thread *worker)
117{
118	if (!worker->idle && atomic_read(&worker->num_pending) <
119	    worker->workers->idle_thresh / 2) {
120		unsigned long flags;
121		spin_lock_irqsave(&worker->workers->lock, flags);
122		worker->idle = 1;
123
124		/* the list may be empty if the worker is just starting */
125		if (!list_empty(&worker->worker_list)) {
126			list_move(&worker->worker_list,
127				 &worker->workers->idle_list);
128		}
129		spin_unlock_irqrestore(&worker->workers->lock, flags);
130	}
131}
132
133/*
134 * helper function to move a thread off the idle list after new
135 * pending work is added.
136 */
137static void check_busy_worker(struct btrfs_worker_thread *worker)
138{
139	if (worker->idle && atomic_read(&worker->num_pending) >=
140	    worker->workers->idle_thresh) {
141		unsigned long flags;
142		spin_lock_irqsave(&worker->workers->lock, flags);
143		worker->idle = 0;
144
145		if (!list_empty(&worker->worker_list)) {
146			list_move_tail(&worker->worker_list,
147				      &worker->workers->worker_list);
148		}
149		spin_unlock_irqrestore(&worker->workers->lock, flags);
150	}
151}
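/*
 * Editorial note: taken together, these two helpers give the idle/busy
 * transition some hysteresis.  With the default idle_thresh of 32 a worker
 * is moved to the busy list once it has 32 or more pending items, and it
 * only returns to the idle list when its backlog falls below 16
 * (idle_thresh / 2), so it does not ping-pong between the lists on every
 * queued item.
 */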
152
153static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
154{
155	struct btrfs_workers *workers = worker->workers;
156	unsigned long flags;
157
158	rmb();
159	if (!workers->atomic_start_pending)
160		return;
161
162	spin_lock_irqsave(&workers->lock, flags);
163	if (!workers->atomic_start_pending)
164		goto out;
165
166	workers->atomic_start_pending = 0;
167	if (workers->num_workers + workers->num_workers_starting >=
168	    workers->max_workers)
169		goto out;
170
171	workers->num_workers_starting += 1;
172	spin_unlock_irqrestore(&workers->lock, flags);
173	start_new_worker(workers);
174	return;
175
176out:
177	spin_unlock_irqrestore(&workers->lock, flags);
178}
179
180static noinline int run_ordered_completions(struct btrfs_workers *workers,
181					    struct btrfs_work *work)
182{
183	if (!workers->ordered)
184		return 0;
185
186	set_bit(WORK_DONE_BIT, &work->flags);
187
188	spin_lock(&workers->order_lock);
189
190	while (1) {
191		if (!list_empty(&workers->prio_order_list)) {
192			work = list_entry(workers->prio_order_list.next,
193					  struct btrfs_work, order_list);
194		} else if (!list_empty(&workers->order_list)) {
195			work = list_entry(workers->order_list.next,
196					  struct btrfs_work, order_list);
197		} else {
198			break;
199		}
200		if (!test_bit(WORK_DONE_BIT, &work->flags))
201			break;
202
203		/* we are going to call the ordered done function, but
204		 * we leave the work item on the list as a barrier so
205		 * that later work items that are done don't have their
206		 * functions called before this one returns
207		 */
208		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
209			break;
210
211		spin_unlock(&workers->order_lock);
212
213		work->ordered_func(work);
214
215		/* now take the lock again and call the freeing code */
216		spin_lock(&workers->order_lock);
217		list_del(&work->order_list);
218		work->ordered_free(work);
219	}
220
221	spin_unlock(&workers->order_lock);
222	return 0;
223}
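/*
 * Editorial sketch, not part of the original file: how an ordered pool is
 * typically used.  work->func() may run on any worker and complete in any
 * order, while ordered_func() and ordered_free() are called strictly in
 * submission order.  The struct my_item and the my_* callbacks are
 * hypothetical.
 *
 *	static void my_func(struct btrfs_work *work);         // heavy lifting, any order
 *	static void my_ordered_func(struct btrfs_work *work); // completion, in order
 *	static void my_ordered_free(struct btrfs_work *work); // frees the item, in order
 *
 *	struct my_item *item = kzalloc(sizeof(*item), GFP_NOFS);
 *	item->work.func = my_func;
 *	item->work.ordered_func = my_ordered_func;
 *	item->work.ordered_free = my_ordered_free;
 *	btrfs_queue_worker(&ordered_pool, &item->work);  // pool has ->ordered set
 */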
224
225static void put_worker(struct btrfs_worker_thread *worker)
226{
227	if (atomic_dec_and_test(&worker->refs))
228		kfree(worker);
229}
230
231static int try_worker_shutdown(struct btrfs_worker_thread *worker)
232{
233	int freeit = 0;
234
235	spin_lock_irq(&worker->lock);
236	spin_lock(&worker->workers->lock);
237	if (worker->workers->num_workers > 1 &&
238	    worker->idle &&
239	    !worker->working &&
240	    !list_empty(&worker->worker_list) &&
241	    list_empty(&worker->prio_pending) &&
242	    list_empty(&worker->pending) &&
243	    atomic_read(&worker->num_pending) == 0) {
244		freeit = 1;
245		list_del_init(&worker->worker_list);
246		worker->workers->num_workers--;
247	}
248	spin_unlock(&worker->workers->lock);
249	spin_unlock_irq(&worker->lock);
250
251	if (freeit)
252		put_worker(worker);
253	return freeit;
254}
255
256static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
257					struct list_head *prio_head,
258					struct list_head *head)
259{
260	struct btrfs_work *work = NULL;
261	struct list_head *cur = NULL;
262
 263	if (!list_empty(prio_head))
264		cur = prio_head->next;
265
266	smp_mb();
267	if (!list_empty(&worker->prio_pending))
268		goto refill;
269
270	if (!list_empty(head))
271		cur = head->next;
272
273	if (cur)
274		goto out;
275
276refill:
277	spin_lock_irq(&worker->lock);
278	list_splice_tail_init(&worker->prio_pending, prio_head);
279	list_splice_tail_init(&worker->pending, head);
280
281	if (!list_empty(prio_head))
282		cur = prio_head->next;
283	else if (!list_empty(head))
284		cur = head->next;
285	spin_unlock_irq(&worker->lock);
286
287	if (!cur)
288		goto out_fail;
289
290out:
291	work = list_entry(cur, struct btrfs_work, list);
292
293out_fail:
294	return work;
295}
296
297/*
298 * main loop for servicing work items
299 */
300static int worker_loop(void *arg)
301{
302	struct btrfs_worker_thread *worker = arg;
303	struct list_head head;
304	struct list_head prio_head;
305	struct btrfs_work *work;
306
307	INIT_LIST_HEAD(&head);
308	INIT_LIST_HEAD(&prio_head);
309
310	do {
311again:
312		while (1) {
313
314
315			work = get_next_work(worker, &prio_head, &head);
316			if (!work)
317				break;
318
319			list_del(&work->list);
320			clear_bit(WORK_QUEUED_BIT, &work->flags);
321
322			work->worker = worker;
323
324			work->func(work);
325
326			atomic_dec(&worker->num_pending);
327			/*
328			 * unless this is an ordered work queue,
329			 * 'work' was probably freed by func above.
330			 */
331			run_ordered_completions(worker->workers, work);
332
333			check_pending_worker_creates(worker);
334
335		}
336
337		spin_lock_irq(&worker->lock);
338		check_idle_worker(worker);
339
340		if (freezing(current)) {
341			worker->working = 0;
342			spin_unlock_irq(&worker->lock);
343			refrigerator();
344		} else {
345			spin_unlock_irq(&worker->lock);
346			if (!kthread_should_stop()) {
347				cpu_relax();
348				/*
349				 * we've dropped the lock, did someone else
 350				 * jump in?
351				 */
352				smp_mb();
353				if (!list_empty(&worker->pending) ||
354				    !list_empty(&worker->prio_pending))
355					continue;
356
357				/*
358				 * this short schedule allows more work to
359				 * come in without the queue functions
360				 * needing to go through wake_up_process()
361				 *
362				 * worker->working is still 1, so nobody
363				 * is going to try and wake us up
364				 */
365				schedule_timeout(1);
366				smp_mb();
367				if (!list_empty(&worker->pending) ||
368				    !list_empty(&worker->prio_pending))
369					continue;
370
371				if (kthread_should_stop())
372					break;
373
 374				/* still no more work? sleep for real */
375				spin_lock_irq(&worker->lock);
376				set_current_state(TASK_INTERRUPTIBLE);
377				if (!list_empty(&worker->pending) ||
378				    !list_empty(&worker->prio_pending)) {
379					spin_unlock_irq(&worker->lock);
380					set_current_state(TASK_RUNNING);
381					goto again;
382				}
383
384				/*
385				 * this makes sure we get a wakeup when someone
386				 * adds something new to the queue
387				 */
388				worker->working = 0;
389				spin_unlock_irq(&worker->lock);
390
391				if (!kthread_should_stop()) {
392					schedule_timeout(HZ * 120);
393					if (!worker->working &&
394					    try_worker_shutdown(worker)) {
395						return 0;
396					}
397				}
398			}
399			__set_current_state(TASK_RUNNING);
400		}
401	} while (!kthread_should_stop());
402	return 0;
403}
404
405/*
406 * this will wait for all the worker threads to shutdown
407 */
408int btrfs_stop_workers(struct btrfs_workers *workers)
409{
410	struct list_head *cur;
411	struct btrfs_worker_thread *worker;
412	int can_stop;
413
414	spin_lock_irq(&workers->lock);
415	list_splice_init(&workers->idle_list, &workers->worker_list);
416	while (!list_empty(&workers->worker_list)) {
417		cur = workers->worker_list.next;
418		worker = list_entry(cur, struct btrfs_worker_thread,
419				    worker_list);
420
421		atomic_inc(&worker->refs);
422		workers->num_workers -= 1;
423		if (!list_empty(&worker->worker_list)) {
424			list_del_init(&worker->worker_list);
425			put_worker(worker);
426			can_stop = 1;
427		} else
428			can_stop = 0;
429		spin_unlock_irq(&workers->lock);
430		if (can_stop)
431			kthread_stop(worker->task);
432		spin_lock_irq(&workers->lock);
433		put_worker(worker);
434	}
435	spin_unlock_irq(&workers->lock);
436	return 0;
437}
438
439/*
440 * simple init on struct btrfs_workers
441 */
442void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
443			struct btrfs_workers *async_helper)
444{
445	workers->num_workers = 0;
446	workers->num_workers_starting = 0;
447	INIT_LIST_HEAD(&workers->worker_list);
448	INIT_LIST_HEAD(&workers->idle_list);
449	INIT_LIST_HEAD(&workers->order_list);
450	INIT_LIST_HEAD(&workers->prio_order_list);
451	spin_lock_init(&workers->lock);
452	spin_lock_init(&workers->order_lock);
453	workers->max_workers = max;
454	workers->idle_thresh = 32;
455	workers->name = name;
456	workers->ordered = 0;
457	workers->atomic_start_pending = 0;
458	workers->atomic_worker_start = async_helper;
459}
460
461/*
462 * starts new worker threads.  This does not enforce the max worker
463 * count in case you need to temporarily go past it.
464 */
465static int __btrfs_start_workers(struct btrfs_workers *workers,
466				 int num_workers)
467{
468	struct btrfs_worker_thread *worker;
469	int ret = 0;
470	int i;
471
472	for (i = 0; i < num_workers; i++) {
473		worker = kzalloc(sizeof(*worker), GFP_NOFS);
474		if (!worker) {
475			ret = -ENOMEM;
476			goto fail;
477		}
478
479		INIT_LIST_HEAD(&worker->pending);
480		INIT_LIST_HEAD(&worker->prio_pending);
481		INIT_LIST_HEAD(&worker->worker_list);
482		spin_lock_init(&worker->lock);
483
484		atomic_set(&worker->num_pending, 0);
485		atomic_set(&worker->refs, 1);
486		worker->workers = workers;
487		worker->task = kthread_run(worker_loop, worker,
488					   "btrfs-%s-%d", workers->name,
489					   workers->num_workers + i);
490		if (IS_ERR(worker->task)) {
491			ret = PTR_ERR(worker->task);
492			kfree(worker);
493			goto fail;
494		}
495		spin_lock_irq(&workers->lock);
496		list_add_tail(&worker->worker_list, &workers->idle_list);
497		worker->idle = 1;
498		workers->num_workers++;
499		workers->num_workers_starting--;
500		WARN_ON(workers->num_workers_starting < 0);
501		spin_unlock_irq(&workers->lock);
502	}
503	return 0;
504fail:
505	btrfs_stop_workers(workers);
506	return ret;
507}
508
509int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
510{
511	spin_lock_irq(&workers->lock);
512	workers->num_workers_starting += num_workers;
513	spin_unlock_irq(&workers->lock);
514	return __btrfs_start_workers(workers, num_workers);
515}
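/*
 * Editorial sketch, not part of the original file: minimal use of this
 * pool API.  Everything named my_* or example_* is hypothetical; passing
 * NULL as the async helper makes worker starts synchronous.
 *
 *	static void my_func(struct btrfs_work *work);
 *
 *	struct btrfs_workers example_pool;
 *	struct btrfs_work work = { .func = my_func };
 *
 *	btrfs_init_workers(&example_pool, "example", 4, NULL);
 *	btrfs_start_workers(&example_pool, 1);
 *	btrfs_queue_worker(&example_pool, &work);
 *	...
 *	btrfs_stop_workers(&example_pool);
 */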
516
517/*
518 * run through the list and find a worker thread that doesn't have a lot
519 * to do right now.  This can return null if we aren't yet at the thread
520 * count limit and all of the threads are busy.
521 */
522static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
523{
524	struct btrfs_worker_thread *worker;
525	struct list_head *next;
526	int enforce_min;
527
528	enforce_min = (workers->num_workers + workers->num_workers_starting) <
529		workers->max_workers;
530
531	/*
532	 * if we find an idle thread, don't move it to the end of the
533	 * idle list.  This improves the chance that the next submission
534	 * will reuse the same thread, and maybe catch it while it is still
535	 * working
536	 */
537	if (!list_empty(&workers->idle_list)) {
538		next = workers->idle_list.next;
539		worker = list_entry(next, struct btrfs_worker_thread,
540				    worker_list);
541		return worker;
542	}
543	if (enforce_min || list_empty(&workers->worker_list))
544		return NULL;
545
546	/*
547	 * if we pick a busy task, move the task to the end of the list.
548	 * hopefully this will keep things somewhat evenly balanced.
549	 * Do the move in batches based on the sequence number.  This groups
550	 * requests submitted at roughly the same time onto the same worker.
551	 */
552	next = workers->worker_list.next;
553	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
554	worker->sequence++;
555
556	if (worker->sequence % workers->idle_thresh == 0)
557		list_move_tail(next, &workers->worker_list);
558	return worker;
559}
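/*
 * Editorial note: worker->sequence is incremented on every pick, and the
 * busy worker is only rotated to the tail of the list when the sequence
 * reaches a multiple of idle_thresh.  With the default idle_thresh of 32
 * that means roughly 32 consecutive submissions land on the same busy
 * thread before the next one is tried, which keeps requests submitted at
 * about the same time together.
 */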
560
561/*
562 * selects a worker thread to take the next job.  This will either find
563 * an idle worker, start a new worker up to the max count, or just return
564 * one of the existing busy workers.
565 */
566static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
567{
568	struct btrfs_worker_thread *worker;
569	unsigned long flags;
570	struct list_head *fallback;
571
572again:
573	spin_lock_irqsave(&workers->lock, flags);
574	worker = next_worker(workers);
575
576	if (!worker) {
577		if (workers->num_workers + workers->num_workers_starting >=
578		    workers->max_workers) {
579			goto fallback;
580		} else if (workers->atomic_worker_start) {
581			workers->atomic_start_pending = 1;
582			goto fallback;
583		} else {
584			workers->num_workers_starting++;
585			spin_unlock_irqrestore(&workers->lock, flags);
586			/* we're below the limit, start another worker */
587			__btrfs_start_workers(workers, 1);
588			goto again;
589		}
590	}
591	goto found;
592
593fallback:
594	fallback = NULL;
595	/*
596	 * we have failed to find any workers, just
597	 * return the first one we can find.
598	 */
599	if (!list_empty(&workers->worker_list))
600		fallback = workers->worker_list.next;
601	if (!list_empty(&workers->idle_list))
602		fallback = workers->idle_list.next;
603	BUG_ON(!fallback);
604	worker = list_entry(fallback,
605		  struct btrfs_worker_thread, worker_list);
606found:
607	/*
608	 * this makes sure the worker doesn't exit before it is placed
609	 * onto a busy/idle list
610	 */
611	atomic_inc(&worker->num_pending);
612	spin_unlock_irqrestore(&workers->lock, flags);
613	return worker;
614}
615
616/*
617 * btrfs_requeue_work just puts the work item back on the tail of the list
618 * it was taken from.  It is intended for use with long running work functions
619 * that make some progress and want to give the cpu up for others.
620 */
621int btrfs_requeue_work(struct btrfs_work *work)
622{
623	struct btrfs_worker_thread *worker = work->worker;
624	unsigned long flags;
625	int wake = 0;
626
627	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
628		goto out;
629
630	spin_lock_irqsave(&worker->lock, flags);
631	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
632		list_add_tail(&work->list, &worker->prio_pending);
633	else
634		list_add_tail(&work->list, &worker->pending);
635	atomic_inc(&worker->num_pending);
636
637	/* by definition we're busy, take ourselves off the idle
638	 * list
639	 */
640	if (worker->idle) {
641		spin_lock(&worker->workers->lock);
642		worker->idle = 0;
643		list_move_tail(&worker->worker_list,
644			      &worker->workers->worker_list);
645		spin_unlock(&worker->workers->lock);
646	}
647	if (!worker->working) {
648		wake = 1;
649		worker->working = 1;
650	}
651
652	if (wake)
653		wake_up_process(worker->task);
654	spin_unlock_irqrestore(&worker->lock, flags);
655out:
656
657	return 0;
658}
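/*
 * Editorial sketch, not part of the original file: a long-running work
 * function that yields the worker by requeueing itself, as described in
 * the comment above.  The my_item structure and the helper functions used
 * below are hypothetical.
 *
 *	static void my_long_func(struct btrfs_work *work)
 *	{
 *		struct my_item *item = container_of(work, struct my_item, work);
 *
 *		if (!process_one_batch(item)) {
 *			// more to do: go to the back of the queue and let
 *			// other pending work run on this thread first
 *			btrfs_requeue_work(work);
 *			return;
 *		}
 *		finish_item(item);
 *	}
 */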
659
660void btrfs_set_work_high_prio(struct btrfs_work *work)
661{
662	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
663}
664
665/*
666 * places a struct btrfs_work into the pending queue of one of the kthreads
667 */
668int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
669{
670	struct btrfs_worker_thread *worker;
671	unsigned long flags;
672	int wake = 0;
673
674	/* don't requeue something already on a list */
675	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
676		goto out;
677
678	worker = find_worker(workers);
679	if (workers->ordered) {
680		/*
681		 * you're not allowed to do ordered queues from an
682		 * interrupt handler
683		 */
684		spin_lock(&workers->order_lock);
685		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
686			list_add_tail(&work->order_list,
687				      &workers->prio_order_list);
688		} else {
689			list_add_tail(&work->order_list, &workers->order_list);
690		}
691		spin_unlock(&workers->order_lock);
692	} else {
693		INIT_LIST_HEAD(&work->order_list);
694	}
695
696	spin_lock_irqsave(&worker->lock, flags);
697
698	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
699		list_add_tail(&work->list, &worker->prio_pending);
700	else
701		list_add_tail(&work->list, &worker->pending);
702	check_busy_worker(worker);
703
704	/*
705	 * avoid calling into wake_up_process if this thread has already
706	 * been kicked
707	 */
708	if (!worker->working)
709		wake = 1;
710	worker->working = 1;
711
712	if (wake)
713		wake_up_process(worker->task);
714	spin_unlock_irqrestore(&worker->lock, flags);
715
716out:
717	return 0;
718}
fs/btrfs/async-thread.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2007 Oracle.  All rights reserved.
  4 * Copyright (C) 2014 Fujitsu.  All rights reserved.
  5 */
  6
  7#include <linux/kthread.h>
  8#include <linux/slab.h>
  9#include <linux/list.h>
 10#include <linux/spinlock.h>
 11#include <linux/freezer.h>
 12#include <trace/events/btrfs.h>
 13#include "async-thread.h"
 14
 15enum {
 16	WORK_DONE_BIT,
 17	WORK_ORDER_DONE_BIT,
 18};
 19
 20#define NO_THRESHOLD (-1)
 21#define DFT_THRESHOLD (32)
 22
 23struct btrfs_workqueue {
 24	struct workqueue_struct *normal_wq;
 25
 26	/* File system this workqueue services */
 27	struct btrfs_fs_info *fs_info;
 28
 29	/* List head pointing to ordered work list */
 30	struct list_head ordered_list;
 31
 32	/* Spinlock for ordered_list */
 33	spinlock_t list_lock;
 34
 35	/* Thresholding related variants */
 36	atomic_t pending;
 37
 38	/* Up limit of concurrency workers */
 39	int limit_active;
 40
 41	/* Current number of concurrency workers */
 42	int current_active;
 43
 44	/* Threshold to change current_active */
 45	int thresh;
 46	unsigned int count;
 47	spinlock_t thres_lock;
 48};
 49
 50struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
 51{
 52	return wq->fs_info;
 53}
 54
 55struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
 56{
 57	return work->wq->fs_info;
 58}
 59
 60bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
 61{
 62	/*
 63	 * We could compare wq->pending with num_online_cpus()
 64	 * to support "thresh == NO_THRESHOLD" case, but it requires
 65	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
 66	 * postpone it until someone needs the support of that case.
 67	 */
 68	if (wq->thresh == NO_THRESHOLD)
 69		return false;
 70
 71	return atomic_read(&wq->pending) > wq->thresh * 2;
 72}
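/*
 * Editorial note: submitters can poll this to apply back-pressure; it
 * reports congestion once the number of queued but not yet executed items
 * exceeds twice the workqueue threshold.  A hypothetical caller:
 *
 *	while (btrfs_workqueue_normal_congested(wq))
 *		cond_resched();
 */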
 73
 74static void btrfs_init_workqueue(struct btrfs_workqueue *wq,
 75				 struct btrfs_fs_info *fs_info)
 76{
 77	wq->fs_info = fs_info;
 78	atomic_set(&wq->pending, 0);
 79	INIT_LIST_HEAD(&wq->ordered_list);
 80	spin_lock_init(&wq->list_lock);
 81	spin_lock_init(&wq->thres_lock);
 82}
 83
 84struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
 85					      const char *name, unsigned int flags,
 86					      int limit_active, int thresh)
 87{
 88	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
 89
 90	if (!ret)
 91		return NULL;
 92
 93	btrfs_init_workqueue(ret, fs_info);
 94
 95	ret->limit_active = limit_active;
 96	if (thresh == 0)
 97		thresh = DFT_THRESHOLD;
 98	/* For low threshold, disabling threshold is a better choice */
 99	if (thresh < DFT_THRESHOLD) {
100		ret->current_active = limit_active;
101		ret->thresh = NO_THRESHOLD;
102	} else {
103		/*
104		 * For threshold-able wq, let its concurrency grow on demand.
105		 * Use minimal max_active at alloc time to reduce resource
106		 * usage.
107		 */
108		ret->current_active = 1;
109		ret->thresh = thresh;
110	}
111
112	ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active,
113					 name);
114	if (!ret->normal_wq) {
115		kfree(ret);
116		return NULL;
117	}
118
119	trace_btrfs_workqueue_alloc(ret, name);
120	return ret;
121}
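/*
 * Editorial sketch, not part of the original file: allocating a
 * thresholded workqueue.  The flag choice and the destination field are
 * illustrative only.
 *
 *	fs_info->workers = btrfs_alloc_workqueue(fs_info, "worker",
 *						 WQ_FREEZABLE, max_active, 0);
 *	if (!fs_info->workers)
 *		return -ENOMEM;
 *
 * A thresh of 0 selects DFT_THRESHOLD (32); a nonzero thresh below 32
 * disables thresholding and pins the concurrency at limit_active.
 */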
122
123struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
124				struct btrfs_fs_info *fs_info, const char *name,
125				unsigned int flags)
126{
127	struct btrfs_workqueue *ret;
128
129	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
130	if (!ret)
131		return NULL;
132
133	btrfs_init_workqueue(ret, fs_info);
134
135	/* Ordered workqueues don't allow @max_active adjustments. */
136	ret->limit_active = 1;
137	ret->current_active = 1;
138	ret->thresh = NO_THRESHOLD;
139
140	ret->normal_wq = alloc_ordered_workqueue("btrfs-%s", flags, name);
141	if (!ret->normal_wq) {
142		kfree(ret);
143		return NULL;
144	}
145
146	trace_btrfs_workqueue_alloc(ret, name);
147	return ret;
148}
149
150/*
151 * Hook for threshold which will be called in btrfs_queue_work.
152 * This hook WILL be called in IRQ handler context,
153 * so workqueue_set_max_active MUST NOT be called in this hook
154 */
155static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
156{
157	if (wq->thresh == NO_THRESHOLD)
158		return;
159	atomic_inc(&wq->pending);
160}
161
162/*
163 * Hook for threshold which will be called before executing the work.
164 * This hook is called in kthread context.
165 * So workqueue_set_max_active is called here.
166 */
167static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
168{
169	int new_current_active;
170	long pending;
171	int need_change = 0;
172
173	if (wq->thresh == NO_THRESHOLD)
174		return;
175
176	atomic_dec(&wq->pending);
177	spin_lock(&wq->thres_lock);
178	/*
179	 * Use wq->count to limit the calling frequency of
180	 * workqueue_set_max_active.
181	 */
182	wq->count++;
183	wq->count %= (wq->thresh / 4);
184	if (!wq->count)
185		goto  out;
186	new_current_active = wq->current_active;
187
188	/*
189	 * pending may change later, but that's OK since we don't need it to
190	 * be perfectly accurate when calculating new_current_active.
191	 */
192	pending = atomic_read(&wq->pending);
193	if (pending > wq->thresh)
194		new_current_active++;
195	if (pending < wq->thresh / 2)
196		new_current_active--;
197	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
198	if (new_current_active != wq->current_active)  {
199		need_change = 1;
200		wq->current_active = new_current_active;
201	}
202out:
203	spin_unlock(&wq->thres_lock);
204
205	if (need_change) {
206		workqueue_set_max_active(wq->normal_wq, wq->current_active);
207	}
208}
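/*
 * Editorial note with illustrative numbers: for a queue with thresh == 32,
 * wq->count cycles modulo thresh / 4 == 8 and the adjustment is skipped on
 * the call where it wraps to zero.  When it does run, more than 32 pending
 * items grow current_active by one, fewer than 16 shrink it by one, and
 * the result is clamped to [1, limit_active] before being applied with
 * workqueue_set_max_active().
 */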
209
210static void run_ordered_work(struct btrfs_workqueue *wq,
211			     struct btrfs_work *self)
212{
213	struct list_head *list = &wq->ordered_list;
214	struct btrfs_work *work;
215	spinlock_t *lock = &wq->list_lock;
216	unsigned long flags;
217	bool free_self = false;
218
219	while (1) {
220		spin_lock_irqsave(lock, flags);
221		if (list_empty(list))
222			break;
223		work = list_entry(list->next, struct btrfs_work,
224				  ordered_list);
225		if (!test_bit(WORK_DONE_BIT, &work->flags))
226			break;
227		/*
228		 * Orders all subsequent loads after reading WORK_DONE_BIT,
229		 * paired with the smp_mb__before_atomic in btrfs_work_helper
230		 * this guarantees that the ordered function will see all
231		 * updates from ordinary work function.
232		 */
233		smp_rmb();
234
235		/*
236		 * we are going to call the ordered done function, but
237		 * we leave the work item on the list as a barrier so
238		 * that later work items that are done don't have their
239		 * functions called before this one returns
240		 */
241		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
242			break;
243		trace_btrfs_ordered_sched(work);
244		spin_unlock_irqrestore(lock, flags);
245		work->ordered_func(work, false);
246
247		/* now take the lock again and drop our item from the list */
248		spin_lock_irqsave(lock, flags);
249		list_del(&work->ordered_list);
250		spin_unlock_irqrestore(lock, flags);
251
252		if (work == self) {
253			/*
254			 * This is the work item that the worker is currently
255			 * executing.
256			 *
257			 * The kernel workqueue code guarantees non-reentrancy
258			 * of work items. I.e., if a work item with the same
259			 * address and work function is queued twice, the second
260			 * execution is blocked until the first one finishes. A
261			 * work item may be freed and recycled with the same
262			 * work function; the workqueue code assumes that the
263			 * original work item cannot depend on the recycled work
264			 * item in that case (see find_worker_executing_work()).
265			 *
266			 * Note that different types of Btrfs work can depend on
267			 * each other, and one type of work on one Btrfs
268			 * filesystem may even depend on the same type of work
269			 * on another Btrfs filesystem via, e.g., a loop device.
270			 * Therefore, we must not allow the current work item to
271			 * be recycled until we are really done, otherwise we
272			 * break the above assumption and can deadlock.
273			 */
274			free_self = true;
275		} else {
276			/*
277			 * We don't want to call the ordered free functions with
278			 * the lock held.
279			 */
280			work->ordered_func(work, true);
281			/* NB: work must not be dereferenced past this point. */
282			trace_btrfs_all_work_done(wq->fs_info, work);
283		}
284	}
285	spin_unlock_irqrestore(lock, flags);
286
287	if (free_self) {
288		self->ordered_func(self, true);
289		/* NB: self must not be dereferenced past this point. */
290		trace_btrfs_all_work_done(wq->fs_info, self);
291	}
292}
293
294static void btrfs_work_helper(struct work_struct *normal_work)
295{
296	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
297					       normal_work);
298	struct btrfs_workqueue *wq = work->wq;
299	int need_order = 0;
300
301	/*
302	 * We should not touch things inside work in the following cases:
303	 * 1) after work->func() if it has no ordered_func(..., true) to free
304	 *    Since the struct is freed in work->func().
305	 * 2) after setting WORK_DONE_BIT
306	 *    The work may be freed in other threads almost instantly.
307	 * So we save the needed things here.
308	 */
309	if (work->ordered_func)
310		need_order = 1;
311
312	trace_btrfs_work_sched(work);
313	thresh_exec_hook(wq);
314	work->func(work);
315	if (need_order) {
316		/*
317		 * Ensures all memory accesses done in the work function are
318		 * ordered before setting the WORK_DONE_BIT, ensuring that the thread
319		 * which is going to execute the ordered work sees them.
320		 * Pairs with the smp_rmb in run_ordered_work.
321		 */
322		smp_mb__before_atomic();
323		set_bit(WORK_DONE_BIT, &work->flags);
324		run_ordered_work(wq, work);
325	} else {
326		/* NB: work must not be dereferenced past this point. */
327		trace_btrfs_all_work_done(wq->fs_info, work);
328	}
329}
330
331void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
332		     btrfs_ordered_func_t ordered_func)
333{
334	work->func = func;
335	work->ordered_func = ordered_func;
336	INIT_WORK(&work->normal_work, btrfs_work_helper);
337	INIT_LIST_HEAD(&work->ordered_list);
338	work->flags = 0;
339}
340
341void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
342{
343	unsigned long flags;
344
345	work->wq = wq;
346	thresh_queue_hook(wq);
347	if (work->ordered_func) {
348		spin_lock_irqsave(&wq->list_lock, flags);
349		list_add_tail(&work->ordered_list, &wq->ordered_list);
350		spin_unlock_irqrestore(&wq->list_lock, flags);
351	}
352	trace_btrfs_work_queued(work);
353	queue_work(wq->normal_wq, &work->normal_work);
354}
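/*
 * Editorial sketch, not part of the original file: typical use of the
 * current API.  The my_* callbacks, struct my_item and the target
 * workqueue are hypothetical.
 *
 *	static void my_func(struct btrfs_work *work)
 *	{
 *		// the actual work; may run concurrently with other items
 *	}
 *
 *	static void my_ordered_func(struct btrfs_work *work, bool do_free)
 *	{
 *		// runs in submission order; when do_free is true this is
 *		// also where the containing structure must be freed
 *	}
 *
 *	btrfs_init_work(&item->work, my_func, my_ordered_func);
 *	btrfs_queue_work(wq, &item->work);
 *
 * Passing NULL as the ordered callback queues a plain, unordered item.
 */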
355
356void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
357{
358	if (!wq)
359		return;
360	destroy_workqueue(wq->normal_wq);
361	trace_btrfs_workqueue_destroy(wq);
362	kfree(wq);
363}
364
365void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
366{
367	if (wq)
368		wq->limit_active = limit_active;
369}
370
371void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
372{
373	flush_workqueue(wq->normal_wq);
374}