// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <trace/events/btrfs.h>
#include "async-thread.h"

enum {
	WORK_DONE_BIT,
	WORK_ORDER_DONE_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->pending with num_online_cpus() to support
	 * the "thresh == NO_THRESHOLD" case, but it would require moving
	 * the atomic_inc/dec up into thresh_queue/exec_hook. Postpone that
	 * until someone needs the support for that case.
	 */
	if (wq->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->pending) > wq->thresh * 2;
}
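
/*
 * Illustrative only, not a caller in this file: a submitter can use the
 * congestion check above to apply back-pressure instead of piling work
 * onto an already deep queue. have_more_work() and submit_one() are
 * hypothetical names.
 *
 *	while (have_more_work()) {
 *		while (btrfs_workqueue_normal_congested(wq))
 *			cond_resched();	// pending > 2 * thresh: back off
 *		submit_one(wq);
 *	}
 */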

static void btrfs_init_workqueue(struct btrfs_workqueue *wq,
				 struct btrfs_fs_info *fs_info)
{
	wq->fs_info = fs_info;
	atomic_set(&wq->pending, 0);
	INIT_LIST_HEAD(&wq->ordered_list);
	spin_lock_init(&wq->list_lock);
	spin_lock_init(&wq->thres_lock);
}

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name, unsigned int flags,
					      int limit_active, int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	btrfs_init_workqueue(ret, fs_info);

	ret->limit_active = limit_active;
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active,
					 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	trace_btrfs_workqueue_alloc(ret, name);
	return ret;
}
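
/*
 * A minimal allocation sketch (illustrative; the variable names are
 * hypothetical, and WQ_UNBOUND is just one plausible flag to pass):
 *
 *	struct btrfs_workqueue *wq;
 *
 *	// thresh = 0 selects DFT_THRESHOLD (32); concurrency then grows
 *	// from 1 toward limit_active (8 here) as the queue gets deeper.
 *	wq = btrfs_alloc_workqueue(fs_info, "worker", WQ_UNBOUND, 8, 0);
 *	if (!wq)
 *		return -ENOMEM;
 */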

struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
				struct btrfs_fs_info *fs_info, const char *name,
				unsigned int flags)
{
	struct btrfs_workqueue *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	btrfs_init_workqueue(ret, fs_info);

	/* Ordered workqueues don't allow @max_active adjustments. */
	ret->limit_active = 1;
	ret->current_active = 1;
	ret->thresh = NO_THRESHOLD;

	ret->normal_wq = alloc_ordered_workqueue("btrfs-%s", flags, name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	trace_btrfs_workqueue_alloc(ret, name);
	return ret;
}
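
/*
 * Illustrative contrast with btrfs_alloc_workqueue() above: an ordered
 * workqueue executes at most one item at a time, in queueing order, so
 * it suits strictly serialized work (names hypothetical):
 *
 *	wq = btrfs_alloc_ordered_workqueue(fs_info, "fixup", 0);
 *	if (!wq)
 *		return -ENOMEM;
 */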

/*
 * Hook for the threshold mechanism, called from btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called from it.
 */
static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold mechanism, called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * may be called here.
 */
static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change after we read it, but that's OK: the value
	 * doesn't need to be exact to calculate new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}
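
/*
 * Worked example of the thresholding above, assuming thresh = 32 and
 * limit_active = 8: the hook only re-evaluates every thresh / 4 = 8
 * executions. If pending > 32 at that point, current_active steps up by
 * one (toward 8); if pending < 16 it steps down (toward 1); between 16
 * and 32 it holds steady, giving the grow/shrink decision some hysteresis.
 */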

static void run_ordered_work(struct btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	bool free_self = false;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
		/*
		 * Orders all subsequent loads after reading WORK_DONE_BIT,
		 * paired with the smp_mb__before_atomic in btrfs_work_helper.
		 * This guarantees that the ordered function will see all
		 * updates from the ordinary work function.
		 */
		smp_rmb();

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work, false);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items. I.e., if a work item with the same
			 * address and work function is queued twice, the second
			 * execution is blocked until the first one finishes. A
			 * work item may be freed and recycled with the same
			 * work function; the workqueue code assumes that the
			 * original work item cannot depend on the recycled work
			 * item in that case (see find_worker_executing_work()).
			 *
			 * Note that different types of Btrfs work can depend on
			 * each other, and one type of work on one Btrfs
			 * filesystem may even depend on the same type of work
			 * on another Btrfs filesystem via, e.g., a loop device.
			 * Therefore, we must not allow the current work item to
			 * be recycled until we are really done, otherwise we
			 * break the above assumption and can deadlock.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held.
			 */
			work->ordered_func(work, true);
			/* NB: work must not be dereferenced past this point. */
			trace_btrfs_all_work_done(wq->fs_info, work);
		}
	}
	spin_unlock_irqrestore(lock, flags);

	if (free_self) {
		self->ordered_func(self, true);
		/* NB: self must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, self);
	}
}
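
/*
 * Timeline sketch of the ordering guarantee above (illustrative): suppose
 * work items A, B and C were queued in that order, and B's work function
 * finishes first. run_ordered_work() stops at A because A's WORK_DONE_BIT
 * is not yet set, so B's ordered_func cannot run early. Once A completes,
 * the worker that finished A walks the list and runs the ordered functions
 * for A, then B (and C, if it is done by then), preserving queue order.
 */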

static void btrfs_work_helper(struct work_struct *normal_work)
{
	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
					       normal_work);
	struct btrfs_workqueue *wq = work->wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func(), if it has no ordered_func(..., true) to
	 *    free the struct, since it may be freed in work->func();
	 * 2) after setting WORK_DONE_BIT, since the work may be freed by
	 *    another thread almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		/*
		 * Ensures all memory accesses done in the work function are
		 * ordered before setting the WORK_DONE_BIT, ensuring that the
		 * thread which is going to execute the ordered work sees them.
		 * Pairs with the smp_rmb in run_ordered_work.
		 */
		smp_mb__before_atomic();
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	} else {
		/* NB: work must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, work);
	}
}

void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_ordered_func_t ordered_func)
{
	work->func = func;
	work->ordered_func = ordered_func;
	INIT_WORK(&work->normal_work, btrfs_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}
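
/*
 * A minimal usage sketch for btrfs_init_work() and btrfs_queue_work()
 * (illustrative; my_func, my_ordered_func and the 'item' container that
 * embeds the work are hypothetical):
 *
 *	static void my_func(struct btrfs_work *work)
 *	{
 *		// Runs concurrently on the normal workqueue.
 *	}
 *
 *	static void my_ordered_func(struct btrfs_work *work, bool do_free)
 *	{
 *		// Runs in queueing order; frees the item when do_free is true.
 *	}
 *
 *	btrfs_init_work(&item->work, my_func, my_ordered_func);
 *	btrfs_queue_work(wq, &item->work);
 */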

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (wq)
		wq->limit_active = limit_active;
}

void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
	flush_workqueue(wq->normal_wq);
}

/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

/*
 * Container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

static int __btrfs_start_workers(struct btrfs_workers *workers);

/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time. It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue.
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations. This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};

static void start_new_worker_func(struct btrfs_work *work)
{
	struct worker_start *start;

	start = container_of(work, struct worker_start, work);
	__btrfs_start_workers(start->queue);
	kfree(start);
}

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;

		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;

		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
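
/*
 * Worked example of the idle/busy hysteresis above, assuming the default
 * idle_thresh of 32 (set in btrfs_init_workers below): a worker moves to
 * the idle list only once its num_pending drops below 16 (idle_thresh / 2),
 * and back to the busy list once num_pending reaches 32 again. The gap
 * between the two thresholds keeps workers from ping-ponging between lists
 * on small queue-depth changes.
 */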

static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	struct worker_start *start;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return;

	start->work.func = start_new_worker_func;
	start->queue = workers;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
	return;

out:
	kfree(start);
	spin_unlock_irqrestore(&workers->lock, flags);
}

static noinline void run_ordered_completions(struct btrfs_workers *workers,
					     struct btrfs_work *work)
{
	if (!workers->ordered)
		return;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		spin_unlock(&workers->order_lock);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
		spin_lock(&workers->order_lock);
	}

	spin_unlock(&workers->order_lock);
}

static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}

static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head))
		cur = prio_head->next;

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
			cond_resched();
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			try_to_freeze();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? Sleep for real. */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shutdown
 */
void btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_helper)
{
	workers->num_workers = 0;
	workers->num_workers_starting = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = async_helper;
}

/*
 * starts new worker threads. This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;

	worker = kzalloc(sizeof(*worker), GFP_NOFS);
	if (!worker) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&worker->pending);
	INIT_LIST_HEAD(&worker->prio_pending);
	INIT_LIST_HEAD(&worker->worker_list);
	spin_lock_init(&worker->lock);

	atomic_set(&worker->num_pending, 0);
	atomic_set(&worker->refs, 1);
	worker->workers = workers;
	worker->task = kthread_run(worker_loop, worker,
				   "btrfs-%s-%d", workers->name,
				   workers->num_workers + 1);
	if (IS_ERR(worker->task)) {
		ret = PTR_ERR(worker->task);
		kfree(worker);
		goto fail;
	}
	spin_lock_irq(&workers->lock);
	list_add_tail(&worker->worker_list, &workers->idle_list);
	worker->idle = 1;
	workers->num_workers++;
	workers->num_workers_starting--;
	WARN_ON(workers->num_workers_starting < 0);
	spin_unlock_irq(&workers->lock);

	return 0;
fail:
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting--;
	spin_unlock_irq(&workers->lock);
	return ret;
}

int btrfs_start_workers(struct btrfs_workers *workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting++;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers);
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now. This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list. This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number. This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
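
/*
 * Worked example of the batching above, assuming idle_thresh = 32: the
 * busy worker at the head of the list keeps receiving jobs until its
 * sequence counter reaches a multiple of 32, at which point it is rotated
 * to the tail. Jobs submitted close together in time therefore tend to
 * land on the same thread.
 */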

/*
 * selects a worker thread to take the next job. This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;
	int ret;

	spin_lock_irqsave(&workers->lock, flags);
again:
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			ret = __btrfs_start_workers(workers);
			spin_lock_irqsave(&workers->lock, flags);
			if (ret)
				goto fallback;
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from. It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
void btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/*
	 * by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}

void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}
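
/*
 * A minimal usage sketch for this older kthread-based API, mirroring the
 * worker_start pattern above (my_workers, my_item and my_func are
 * hypothetical names; error handling abbreviated):
 *
 *	struct btrfs_workers my_workers;
 *	struct btrfs_work *my_item;
 *	int ret;
 *
 *	btrfs_init_workers(&my_workers, "example", 4, NULL);
 *	ret = btrfs_start_workers(&my_workers);
 *	if (ret)
 *		return ret;
 *
 *	my_item = kzalloc(sizeof(*my_item), GFP_NOFS);
 *	my_item->func = my_func;
 *	btrfs_queue_worker(&my_workers, my_item);
 *	...
 *	btrfs_stop_workers(&my_workers);	// waits for threads to exit
 */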