/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)						\
void btrfs_##name(struct work_struct *arg)				\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}
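
/*
 * As an illustration, BTRFS_WORK_HELPER(endio_helper) below expands to:
 *
 *	void btrfs_endio_helper(struct work_struct *arg)
 *	{
 *		struct btrfs_work *work = container_of(arg, struct btrfs_work,
 *						       normal_work);
 *		normal_work_helper(work);
 *	}
 *
 * Each work type thereby gets a work function with a distinct address,
 * which keeps the workqueue code from falsely matching a recycled work
 * item against a still-running one of a different type.
 */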

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
			int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->current_active, "btrfs",
						 name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->current_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
						    thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}
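
/*
 * Minimal usage sketch (hypothetical caller; names and values are for
 * illustration only):
 *
 *	struct btrfs_workqueue *workers;
 *
 *	workers = btrfs_alloc_workqueue("worker", WQ_UNBOUND | WQ_HIGHPRI,
 *					8, 0);
 *	if (!workers)
 *		return -ENOMEM;
 *	...
 *	btrfs_destroy_workqueue(workers);
 *
 * Passing WQ_HIGHPRI allocates the high priority queue alongside the
 * normal one; thresh == 0 falls back to DFT_THRESHOLD (32), while any
 * non-zero value below DFT_THRESHOLD disables thresholding entirely.
 */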

/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context,
 * so workqueue_set_max_active is called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change later, but that's OK since we don't need it
	 * to be that accurate to calculate new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}
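
/*
 * Worked example of the thresholding above: with the default thresh of
 * 32, max_active is re-evaluated once every 32 / 4 = 8 executions.  At
 * that point current_active grows by one if more than 32 items are
 * pending, shrinks by one if fewer than 16 are pending, and is clamped
 * to [1, limit_active] either way.
 */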

static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* Now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * We don't want to call the ordered free functions
		 * with the lock held though.
		 */
		work->ordered_free(work);
		trace_btrfs_all_work_done(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	} else {
		trace_btrfs_all_work_done(work);
	}
}

void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}
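
/*
 * Sketch of a typical caller (my_start, my_done and my_free are
 * hypothetical):
 *
 *	btrfs_init_work(&async->work, btrfs_submit_helper, my_start,
 *			my_done, my_free);
 *	btrfs_queue_work(fs_info->submit_workers, &async->work);
 *
 * uniq_func must be the btrfs_*_helper generated above for the
 * destination queue.  my_start runs with normal workqueue concurrency,
 * while my_done and my_free are invoked in queueing order via
 * run_ordered_work().
 */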

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <trace/events/btrfs.h>
#include "async-thread.h"
#include "ctree.h"

enum {
	WORK_DONE_BIT,
	WORK_ORDER_DONE_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->pending with num_online_cpus()
	 * to support "thresh == NO_THRESHOLD" case, but it requires
	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
	 * postpone it until someone needs the support of that case.
	 */
	if (wq->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->pending) > wq->thresh * 2;
}
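
/*
 * Sketch of a producer-side check (hypothetical caller): back off
 * instead of queueing yet more work when the queue is already busy:
 *
 *	if (btrfs_workqueue_normal_congested(fs_info->delayed_workers))
 *		return;
 *
 * "Congested" here means more than 2 * thresh items are pending, so
 * queues running with NO_THRESHOLD never report congestion.
 */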

static void btrfs_init_workqueue(struct btrfs_workqueue *wq,
				 struct btrfs_fs_info *fs_info)
{
	wq->fs_info = fs_info;
	atomic_set(&wq->pending, 0);
	INIT_LIST_HEAD(&wq->ordered_list);
	spin_lock_init(&wq->list_lock);
	spin_lock_init(&wq->thres_lock);
}

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name, unsigned int flags,
					      int limit_active, int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	btrfs_init_workqueue(ret, fs_info);

	ret->limit_active = limit_active;
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active,
					 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	trace_btrfs_workqueue_alloc(ret, name);
	return ret;
}

struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
				struct btrfs_fs_info *fs_info, const char *name,
				unsigned int flags)
{
	struct btrfs_workqueue *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	btrfs_init_workqueue(ret, fs_info);

	/* Ordered workqueues don't allow @max_active adjustments. */
	ret->limit_active = 1;
	ret->current_active = 1;
	ret->thresh = NO_THRESHOLD;

	ret->normal_wq = alloc_ordered_workqueue("btrfs-%s", flags, name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	trace_btrfs_workqueue_alloc(ret, name);
	return ret;
}
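
/*
 * Minimal allocation sketch (hypothetical caller):
 *
 *	struct btrfs_workqueue *fixup_workers;
 *
 *	fixup_workers = btrfs_alloc_ordered_workqueue(fs_info, "fixup", 0);
 *	if (!fixup_workers)
 *		return -ENOMEM;
 *
 * The backing ordered workqueue executes at most one item at a time, in
 * queueing order, which is why thresholding stays disabled here.
 */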

/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context,
 * so workqueue_set_max_active is called here.
 */
static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change later, but that's OK since we don't need it
	 * to be that accurate to calculate new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}

static void run_ordered_work(struct btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	bool free_self = false;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
		/*
		 * Orders all subsequent loads after reading WORK_DONE_BIT.
		 * Paired with the smp_mb__before_atomic in btrfs_work_helper,
		 * this guarantees that the ordered function will see all
		 * updates from the ordinary work function.
		 */
		smp_rmb();

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work, false);

		/* Now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items. I.e., if a work item with the same
			 * address and work function is queued twice, the second
			 * execution is blocked until the first one finishes. A
			 * work item may be freed and recycled with the same
			 * work function; the workqueue code assumes that the
			 * original work item cannot depend on the recycled work
			 * item in that case (see find_worker_executing_work()).
			 *
			 * Note that different types of Btrfs work can depend on
			 * each other, and one type of work on one Btrfs
			 * filesystem may even depend on the same type of work
			 * on another Btrfs filesystem via, e.g., a loop device.
			 * Therefore, we must not allow the current work item to
			 * be recycled until we are really done, otherwise we
			 * break the above assumption and can deadlock.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held.
			 */
			work->ordered_func(work, true);
			/* NB: work must not be dereferenced past this point. */
			trace_btrfs_all_work_done(wq->fs_info, work);
		}
	}
	spin_unlock_irqrestore(lock, flags);

	if (free_self) {
		self->ordered_func(self, true);
		/* NB: self must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, self);
	}
}

static void btrfs_work_helper(struct work_struct *normal_work)
{
	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
					       normal_work);
	struct btrfs_workqueue *wq = work->wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func(), if it has no ordered_func(..., true) to free
	 *    it, since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		/*
		 * Ensures all memory accesses done in the work function are
		 * ordered before setting the WORK_DONE_BIT, ensuring the thread
		 * which is going to execute the ordered work sees them.
		 * Pairs with the smp_rmb in run_ordered_work.
		 */
		smp_mb__before_atomic();
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	} else {
		/* NB: work must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, work);
	}
}

void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_ordered_func_t ordered_func)
{
	work->func = func;
	work->ordered_func = ordered_func;
	INIT_WORK(&work->normal_work, btrfs_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}
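
/*
 * Sketch of a typical caller (my_ctx, my_work_fn and my_ordered_fn are
 * hypothetical):
 *
 *	static void my_ordered_fn(struct btrfs_work *work, bool do_free)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *		if (do_free)
 *			kfree(ctx);
 *		else
 *			finish_in_queue_order(ctx);
 *	}
 *
 *	btrfs_init_work(&ctx->work, my_work_fn, my_ordered_fn);
 *	btrfs_queue_work(wq, &ctx->work);
 *
 * run_ordered_work() first calls ordered_func(work, false) once the work
 * function has finished, then ordered_func(work, true) to release the
 * item; a NULL ordered_func skips the ordered phase entirely.
 */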

void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (wq)
		wq->limit_active = limit_active;
}

void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
	flush_workqueue(wq->normal_wq);
}