// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

enum {
        WORK_DONE_BIT,
        WORK_ORDER_DONE_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct btrfs_workqueue {
        struct workqueue_struct *normal_wq;

        /* File system this workqueue services */
        struct btrfs_fs_info *fs_info;

        /* List head pointing to the ordered work list */
        struct list_head ordered_list;

        /* Spinlock for ordered_list */
        spinlock_t list_lock;

        /* Thresholding related variables */
        atomic_t pending;

        /* Upper limit of concurrently running workers */
        int limit_active;

        /* Current number of concurrently running workers */
        int current_active;

        /* Threshold to change current_active */
        int thresh;

        /* Counter to throttle re-evaluation of current_active */
        unsigned int count;
        spinlock_t thres_lock;
};

struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
{
        return wq->fs_info;
}

struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
        return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
        /*
         * We could compare wq->pending with num_online_cpus() to support the
         * "thresh == NO_THRESHOLD" case, but that would require moving the
         * atomic_inc()/atomic_dec() up into thresh_queue/exec_hook. Postpone
         * that until someone actually needs it.
         */
        if (wq->thresh == NO_THRESHOLD)
                return false;

        return atomic_read(&wq->pending) > wq->thresh * 2;
}
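
/*
 * Illustrative only (not part of the original file): a submitter could use
 * btrfs_workqueue_normal_congested() to throttle itself before queueing more
 * work. The throttled_submit() helper and its cond_resched() backoff are
 * hypothetical; real callers may simply check congestion and defer instead.
 */
static inline void throttled_submit(struct btrfs_workqueue *wq,
                                    struct btrfs_work *work)
{
        /* Back off while more than 2 * thresh items are pending. */
        while (btrfs_workqueue_normal_congested(wq))
                cond_resched();

        btrfs_queue_work(wq, work);
}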

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
                                              const char *name, unsigned int flags,
                                              int limit_active, int thresh)
{
        struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

        if (!ret)
                return NULL;

        ret->fs_info = fs_info;
        ret->limit_active = limit_active;
        atomic_set(&ret->pending, 0);
        if (thresh == 0)
                thresh = DFT_THRESHOLD;
        /* For a low threshold, disabling thresholding is the better choice */
        if (thresh < DFT_THRESHOLD) {
                ret->current_active = limit_active;
                ret->thresh = NO_THRESHOLD;
        } else {
                /*
                 * For a threshold-able wq, let its concurrency grow on
                 * demand. Use a minimal max_active at alloc time to reduce
                 * resource usage.
                 */
                ret->current_active = 1;
                ret->thresh = thresh;
        }

        ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active,
                                         name);
        if (!ret->normal_wq) {
                kfree(ret);
                return NULL;
        }

        INIT_LIST_HEAD(&ret->ordered_list);
        spin_lock_init(&ret->list_lock);
        spin_lock_init(&ret->thres_lock);
        trace_btrfs_workqueue_alloc(ret, name);
        return ret;
}
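
/*
 * A minimal allocation/teardown sketch, assuming a caller with a valid
 * fs_info. example_setup(), the "example" name, and the chosen limits are
 * illustrative, not part of this file.
 */
static int example_setup(struct btrfs_fs_info *fs_info)
{
        struct btrfs_workqueue *wq;

        /* Up to 8 workers; thresholding kicks in above 64 pending items. */
        wq = btrfs_alloc_workqueue(fs_info, "example", WQ_UNBOUND, 8, 64);
        if (!wq)
                return -ENOMEM;

        /* ... init and queue btrfs_work items here ... */

        btrfs_destroy_workqueue(wq);
        return 0;
}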

/*
 * Hook for the threshold mechanism, called from btrfs_queue_work().
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active() MUST NOT be called from it.
 */
static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
{
        if (wq->thresh == NO_THRESHOLD)
                return;
        atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold mechanism, called before executing the work.
 * This hook runs in kthread context, so workqueue_set_max_active() may
 * be called from here.
 */
static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
{
        int new_current_active;
        long pending;
        int need_change = 0;

        if (wq->thresh == NO_THRESHOLD)
                return;

        atomic_dec(&wq->pending);
        spin_lock(&wq->thres_lock);
        /*
         * Use wq->count to limit the calling frequency of
         * workqueue_set_max_active().
         */
        wq->count++;
        wq->count %= (wq->thresh / 4);
        if (!wq->count)
                goto out;
        new_current_active = wq->current_active;

        /*
         * pending may change concurrently, but that's OK: we don't need an
         * exact value to calculate new_current_active.
         */
        pending = atomic_read(&wq->pending);
        if (pending > wq->thresh)
                new_current_active++;
        if (pending < wq->thresh / 2)
                new_current_active--;
        new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
        if (new_current_active != wq->current_active) {
                need_change = 1;
                wq->current_active = new_current_active;
        }
out:
        spin_unlock(&wq->thres_lock);

        if (need_change)
                workqueue_set_max_active(wq->normal_wq, wq->current_active);
}
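
/*
 * Worked example of the adaptation above, using the default threshold as an
 * assumed configuration: with thresh = 32, wq->count wraps modulo
 * thresh / 4 = 8, and the re-evaluation is skipped whenever the counter
 * wraps to zero. When it does run:
 *
 *   pending > 32  ->  new_current_active++
 *   pending < 16  ->  new_current_active--
 *
 * The result is clamped to [1, limit_active] and applied through
 * workqueue_set_max_active() only if it actually changed.
 */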

static void run_ordered_work(struct btrfs_workqueue *wq,
                             struct btrfs_work *self)
{
        struct list_head *list = &wq->ordered_list;
        struct btrfs_work *work;
        spinlock_t *lock = &wq->list_lock;
        unsigned long flags;
        bool free_self = false;

        while (1) {
                spin_lock_irqsave(lock, flags);
                if (list_empty(list))
                        break;
                work = list_entry(list->next, struct btrfs_work,
                                  ordered_list);
                if (!test_bit(WORK_DONE_BIT, &work->flags))
                        break;
                /*
                 * Orders all subsequent loads after reading WORK_DONE_BIT.
                 * Paired with the smp_mb__before_atomic() in
                 * btrfs_work_helper(), this guarantees that the ordered
                 * function sees all updates made by the ordinary work
                 * function.
                 */
                smp_rmb();

                /*
                 * We are going to call the ordered done function, but
                 * we leave the work item on the list as a barrier so
                 * that later work items that are done don't have their
                 * functions called before this one returns.
                 */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;
                trace_btrfs_ordered_sched(work);
                spin_unlock_irqrestore(lock, flags);
                work->ordered_func(work);

                /* Now take the lock again and drop our item from the list */
                spin_lock_irqsave(lock, flags);
                list_del(&work->ordered_list);
                spin_unlock_irqrestore(lock, flags);

                if (work == self) {
                        /*
                         * This is the work item that the worker is currently
                         * executing.
                         *
                         * The kernel workqueue code guarantees non-reentrancy
                         * of work items. I.e., if a work item with the same
                         * address and work function is queued twice, the
                         * second execution is blocked until the first one
                         * finishes. A work item may be freed and recycled
                         * with the same work function; the workqueue code
                         * assumes that the original work item cannot depend
                         * on the recycled work item in that case (see
                         * find_worker_executing_work()).
                         *
                         * Note that different types of Btrfs work can depend
                         * on each other, and one type of work on one Btrfs
                         * filesystem may even depend on the same type of work
                         * on another Btrfs filesystem via, e.g., a loop
                         * device. Therefore, we must not allow the current
                         * work item to be recycled until we are really done,
                         * otherwise we break the above assumption and can
                         * deadlock.
                         */
                        free_self = true;
                } else {
                        /*
                         * We don't want to call the ordered free functions
                         * with the lock held.
                         */
                        work->ordered_free(work);
                        /* NB: work must not be dereferenced past this point. */
                        trace_btrfs_all_work_done(wq->fs_info, work);
                }
        }
        spin_unlock_irqrestore(lock, flags);

        if (free_self) {
                self->ordered_free(self);
                /* NB: self must not be dereferenced past this point. */
                trace_btrfs_all_work_done(wq->fs_info, self);
        }
}
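
/*
 * Illustrative timeline of the ordering guarantee above (assumed scenario,
 * not from the source): items A, B and C are queued in that order with
 * ordered functions. If B's work function finishes first, run_ordered_work()
 * still stops at A because A's WORK_DONE_BIT is not yet set. Once A's work
 * function finishes, the worker completing A runs A's ordered function and
 * then B's, so ordered functions always run in queueing order regardless of
 * the order in which the work functions complete.
 */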

static void btrfs_work_helper(struct work_struct *normal_work)
{
        struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
                                               normal_work);
        struct btrfs_workqueue *wq = work->wq;
        int need_order = 0;

        /*
         * We should not touch things inside work in the following cases:
         * 1) after work->func(), if it has no ordered_free, since the
         *    struct is freed in work->func();
         * 2) after setting WORK_DONE_BIT, since the work may be freed by
         *    other threads almost instantly.
         * So we save the needed things here.
         */
        if (work->ordered_func)
                need_order = 1;

        trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
        work->func(work);
        if (need_order) {
                /*
                 * Ensures all memory accesses done in the work function are
                 * ordered before setting WORK_DONE_BIT, so that the thread
                 * which is going to execute the ordered work sees them.
                 * Pairs with the smp_rmb() in run_ordered_work().
                 */
                smp_mb__before_atomic();
                set_bit(WORK_DONE_BIT, &work->flags);
                run_ordered_work(wq, work);
        } else {
                /* NB: work must not be dereferenced past this point. */
                trace_btrfs_all_work_done(wq->fs_info, work);
        }
}

void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
                     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
        work->func = func;
        work->ordered_func = ordered_func;
        work->ordered_free = ordered_free;
        INIT_WORK(&work->normal_work, btrfs_work_helper);
        INIT_LIST_HEAD(&work->ordered_list);
        work->flags = 0;
}

void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
{
        unsigned long flags;

        work->wq = wq;
        thresh_queue_hook(wq);
        if (work->ordered_func) {
                spin_lock_irqsave(&wq->list_lock, flags);
                list_add_tail(&work->ordered_list, &wq->ordered_list);
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
        trace_btrfs_work_queued(work);
        queue_work(wq->normal_wq, &work->normal_work);
}
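
/*
 * A minimal end-to-end sketch of the init/queue API, assuming a caller that
 * embeds struct btrfs_work in its own item. example_item and the three
 * example_* callbacks are hypothetical names used only for illustration.
 */
struct example_item {
        struct btrfs_work work;
        int result;
};

static void example_func(struct btrfs_work *work)
{
        struct example_item *item = container_of(work, struct example_item,
                                                 work);

        item->result = 42;      /* the heavy lifting; may run concurrently */
}

static void example_ordered(struct btrfs_work *work)
{
        /* Runs strictly in the order the items were queued. */
}

static void example_free(struct btrfs_work *work)
{
        kfree(container_of(work, struct example_item, work));
}

static int example_submit(struct btrfs_workqueue *wq)
{
        struct example_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

        if (!item)
                return -ENOMEM;

        btrfs_init_work(&item->work, example_func, example_ordered,
                        example_free);
        btrfs_queue_work(wq, &item->work);
        return 0;
}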

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
        if (!wq)
                return;
        destroy_workqueue(wq->normal_wq);
        trace_btrfs_workqueue_destroy(wq);
        kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
        if (wq)
                wq->limit_active = limit_active;
}

void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
        flush_workqueue(wq->normal_wq);
}