// SPDX-License-Identifier: GPL-2.0

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "block-group.h"
#include "discard.h"
#include "free-space-cache.h"
#include "fs.h"

/*
 * This contains the logic to handle async discard.
 *
 * Async discard manages trimming of free space outside of transaction commit.
 * Discarding is done by managing the block_groups on a LRU list based on free
 * space recency. Two passes are used: the first prioritizes discarding
 * extents, and the second trims the bitmaps, which gives them the best
 * opportunity to coalesce.
 * The block_groups are maintained on multiple lists to allow for multiple
 * passes with different discard filter requirements. A delayed work item is
 * used to manage discarding with timeout determined by a max of the delay
 * incurred by the iops rate limit, the byte rate limit, and the max delay of
 * BTRFS_DISCARD_MAX_DELAY.
 *
 * Note, this only keeps track of block_groups that are explicitly for data.
 * Mixed block_groups are not supported.
 *
 * The first list is special to manage discarding of fully free block groups.
 * This is necessary because we issue a final trim for a full free block group
 * after forgetting it. When a block group becomes unused, instead of directly
 * being added to the unused_bgs list, we add it to this first list. Then
 * from there, if it becomes fully discarded, we place it onto the unused_bgs
 * list.
 *
 * The in-memory free space cache serves as the backing state for discard.
 * Consequently this means there is no persistence. We opt to load all the
 * block groups in as not discarded, so the mount case degenerates to the
 * crashing case.
 *
 * As the free space cache uses bitmaps, there exists a tradeoff between
 * ease/efficiency for find_free_extent() and the accuracy of discard state.
 * Here we opt to let untrimmed regions merge with everything while only letting
 * trimmed regions merge with other trimmed regions. This can cause
 * overtrimming, but the coalescing benefit seems to be worth it. Additionally,
 * bitmap state is tracked as a whole. If we're able to fully trim a bitmap,
 * the trimmed flag is set on the bitmap. Otherwise, if an allocation comes in,
 * this resets the state and we will retry trimming the whole bitmap. This is a
 * tradeoff between discard state accuracy and the cost of accounting.
 */

/* This is an initial delay to give some chance for block reuse */
#define BTRFS_DISCARD_DELAY		(120ULL * NSEC_PER_SEC)
#define BTRFS_DISCARD_UNUSED_DELAY	(10ULL * NSEC_PER_SEC)

/* Target completion latency of discarding all discardable extents */
#define BTRFS_DISCARD_TARGET_MSEC	(6 * 60 * 60UL * MSEC_PER_SEC)
#define BTRFS_DISCARD_MIN_DELAY_MSEC	(1UL)
#define BTRFS_DISCARD_MAX_DELAY_MSEC	(1000UL)
#define BTRFS_DISCARD_MAX_IOPS		(10U)

/* Monotonically decreasing minimum length filters after index 0 */
static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
	0,
	BTRFS_ASYNC_DISCARD_MAX_FILTER,
	BTRFS_ASYNC_DISCARD_MIN_FILTER
};

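/*
 * Return the discard list that @block_group currently belongs to, based on
 * its discard_index.
 */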
static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
					  struct btrfs_block_group *block_group)
{
	return &discard_ctl->discard_list[block_group->discard_index];
}

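/*
 * Add a block group to its discard list. The caller must hold
 * discard_ctl->lock.
 *
 * If the block group is newly queued, or is coming off the unused list, move
 * it to the first filter list, push its eligible time out by
 * BTRFS_DISCARD_DELAY and reset the discard cursor. The block group is always
 * moved to the tail to keep the list in LRU order.
 */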
static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				  struct btrfs_block_group *block_group)
{
	if (!btrfs_run_discard_work(discard_ctl))
		return;

	if (list_empty(&block_group->discard_list) ||
	    block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) {
		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED)
			block_group->discard_index = BTRFS_DISCARD_INDEX_START;
		block_group->discard_eligible_time = (ktime_get_ns() +
						      BTRFS_DISCARD_DELAY);
		block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
	}

	list_move_tail(&block_group->discard_list,
		       get_discard_list(discard_ctl, block_group));
}

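/*
 * Queue a data block group for discarding, taking discard_ctl->lock. Block
 * groups that are not data only are ignored.
 */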
static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				struct btrfs_block_group *block_group)
{
	if (!btrfs_is_block_group_data_only(block_group))
		return;

	spin_lock(&discard_ctl->lock);
	__add_to_discard_list(discard_ctl, block_group);
	spin_unlock(&discard_ctl->lock);
}

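/*
 * Queue a now empty block group on the special unused list
 * (BTRFS_DISCARD_INDEX_UNUSED) with the shorter BTRFS_DISCARD_UNUSED_DELAY
 * and a reset cursor, so it can be fully trimmed before heading down the
 * unused_bgs path.
 */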
static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	spin_lock(&discard_ctl->lock);

	if (!btrfs_run_discard_work(discard_ctl)) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

	list_del_init(&block_group->discard_list);

	block_group->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
	block_group->discard_eligible_time = (ktime_get_ns() +
					      BTRFS_DISCARD_UNUSED_DELAY);
	block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
	list_add_tail(&block_group->discard_list,
		      &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]);

	spin_unlock(&discard_ctl->lock);
}

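/*
 * Remove a block group from the discard lists and clear its eligible time.
 *
 * Returns true if @block_group was the one currently being discarded, in
 * which case the caller may need to wait on or reschedule the discard work.
 */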
static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
				     struct btrfs_block_group *block_group)
{
	bool running = false;

	spin_lock(&discard_ctl->lock);

	if (block_group == discard_ctl->block_group) {
		running = true;
		discard_ctl->block_group = NULL;
	}

	block_group->discard_eligible_time = 0;
	list_del_init(&block_group->discard_list);

	spin_unlock(&discard_ctl->lock);

	return running;
}

/*
 * Find block_group that's up next for discarding.
 *
 * @discard_ctl: discard control
 * @now: current time
 *
 * Iterate over the discard lists to find the next block_group up for
 * discarding, checking the discard_eligible_time of each block_group.
 */
static struct btrfs_block_group *find_next_block_group(
					struct btrfs_discard_ctl *discard_ctl,
					u64 now)
{
	struct btrfs_block_group *ret_block_group = NULL, *block_group;
	int i;

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		struct list_head *discard_list = &discard_ctl->discard_list[i];

		if (!list_empty(discard_list)) {
			block_group = list_first_entry(discard_list,
						       struct btrfs_block_group,
						       discard_list);

			if (!ret_block_group)
				ret_block_group = block_group;

			if (ret_block_group->discard_eligible_time < now)
				break;

			if (ret_block_group->discard_eligible_time >
			    block_group->discard_eligible_time)
				ret_block_group = block_group;
		}
	}

	return ret_block_group;
}

/*
 * Look up the next block group and set it for use.
 *
 * @discard_ctl: discard control
 * @discard_state: the discard_state of the block_group after state management
 * @discard_index: the discard_index of the block_group after state management
 * @now: time when discard was invoked, in ns
 *
 * Wrap find_next_block_group() and set the block_group to be in use.
 * @discard_state's control flow is managed here. Variables related to
 * @discard_state are reset here as needed (e.g. @discard_cursor). @discard_state
 * and @discard_index are remembered as they may change while we're discarding,
 * but we want the discard to execute in the context determined here.
 */
static struct btrfs_block_group *peek_discard_list(
					struct btrfs_discard_ctl *discard_ctl,
					enum btrfs_discard_state *discard_state,
					int *discard_index, u64 now)
{
	struct btrfs_block_group *block_group;

	spin_lock(&discard_ctl->lock);
again:
	block_group = find_next_block_group(discard_ctl, now);

	if (block_group && now >= block_group->discard_eligible_time) {
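		/*
		 * A block group on the unused list that has been allocated
		 * into again should not be trimmed as unused: re-queue it on
		 * the regular lists if it is still data only, otherwise drop
		 * it, and look for another candidate.
		 */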
		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
		    block_group->used != 0) {
			if (btrfs_is_block_group_data_only(block_group))
				__add_to_discard_list(discard_ctl, block_group);
			else
				list_del_init(&block_group->discard_list);
			goto again;
		}
		if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
			block_group->discard_cursor = block_group->start;
			block_group->discard_state = BTRFS_DISCARD_EXTENTS;
		}
		discard_ctl->block_group = block_group;
	}
	if (block_group) {
		*discard_state = block_group->discard_state;
		*discard_index = block_group->discard_index;
	}
	spin_unlock(&discard_ctl->lock);

	return block_group;
}

/*
 * Update a block group's filters.
 *
 * @block_group: block group of interest
 * @bytes: recently freed region size after coalescing
 *
 * Async discard maintains multiple lists with progressively smaller filters
 * to prioritize discarding based on size. Should a free space that matches
 * a larger filter be returned to the free_space_cache, prioritize that discard
 * by moving @block_group to the proper filter.
 */
void btrfs_discard_check_filter(struct btrfs_block_group *block_group,
				u64 bytes)
{
	struct btrfs_discard_ctl *discard_ctl;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	discard_ctl = &block_group->fs_info->discard_ctl;

	if (block_group->discard_index > BTRFS_DISCARD_INDEX_START &&
	    bytes >= discard_minlen[block_group->discard_index - 1]) {
		int i;

		remove_from_discard_list(discard_ctl, block_group);

		for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS;
		     i++) {
			if (bytes >= discard_minlen[i]) {
				block_group->discard_index = i;
				add_to_discard_list(discard_ctl, block_group);
				break;
			}
		}
	}
}

/*
 * Move a block group along the discard lists.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Increment @block_group's discard_index. If it falls off the lists, let it
 * be. Otherwise add it back to the appropriate list.
 */
static void btrfs_update_discard_index(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	block_group->discard_index++;
	if (block_group->discard_index == BTRFS_NR_DISCARD_LISTS) {
		block_group->discard_index = 1;
		return;
	}

	add_to_discard_list(discard_ctl, block_group);
}

/*
 * Remove a block_group from the discard lists.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Remove @block_group from the discard lists. If necessary, wait on the
 * current work and then reschedule the delayed work.
 */
void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
			       struct btrfs_block_group *block_group)
{
	if (remove_from_discard_list(discard_ctl, block_group)) {
		cancel_delayed_work_sync(&discard_ctl->work);
		btrfs_discard_schedule_work(discard_ctl, true);
	}
}

/*
 * Handle queuing the block_groups.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Maintain the LRU order of the discard lists.
 */
void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
			      struct btrfs_block_group *block_group)
{
	if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	if (block_group->used == 0)
		add_to_discard_unused_list(discard_ctl, block_group);
	else
		add_to_discard_list(discard_ctl, block_group);

	if (!delayed_work_pending(&discard_ctl->work))
		btrfs_discard_schedule_work(discard_ctl, false);
}

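/*
 * Schedule the delayed discard work. The caller must hold discard_ctl->lock.
 *
 * The delay starts at delay_ms and is maxed with the delay implied by the
 * kbps_limit (based on the size of the previous discard) and with the time
 * remaining until the next block group becomes eligible. With @override, the
 * time already elapsed since the previous discard is credited against the
 * delay.
 */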
static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
					  u64 now, bool override)
{
	struct btrfs_block_group *block_group;

	if (!btrfs_run_discard_work(discard_ctl))
		return;
	if (!override && delayed_work_pending(&discard_ctl->work))
		return;

	block_group = find_next_block_group(discard_ctl, now);
	if (block_group) {
		u64 delay = discard_ctl->delay_ms * NSEC_PER_MSEC;
		u32 kbps_limit = READ_ONCE(discard_ctl->kbps_limit);

		/*
		 * A single delayed workqueue item is responsible for
		 * discarding, so we can manage the bytes rate limit by keeping
		 * track of the previous discard.
		 */
		if (kbps_limit && discard_ctl->prev_discard) {
			u64 bps_limit = ((u64)kbps_limit) * SZ_1K;
			u64 bps_delay = div64_u64(discard_ctl->prev_discard *
						  NSEC_PER_SEC, bps_limit);

			delay = max(delay, bps_delay);
		}

		/*
		 * This timeout is to hopefully prevent immediate discarding
		 * in a recently allocated block group.
		 */
		if (now < block_group->discard_eligible_time) {
			u64 bg_timeout = block_group->discard_eligible_time - now;

			delay = max(delay, bg_timeout);
		}

		if (override && discard_ctl->prev_discard) {
			u64 elapsed = now - discard_ctl->prev_discard_time;

			if (delay > elapsed)
				delay -= elapsed;
			else
				delay = 0;
		}

		mod_delayed_work(discard_ctl->discard_workers,
				 &discard_ctl->work, nsecs_to_jiffies(delay));
	}
}

/*
 * Responsible for scheduling the discard work.
 *
 * @discard_ctl: discard control
 * @override: override the current timer
 *
 * Discards are issued by a delayed workqueue item. @override is used to
 * update the current delay as the baseline delay interval is reevaluated on
 * transaction commit. This is also maxed with any other rate limit.
 */
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
				 bool override)
{
	const u64 now = ktime_get_ns();

	spin_lock(&discard_ctl->lock);
	__btrfs_discard_schedule_work(discard_ctl, now, override);
	spin_unlock(&discard_ctl->lock);
}

/*
 * Determine next step of a block_group.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Determine the next step for a block group after it's finished going through
 * a pass on a discard list. If it is unused and fully trimmed, we can mark it
 * unused and send it to the unused_bgs path. Otherwise, pass it onto the
 * appropriate filter list or let it fall off.
 */
static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl,
				      struct btrfs_block_group *block_group)
{
	remove_from_discard_list(discard_ctl, block_group);

	if (block_group->used == 0) {
		if (btrfs_is_free_space_trimmed(block_group))
			btrfs_mark_bg_unused(block_group);
		else
			add_to_discard_unused_list(discard_ctl, block_group);
	} else {
		btrfs_update_discard_index(discard_ctl, block_group);
	}
}

/*
 * Discard work queue callback
 *
 * @work: work
 *
 * Find the next block_group to start discarding and then discard a single
 * region. It does this in a two-pass fashion: first extents and second
 * bitmaps. Completely discarded block groups are sent to the unused_bgs path.
 */
static void btrfs_discard_workfn(struct work_struct *work)
{
	struct btrfs_discard_ctl *discard_ctl;
	struct btrfs_block_group *block_group;
	enum btrfs_discard_state discard_state;
	int discard_index = 0;
	u64 trimmed = 0;
	u64 minlen = 0;
	u64 now = ktime_get_ns();

	discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

	block_group = peek_discard_list(discard_ctl, &discard_state,
					&discard_index, now);
	if (!block_group || !btrfs_run_discard_work(discard_ctl))
		return;
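	/*
	 * The peeked block group may not be eligible yet; if so, just
	 * reschedule the work for when it is.
	 */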
	if (now < block_group->discard_eligible_time) {
		btrfs_discard_schedule_work(discard_ctl, false);
		return;
	}

	/* Perform discarding */
	minlen = discard_minlen[discard_index];

	if (discard_state == BTRFS_DISCARD_BITMAPS) {
		u64 maxlen = 0;

		/*
		 * Use the previous level's minimum discard length as the max
		 * length filter. In the case something is added to make a
		 * region go beyond the max filter, the entire bitmap is set
		 * back to BTRFS_TRIM_STATE_UNTRIMMED.
		 */
		if (discard_index != BTRFS_DISCARD_INDEX_UNUSED)
			maxlen = discard_minlen[discard_index - 1];

		btrfs_trim_block_group_bitmaps(block_group, &trimmed,
					       block_group->discard_cursor,
					       btrfs_block_group_end(block_group),
					       minlen, maxlen, true);
		discard_ctl->discard_bitmap_bytes += trimmed;
	} else {
		btrfs_trim_block_group_extents(block_group, &trimmed,
					       block_group->discard_cursor,
					       btrfs_block_group_end(block_group),
					       minlen, true);
		discard_ctl->discard_extent_bytes += trimmed;
	}

	/* Determine next steps for a block_group */
	if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
		if (discard_state == BTRFS_DISCARD_BITMAPS) {
			btrfs_finish_discard_pass(discard_ctl, block_group);
		} else {
			block_group->discard_cursor = block_group->start;
			spin_lock(&discard_ctl->lock);
			if (block_group->discard_state !=
			    BTRFS_DISCARD_RESET_CURSOR)
				block_group->discard_state =
							BTRFS_DISCARD_BITMAPS;
			spin_unlock(&discard_ctl->lock);
		}
	}

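	/*
	 * Record how much was discarded and when, for rate limiting of the
	 * next discard, then release the block group and schedule the next
	 * work item.
	 */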
	now = ktime_get_ns();
	spin_lock(&discard_ctl->lock);
	discard_ctl->prev_discard = trimmed;
	discard_ctl->prev_discard_time = now;
	discard_ctl->block_group = NULL;
	__btrfs_discard_schedule_work(discard_ctl, now, false);
	spin_unlock(&discard_ctl->lock);
}

/*
 * Determine if async discard should be running.
 *
 * @discard_ctl: discard control
 *
 * Check if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set.
 */
bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_fs_info *fs_info = container_of(discard_ctl,
						     struct btrfs_fs_info,
						     discard_ctl);

	return (!(fs_info->sb->s_flags & SB_RDONLY) &&
		test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));
}

/*
 * Recalculate the base delay.
 *
 * @discard_ctl: discard control
 *
 * Recalculate the base delay which is based on the total number of
 * discardable_extents. Clamp this between the lower_limit (iops_limit or 1ms)
 * and the upper_limit (BTRFS_DISCARD_MAX_DELAY_MSEC).
 */
void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
{
	s32 discardable_extents;
	s64 discardable_bytes;
	u32 iops_limit;
	unsigned long delay;

	discardable_extents = atomic_read(&discard_ctl->discardable_extents);
	if (!discardable_extents)
		return;

	spin_lock(&discard_ctl->lock);

	/*
	 * The following is to fix a potential -1 discrepancy that we're not
	 * sure how to reproduce. But given that this is the only place that
	 * utilizes these numbers and this is only called from
	 * btrfs_finish_extent_commit() which is synchronized, we can correct
	 * it here.
	 */
	if (discardable_extents < 0)
		atomic_add(-discardable_extents,
			   &discard_ctl->discardable_extents);

	discardable_bytes = atomic64_read(&discard_ctl->discardable_bytes);
	if (discardable_bytes < 0)
		atomic64_add(-discardable_bytes,
			     &discard_ctl->discardable_bytes);

	if (discardable_extents <= 0) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

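	/*
	 * With an iops limit, the delay is simply its inverse in msecs;
	 * otherwise spread the target completion latency evenly across all
	 * discardable extents. Either way, clamp the result to the min/max
	 * delay bounds.
	 */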
	iops_limit = READ_ONCE(discard_ctl->iops_limit);
	if (iops_limit)
		delay = MSEC_PER_SEC / iops_limit;
	else
		delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;

	delay = clamp(delay, BTRFS_DISCARD_MIN_DELAY_MSEC,
		      BTRFS_DISCARD_MAX_DELAY_MSEC);
	discard_ctl->delay_ms = delay;

	spin_unlock(&discard_ctl->lock);
}

/*
 * Propagate discard counters.
 *
 * @block_group: block_group of interest
 *
 * Propagate deltas of counters up to the discard_ctl. It maintains a current
 * counter and a previous counter passing the delta up to the global stat.
 * Then the current counter value becomes the previous counter value.
 */
void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
{
	struct btrfs_free_space_ctl *ctl;
	struct btrfs_discard_ctl *discard_ctl;
	s32 extents_delta;
	s64 bytes_delta;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC) ||
	    !btrfs_is_block_group_data_only(block_group))
		return;

	ctl = block_group->free_space_ctl;
	discard_ctl = &block_group->fs_info->discard_ctl;

	lockdep_assert_held(&ctl->tree_lock);
	extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
			ctl->discardable_extents[BTRFS_STAT_PREV];
	if (extents_delta) {
		atomic_add(extents_delta, &discard_ctl->discardable_extents);
		ctl->discardable_extents[BTRFS_STAT_PREV] =
			ctl->discardable_extents[BTRFS_STAT_CURR];
	}

	bytes_delta = ctl->discardable_bytes[BTRFS_STAT_CURR] -
		      ctl->discardable_bytes[BTRFS_STAT_PREV];
	if (bytes_delta) {
		atomic64_add(bytes_delta, &discard_ctl->discardable_bytes);
		ctl->discardable_bytes[BTRFS_STAT_PREV] =
			ctl->discardable_bytes[BTRFS_STAT_CURR];
	}
}

/*
 * Punt unused_bgs list to discard lists.
 *
 * @fs_info: fs_info of interest
 *
 * The unused_bgs list needs to be punted to the discard lists because the
 * order of operations is changed. In the normal synchronous discard path, the
 * block groups are trimmed via a single large trim in transaction commit. This
 * is ultimately what we are trying to avoid with asynchronous discard. Thus,
 * it must be done before going down the unused_bgs path.
 */
void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group, *next;

	spin_lock(&fs_info->unused_bgs_lock);
	/* We enabled async discard, so punt all to the queue */
	list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
				 bg_list) {
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}

/*
 * Purge discard lists.
 *
 * @discard_ctl: discard control
 *
 * If we are disabling async discard, we may have intercepted block groups that
 * are completely free and ready for the unused_bgs path. As discarding will
 * now happen in transaction commit or not at all, we can safely mark the
 * corresponding block groups as unused and they will be sent on their merry
 * way to the unused_bgs list.
 */
static void btrfs_discard_purge_list(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_block_group *block_group, *next;
	int i;

	spin_lock(&discard_ctl->lock);
	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		list_for_each_entry_safe(block_group, next,
					 &discard_ctl->discard_list[i],
					 discard_list) {
			list_del_init(&block_group->discard_list);
			spin_unlock(&discard_ctl->lock);
			if (block_group->used == 0)
				btrfs_mark_bg_unused(block_group);
			spin_lock(&discard_ctl->lock);
		}
	}
	spin_unlock(&discard_ctl->lock);
}

void btrfs_discard_resume(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
		btrfs_discard_cleanup(fs_info);
		return;
	}

	btrfs_discard_punt_unused_bgs_list(fs_info);

	set_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

void btrfs_discard_stop(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

void btrfs_discard_init(struct btrfs_fs_info *fs_info)
{
	struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
	int i;

	spin_lock_init(&discard_ctl->lock);
	INIT_DELAYED_WORK(&discard_ctl->work, btrfs_discard_workfn);

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++)
		INIT_LIST_HEAD(&discard_ctl->discard_list[i]);

	discard_ctl->prev_discard = 0;
	discard_ctl->prev_discard_time = 0;
	atomic_set(&discard_ctl->discardable_extents, 0);
	atomic64_set(&discard_ctl->discardable_bytes, 0);
	discard_ctl->max_discard_size = BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE;
	discard_ctl->delay_ms = BTRFS_DISCARD_MAX_DELAY_MSEC;
	discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS;
	discard_ctl->kbps_limit = 0;
	discard_ctl->discard_extent_bytes = 0;
	discard_ctl->discard_bitmap_bytes = 0;
	atomic64_set(&discard_ctl->discard_bytes_saved, 0);
}

void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
{
	btrfs_discard_stop(fs_info);
	cancel_delayed_work_sync(&fs_info->discard_ctl.work);
	btrfs_discard_purge_list(&fs_info->discard_ctl);
}