// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

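/*
 * Return the number of bytes accounted against @s_info, optionally including
 * the bytes held in bytes_may_use.
 */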
u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
			  bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

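/*
 * Allocate and initialize a new space_info for the block group type given in
 * @flags, add it to sysfs and to the fs_info->space_info list.
 */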
static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	init_waitqueue_head(&space_info->wait);
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

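/*
 * Create the space_infos every filesystem needs: SYSTEM, plus either a mixed
 * METADATA|DATA space_info or separate METADATA and DATA ones.
 */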
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

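/*
 * Adjust the space_info counters for a block group being added (or resized)
 * and try to satisfy any pending reservation tickets with the new space.
 */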
void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

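/*
 * Return the space_info matching the block group type bits in @flags, or NULL
 * if there is none.
 */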
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

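/*
 * Decide whether a metadata or system reservation of @bytes may overcommit,
 * based on the unallocated device space scaled by the raid factor and on how
 * hard we are allowed to flush.
 */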
static int can_overcommit(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush,
			  bool system_chunk)
{
	u64 profile;
	u64 avail;
	u64 used;
	int factor;

	/* Don't overcommit when in mixed mode. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	if (system_chunk)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	used = btrfs_space_info_used(space_info, true);
	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space.  If we can flush, don't let us overcommit
	 * too much; let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    can_overcommit(fs_info, space_info, ticket->bytes, flush,
				   false)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			list_del_init(&ticket->list);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		   info->total_bytes, info->bytes_used, info->bytes_pinned,
		   info->bytes_reserved, info->bytes_may_use,
		   info->bytes_readonly);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->key.objectid, cache->key.offset,
			btrfs_block_group_used(&cache->item), cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

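/*
 * Kick off writeback of delalloc pages, through the generic writeback path if
 * we can take ->s_umount, otherwise through btrfs' own delalloc flushing.
 */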
static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem should guarantee that the delalloc inodes
		 * list is empty after the filesystem becomes read-only (all
		 * dirty pages have been written to disk).
		 */
		btrfs_start_delalloc_roots(fs_info, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
	}
}

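/*
 * Convert a byte count to the number of metadata items we would need to
 * reclaim to free that much space, at least one.
 */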
static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservations for delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
			    u64 orig, bool wait_ordered)
{
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 dio_bytes;
	u64 async_pages;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops;

	/* Calculate the number of pages we need to flush for this reservation. */
	items = calc_reclaim_items_nr(fs_info, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	if (delalloc_bytes == 0 && dio_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (dio_bytes > delalloc_bytes)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || dio_bytes) && loops < 3) {
		nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;

		/*
		 * Triggers inode writeback for up to nr_pages. This will invoke
		 * ->writepages callback and trigger delalloc filling
		 * (btrfs_run_delalloc_range()).
		 */
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);

		/*
		 * We need to wait for the compressed pages to start before
		 * we continue.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * Calculate how many compressed pages we want to be written
		 * before we continue. I.e. if there are more async pages than
		 * we require, wait_event will wait until nr_pages are written.
		 */
		if (async_pages <= nr_pages)
			async_pages = 0;
		else
			async_pages -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)async_pages);
skip_async:
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	}
}

/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @fs_info - the filesystem
 * @space_info - the space_info we are trying to flush for
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *trans;
	u64 bytes_needed;
	u64 reclaim_bytes = 0;
	u64 cur_free_bytes = 0;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	cur_free_bytes = btrfs_space_info_used(space_info, true);
	if (cur_free_bytes < space_info->total_bytes)
		cur_free_bytes = space_info->total_bytes - cur_free_bytes;
	else
		cur_free_bytes = 0;

	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	bytes_needed = (ticket) ? ticket->bytes : 0;

	if (bytes_needed > cur_free_bytes)
		bytes_needed -= cur_free_bytes;
	else
		bytes_needed = 0;
	spin_unlock(&space_info->lock);

	if (!bytes_needed)
		return 0;

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * See if there is enough pinned space to make this reservation, or if
	 * we have block groups that are going to be freed, allowing us to
	 * possibly do a chunk allocation the next loop through.
	 */
	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
	    __percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		goto enospc;

	spin_lock(&delayed_rsv->lock);
	reclaim_bytes += delayed_rsv->reserved;
	spin_unlock(&delayed_rsv->lock);

	spin_lock(&delayed_refs_rsv->lock);
	reclaim_bytes += delayed_refs_rsv->reserved;
	spin_unlock(&delayed_refs_rsv->lock);
	if (reclaim_bytes >= bytes_needed)
		goto commit;
	bytes_needed -= reclaim_bytes;

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
		goto enospc;

commit:
	return btrfs_commit_transaction(trans);
enospc:
	btrfs_end_transaction(trans);
	return -ENOSPC;
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
					btrfs_metadata_alloc_profile(fs_info),
					(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
						CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
	return;
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 bool system_chunk)
{
	struct reserve_ticket *ticket;
	u64 used;
	u64 expected;
	u64 to_reclaim = 0;

	list_for_each_entry(ticket, &space_info->tickets, list)
		to_reclaim += ticket->bytes;
	list_for_each_entry(ticket, &space_info->priority_tickets, list)
		to_reclaim += ticket->bytes;
	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (can_overcommit(fs_info, space_info, to_reclaim,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (can_overcommit(fs_info, space_info, SZ_1M,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}

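/*
 * Decide whether the async reclaim worker is worth kicking: the space_info is
 * nearly exhausted by reservations rather than by real usage, there is
 * something to reclaim, and the filesystem is neither closing nor being
 * remounted.
 */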
static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used, bool system_chunk)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
					      system_chunk))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets. The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets. This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	u64 first_ticket_bytes = 0;

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		/*
		 * may_commit_transaction will avoid committing the transaction
		 * if it doesn't feel like the space reclaimed by the commit
		 * would result in the ticket succeeding.  However if we have a
		 * smaller ticket in the queue it may be small enough to be
		 * satisfied by committing the transaction, so if any
		 * subsequent ticket is smaller than the first ticket go ahead
		 * and send us back for another loop through the enospc flushing
		 * code.
		 */
		if (first_ticket_bytes == 0)
			first_ticket_bytes = ticket->bytes;
		else if (first_ticket_bytes > ticket->bytes)
			return true;

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		list_del_init(&ticket->list);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info,
							      false);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create
		 * an underutilized metadata chunk.  So if this is our first
		 * run through the flushing state machine skip ALLOC_CHUNK_FORCE
		 * and commit the transaction.  If nothing has changed the next
		 * go around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

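/*
 * Walk the given list of flush states on behalf of a priority ticket,
 * stopping early once the ticket has been satisfied.
 */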
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

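/*
 * Sleep until the ticket is satisfied or fails.  If the wait is interrupted,
 * remove the ticket from the list ourselves and fail it with -EINTR so that
 * nobody grants it space behind our back.
 */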
static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list.  After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket.  If that would happen, then
			 * the ticket's task would not know that space was
			 * reserved despite getting an error, resulting in a
			 * space leak (bytes_may_use counter of our space_info).
			 */
			list_del_init(&ticket->list);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
 * @fs_info - the fs
 * @space_info - the space_info for the reservation
 * @ticket - the ticket for the reservation
 * @flush - how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_ALL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		/*
		 * Need to delete here for priority tickets. For regular tickets
		 * either the async reclaim job deletes the ticket from the list
		 * or we delete it ourselves at wait_reserve_ticket().
		 */
		list_del_init(&ticket->list);
		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	return ret;
}

/**
 * __reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @fs_info - the filesystem
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 * @system_chunk - whether this reservation is for system chunk space
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush,
				    bool system_chunk)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);
	pending_tickets = !list_empty(&space_info->tickets) ||
		!list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     can_overcommit(fs_info, space_info, orig_bytes, flush,
			    system_chunk))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		init_waitqueue_head(&ticket.wait);
		if (flush == BTRFS_RESERVE_FLUSH_ALL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info,
					  used, system_chunk) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
}

/**
 * btrfs_reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool system_chunk = (root == fs_info->chunk_root);

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush, system_chunk);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}
// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc., or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it does we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted).
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however hold reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space when
 *     our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     may_commit_transaction() is the ultimate arbiter on whether we commit the
 *     transaction or not.  In order to avoid constantly churning we do all the
 *     above flushing first and then commit the transaction as the last resort.
 *     However we need to take into account things like pinned space that would
 *     be freed, plus any delayed work we may not have gotten rid of in the case
 *     of metadata.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

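/*
 * Compute how much unallocated device space could still back a metadata or
 * system reservation, scaled by the raid factor and reduced according to how
 * hard we are allowed to flush.
 */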
static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space.  If we can flush, don't let us overcommit
	 * too much; let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

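/*
 * Decide whether a metadata or system reservation of @bytes may overcommit
 * into the unallocated device space computed above.  Data never overcommits.
 */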
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

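/*
 * Remove a ticket from its list and subtract its bytes from the running
 * reclaim_size total for the space_info.
 */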
static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		   info->total_bytes, info->bytes_used, info->bytes_pinned,
		   info->bytes_reserved, info->bytes_may_use,
		   info->bytes_readonly);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		spin_unlock(&cache->lock);
		btrfs_dump_free_space(cache, bytes);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem should guarantee that the delalloc inodes
		 * list is empty after the filesystem becomes read-only (all
		 * dirty pages have been written to disk).
		 */
		btrfs_start_delalloc_roots(fs_info, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
	}
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservations for delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
			    u64 orig, bool wait_ordered)
{
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 dio_bytes;
	u64 async_pages;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops;

	/* Calculate the number of pages we need to flush for this reservation. */
	items = calc_reclaim_items_nr(fs_info, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	if (delalloc_bytes == 0 && dio_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (dio_bytes > delalloc_bytes)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || dio_bytes) && loops < 3) {
		nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;

		/*
		 * Triggers inode writeback for up to nr_pages. This will invoke
		 * ->writepages callback and trigger delalloc filling
		 * (btrfs_run_delalloc_range()).
		 */
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);

		/*
		 * We need to wait for the compressed pages to start before
		 * we continue.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * Calculate how many compressed pages we want to be written
		 * before we continue. I.e. if there are more async pages than
		 * we require, wait_event will wait until nr_pages are written.
		 */
		if (async_pages <= nr_pages)
			async_pages = 0;
		else
			async_pages -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)async_pages);
skip_async:
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	}
}

/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @fs_info - the filesystem
 * @space_info - the space_info we are trying to flush for
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
623static int may_commit_transaction(struct btrfs_fs_info *fs_info,
624 struct btrfs_space_info *space_info)
625{
626 struct reserve_ticket *ticket = NULL;
627 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
628 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
629 struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
630 struct btrfs_trans_handle *trans;
631 u64 bytes_needed;
632 u64 reclaim_bytes = 0;
633 u64 cur_free_bytes = 0;
634
635 trans = (struct btrfs_trans_handle *)current->journal_info;
636 if (trans)
637 return -EAGAIN;
638
639 spin_lock(&space_info->lock);
640 cur_free_bytes = btrfs_space_info_used(space_info, true);
641 if (cur_free_bytes < space_info->total_bytes)
642 cur_free_bytes = space_info->total_bytes - cur_free_bytes;
643 else
644 cur_free_bytes = 0;
645
646 if (!list_empty(&space_info->priority_tickets))
647 ticket = list_first_entry(&space_info->priority_tickets,
648 struct reserve_ticket, list);
649 else if (!list_empty(&space_info->tickets))
650 ticket = list_first_entry(&space_info->tickets,
651 struct reserve_ticket, list);
652 bytes_needed = (ticket) ? ticket->bytes : 0;
653
654 if (bytes_needed > cur_free_bytes)
655 bytes_needed -= cur_free_bytes;
656 else
657 bytes_needed = 0;
658 spin_unlock(&space_info->lock);
659
660 if (!bytes_needed)
661 return 0;
662
	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * See if there is enough pinned space to make this reservation, or if
	 * we have block groups that are going to be freed, allowing us to
	 * possibly do a chunk allocation the next loop through.
	 */
	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
	    __percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		goto enospc;

	spin_lock(&delayed_rsv->lock);
	reclaim_bytes += delayed_rsv->reserved;
	spin_unlock(&delayed_rsv->lock);

	spin_lock(&delayed_refs_rsv->lock);
	reclaim_bytes += delayed_refs_rsv->reserved;
	spin_unlock(&delayed_refs_rsv->lock);

	spin_lock(&trans_rsv->lock);
	reclaim_bytes += trans_rsv->reserved;
	spin_unlock(&trans_rsv->lock);

	if (reclaim_bytes >= bytes_needed)
		goto commit;
	bytes_needed -= reclaim_bytes;

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
		goto enospc;

commit:
	return btrfs_commit_transaction(trans);
enospc:
	btrfs_end_transaction(trans);
	return -ENOSPC;
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
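/*
 * The flush states form an escalating ladder; callers (the async reclaim
 * worker and the priority/evict state lists below) invoke this once per
 * state, moving to the next, more expensive state only when the cheaper one
 * did not free enough space.
 */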
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
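		/*
		 * nr == 0 lets btrfs_run_delayed_refs() pick its own batch
		 * size for the refs that are already queued (an assumption
		 * based on its current count handling).
		 */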
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_metadata_alloc_profile(fs_info),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
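		/*
		 * btrfs_chunk_alloc() returns 1 when it allocated a chunk and
		 * -ENOSPC when the devices are full; neither is a hard
		 * failure for flushing, so normalize both to success.
		 */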
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
	return;
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 expected;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space. If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	if (to_reclaim)
		return to_reclaim;

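	/*
	 * No ticket recorded an explicit reclaim size, so fall back to a
	 * heuristic: try for a modest amount (1MiB per online CPU, capped at
	 * 16MiB) unless we can already overcommit by that much, and otherwise
	 * aim to pull usage back under ~90-95% of the total.
	 */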
	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (btrfs_can_overcommit(fs_info, space_info, to_reclaim,
				 BTRFS_RESERVE_FLUSH_ALL))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (btrfs_can_overcommit(fs_info, space_info, SZ_1M,
				 BTRFS_RESERVE_FLUSH_ALL))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}

static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info))
		return 0;

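	/*
	 * Only kick background reclaim once usage crosses the 98% threshold,
	 * and never while the filesystem is closing or being remounted.
	 */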
	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

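	/*
	 * Only tickets with ->steal set (BTRFS_RESERVE_FLUSH_ALL_STEAL) get
	 * here. Refuse to steal when it would drop the global reserve below
	 * 10% of its size (div_factor(size, 1)).
	 */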
	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets. The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets. This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	u64 first_ticket_bytes = 0;

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (ticket->steal &&
		    steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		/*
		 * may_commit_transaction will avoid committing the transaction
		 * if it doesn't feel like the space reclaimed by the commit
		 * would result in the ticket succeeding. However if we have a
		 * smaller ticket in the queue it may be small enough to be
		 * satisfied by committing the transaction, so if any
		 * subsequent ticket is smaller than the first ticket go ahead
		 * and send us back for another loop through the enospc
		 * flushing code.
		 */
		if (first_ticket_bytes == 0)
			first_ticket_bytes = ticket->bytes;
		else if (first_ticket_bytes > ticket->bytes)
			return true;

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

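	/*
	 * Walk the flush states in order. Whenever a pass grants a ticket
	 * (tickets_id changes) restart from the cheapest state; otherwise
	 * escalate to the next one.
	 */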
	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space. Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim. We would rather use that than possibly create an
		 * underutilized metadata chunk. So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE
		 * and commit the transaction. If nothing has changed the next
		 * go around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}

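/*
 * Priority (FLUSH_LIMIT) reservations flush inline with a short list of
 * cheap states; eviction gets a longer list that may also flush delalloc
 * and commit the transaction.
 */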
static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

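	/*
	 * Run each of the caller-supplied states once, in order, and stop as
	 * soon as our ticket has been satisfied (bytes == 0).
	 */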
	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that happened, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
 * @fs_info - the fs
 * @space_info - the space_info for the reservation
 * @ticket - the ticket for the reservation
 * @flush - how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		/*
		 * We were a priority ticket, so we need to delete ourselves
		 * from the list. Because we could have other priority tickets
		 * behind us that require less space, run
		 * btrfs_try_granting_tickets() to see if their reservations
		 * can now be made.
		 */
		if (!list_empty(&ticket->list)) {
			remove_ticket(space_info, ticket);
			btrfs_try_granting_tickets(fs_info, space_info);
		}

		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is
	 * that space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	return ret;
}

/*
 * This returns true if this flush state will go through the ordinary flushing
 * code.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
	       (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

/**
 * __reserve_metadata_bytes - try to reserve bytes from a space_info
 * @fs_info - the filesystem we're allocating for
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the given space info. If
 * there is not enough space it will make an attempt to flush out space to
 * make room. It will do this by flushing delalloc if possible or committing
 * the transaction. If flush is BTRFS_RESERVE_NO_FLUSH then no attempt to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;
	bool pending_tickets;

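	/*
	 * A task that already holds a transaction must not make a fully
	 * flushing reservation: the flushing code may try to commit the
	 * transaction and deadlock against ourselves.
	 */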
	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same
	 * as normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info, used) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
}

/**
 * btrfs_reserve_metadata_bytes - try to reserve bytes from the block_rsv's
 * space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempt to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush);
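	/*
	 * During orphan cleanup normal flushing may not be possible, so as a
	 * last resort take the bytes straight from the global reserve.
	 */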
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}