// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Red Hat. All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#include "volumes.h"
#include "space-info.h"
#include "delalloc-space.h"
#include "block-group.h"

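/*
 * A free space bitmap occupies one page, so it tracks PAGE_SIZE * 8
 * block-sized units.  MAX_CACHE_BYTES_PER_GIG is the in-memory budget that
 * recalculate_thresholds() below allows the cache to consume per 1GiB of
 * block group space.
 */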
#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_32K

struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path);

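/*
 * Find the free space cache inode for a block group: look up the
 * (FREE_SPACE_OBJECTID, 0, offset) header item in the tree root to get the
 * inode's location key, then read the inode itself under NOFS.
 */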
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	unsigned nofs_flag;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	/*
	 * We are often under a trans handle at this point, so we need to make
	 * sure NOFS is set to keep us from deadlocking.
	 */
	nofs_flag = memalloc_nofs_save();
	inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
	btrfs_release_path(path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(inode))
		return inode;

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}

struct inode *lookup_free_space_inode(
		struct btrfs_block_group_cache *block_group,
		struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(fs_info->tree_root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if ((BTRFS_I(inode)->flags & flags) != flags) {
		btrfs_info(fs_info, "Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memzero_extent_buffer(leaf, (unsigned long)inode_item,
			      sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(trans->fs_info->tree_root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
					 ino, block_group->key.objectid);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	bool locked = false;

	if (block_group) {
		struct btrfs_path *path = btrfs_alloc_path();

		if (!path) {
			ret = -ENOMEM;
			goto fail;
		}
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(trans, block_group, path);
			btrfs_put_block_group(block_group);
		}

		/*
		 * Now that we've truncated the cache away, it's no longer
		 * set up or written.
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
	}

	btrfs_i_size_write(BTRFS_I(inode), 0);
	truncate_pagecache(inode, 0);

	/*
	 * We skip the throttling logic for free space cache inodes, so we don't
	 * need to check for -EAGAIN.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

static void readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);
}

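/*
 * Set up an io_ctl for reading or writing a cache file: size the page array
 * from i_size and decide whether inline crcs are used (they are for block
 * group caches, but not for the free inode cache).  On a write, fail with
 * -ENOSPC if the per-page crcs plus the generation don't fit in the first
 * page.
 */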
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs and generation into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->fs_info = btrfs_sb(inode->i_sb);
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}
ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		clear_page(io_ctl->cur);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

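/*
 * With crcs enabled, the first page of the cache file starts with an array
 * of one u32 crc per page followed by the u64 generation, i.e.:
 *
 *	[ u32 crc[num_pages] ][ u64 generation ][ entries ... ]
 *
 * Without crcs only the generation sits at the front.  The helpers below
 * skip over this area when positioning io_ctl->cur.
 */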
static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->fs_info,
448 "space cache generation (%llu) does not match inode (%llu)",
449 *gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->fs_info,
			     "csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

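/*
 * Append one on-disk entry (little-endian offset, length and type) at
 * io_ctl->cur, crc'ing the current page and mapping the next one whenever a
 * page fills up.
 */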
static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	copy_page(io_ctl->cur, bitmap);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	copy_page(entry->bitmap, io_ctl->cur);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}

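/*
 * Read one cache file into @ctl: validate the header item and generation,
 * then load the extent entries and finally the bitmaps.  Returns 1 if the
 * cache was loaded, 0 if it should be silently ignored and rebuilt, or a
 * negative errno on hard failure.
 */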
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(fs_info,
			   "the free space cache file (%llu) is invalid, skip it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(fs_info,
			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
			  BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, 0);
	if (ret)
		return ret;

	readahead_cache(inode);

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kmem_cache_zalloc(
					btrfs_free_space_bitmap_cachep, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * Bitmap pages are stored on disk after all of the entries, in the
	 * same order that the bitmap entries were added to the cache, so
	 * read them back in that order.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * We must pass a path with search_commit_root set to btrfs_iget in
	 * order to avoid a deadlock when allocating extents for the tree root.
	 *
	 * When we are COWing an extent buffer from the tree root, when looking
	 * for a free extent, at extent-tree.c:find_free_extent(), we can find
	 * a block group without its free space cache loaded. When we find one
	 * we must load its space cache which requires reading its free space
	 * cache's inode item from the root tree. If this inode item is located
	 * in the same leaf that we started COWing before, then we end up in
	 * deadlock on the extent buffer (trying to read lock it when we
	 * previously write locked it).
	 *
	 * It's safe to read the inode item using the commit root because
	 * block groups, once loaded, stay in memory forever (until they are
	 * removed) as well as their space caches once loaded. New block groups
	 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
	 * we will never try to read their inode item while the fs is mounted.
	 */
	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

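/*
 * Serialize the extent entries, including any entries currently sitting in
 * the block group's allocation cluster and any ranges that are being
 * trimmed, into the io_ctl.  Called with ctl->tree_lock held.
 */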
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			       struct btrfs_free_space_ctl *ctl,
			       struct btrfs_block_group_cache *block_group,
			       int *entries, int *bitmaps,
			       struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, 0, 0, NULL);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1, EXTENT_DELALLOC, 0,
					 0, NULL);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

static noinline_for_stack int write_pinned_extent_entries(
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = block_group->fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, 0, 0, NULL);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state);
}

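/*
 * Second half of the cache writeout: wait for the ordered IO started by
 * __btrfs_write_out_cache(), update the free space header item so the cache
 * is considered valid, and update the block group's disk_cache_state under
 * the appropriate locks.
 */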
static int __btrfs_wait_cache_io(struct btrfs_root *root,
				 struct btrfs_trans_handle *trans,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_io_ctl *io_ctl,
				 struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group) {
#ifdef DEBUG
			btrfs_err(root->fs_info,
				  "failed to write free space cache for block group %llu",
				  block_group->key.objectid);
#endif
		}
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.  Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}

static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
}

int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
				     block_group, &block_group->io_ctl,
				     path, block_group->key.objectid);
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @inode - the inode the cache is written to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @io_ctl - the io_ctl that tracks the pages under IO
 * @trans - the trans handle
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
	if (ret)
		goto out_unlock;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(block_group, io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
				i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state);

	/*
	 * At this point the pages are under IO and we're happy.  The caller is
	 * responsible for waiting on them and updating the cache and the
	 * inode.
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	if (must_iput)
		iput(inode);
	return ret;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state);

out_unlock:
	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}

int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
				      block_group, &block_group->io_ctl, trans);
	if (ret) {
#ifdef DEBUG
		btrfs_err(fs_info,
			  "failed to write free space cache for block group %llu",
			  block_group->key.objectid);
#endif
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}

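/*
 * Helpers for converting between byte offsets and bitmap positions.  Each
 * bit in a bitmap covers ctl->unit bytes (typically the sectorsize), so one
 * page-sized bitmap covers BITS_PER_BITMAP * ctl->unit bytes; with 4K pages
 * and a 4K sectorsize, for example, that is 32768 bits, or 128MiB per bitmap.
 */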
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that is at least 'bytes' in size and comes at or after the
 * given offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

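/*
 * Recompute ctl->extents_thresh so that the memory used by extent entries
 * plus bitmaps stays at or below MAX_CACHE_BYTES_PER_GIG for each 1GiB of
 * block group space.  Once the bitmaps alone exceed that budget, the
 * threshold drops to zero and new free ranges go into bitmaps.
 */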
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u64, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
	if (info->max_extent_size > ctl->unit)
		info->max_extent_size = 0;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

/*
 * If we cannot find a suitable extent, we will use *bytes to record the size
 * of the largest extent found.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	/*
	 * Skip searching the bitmap if we don't have a contiguous section that
	 * is large enough for this allocation.
	 */
	if (for_alloc &&
	    bitmap_info->max_extent_size &&
	    bitmap_info->max_extent_size < *bytes) {
		*bytes = bitmap_info->max_extent_size;
		return -1;
	}

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		if (for_alloc && bits == 1) {
			found_bits = 1;
			break;
		}
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	bitmap_info->max_extent_size = *bytes;
	return -1;
}

static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
{
	if (entry->bitmap)
		return entry->max_extent_size;
	return entry->bytes;
}

/* Cache the size of the largest extent seen, in bytes, in *max_extent_size */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size, true);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else {
				*max_extent_size =
					max(get_max_extent_size(entry),
					    *max_extent_size);
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info,
			u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and
	 * then go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
			    false);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes, false);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}

static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	/*
	 * We set some bytes, we have no idea what the max extent size is
	 * anymore.
	 */
	info->max_extent_size = 0;

	return bytes_to_set;
}

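/*
 * Decide whether a newly freed range should be tracked in a bitmap instead
 * of as an extent entry, based on the extent entry threshold and the block
 * group size.
 */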
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	bool forced = false;

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group))
		forced = true;
#endif

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (!forced && ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents.  However, if we have
		 * plenty of cache left then go ahead and add them; no sense in
		 * adding the overhead of a bitmap if we don't have to.
		 */
2015 if (info->bytes <= fs_info->sectorsize * 4) {
2016 if (ctl->free_extents * 2 <= ctl->extents_thresh)
2017 return false;
2018 } else {
2019 return false;
2020 }
2021 }
2022
2023 /*
2024 * The original block groups from mkfs can be really small, like 8
2025 * megabytes, so don't bother with a bitmap for those entries. However
2026 * some block groups can be smaller than what a bitmap would cover but
2027 * are still large enough that they could overflow the 32k memory limit,
2028	 * so those block groups are still allowed to have a bitmap
2029 * entry.
2030 */
2031 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
2032 return false;
2033
2034 return true;
2035}
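
/*
 * Numeric sketch of the use_bitmap() heuristic (illustrative only, assuming
 * a 4K sectorsize and 4K pages): extents of at most 4 * sectorsize = 16K are
 * kept as extent entries while free_extents * 2 <= extents_thresh, so small
 * holes don't get pushed into bitmaps until half the extent budget is used.
 * The final check compares half a bitmap's coverage, (32768 * 4096) / 2 =
 * 64MiB, against the block group size: groups smaller than that never get
 * bitmap entries, since plain extents stay cheaper than a mostly-empty
 * bitmap.
 */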
2036
2037static const struct btrfs_free_space_op free_space_op = {
2038 .recalc_thresholds = recalculate_thresholds,
2039 .use_bitmap = use_bitmap,
2040};
2041
2042static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
2043 struct btrfs_free_space *info)
2044{
2045 struct btrfs_free_space *bitmap_info;
2046 struct btrfs_block_group_cache *block_group = NULL;
2047 int added = 0;
2048 u64 bytes, offset, bytes_added;
2049 int ret;
2050
2051 bytes = info->bytes;
2052 offset = info->offset;
2053
2054 if (!ctl->op->use_bitmap(ctl, info))
2055 return 0;
2056
2057 if (ctl->op == &free_space_op)
2058 block_group = ctl->private;
2059again:
2060 /*
2061	 * Since we link bitmaps right into the cluster we need to see if we
2062	 * have a cluster here, and if so, whether it has our bitmap; if it
2063	 * does, we need to add the free space to that bitmap.
2064 */
2065 if (block_group && !list_empty(&block_group->cluster_list)) {
2066 struct btrfs_free_cluster *cluster;
2067 struct rb_node *node;
2068 struct btrfs_free_space *entry;
2069
2070 cluster = list_entry(block_group->cluster_list.next,
2071 struct btrfs_free_cluster,
2072 block_group_list);
2073 spin_lock(&cluster->lock);
2074 node = rb_first(&cluster->root);
2075 if (!node) {
2076 spin_unlock(&cluster->lock);
2077 goto no_cluster_bitmap;
2078 }
2079
2080 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2081 if (!entry->bitmap) {
2082 spin_unlock(&cluster->lock);
2083 goto no_cluster_bitmap;
2084 }
2085
2086 if (entry->offset == offset_to_bitmap(ctl, offset)) {
2087 bytes_added = add_bytes_to_bitmap(ctl, entry,
2088 offset, bytes);
2089 bytes -= bytes_added;
2090 offset += bytes_added;
2091 }
2092 spin_unlock(&cluster->lock);
2093 if (!bytes) {
2094 ret = 1;
2095 goto out;
2096 }
2097 }
2098
2099no_cluster_bitmap:
2100 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2101 1, 0);
2102 if (!bitmap_info) {
2103 ASSERT(added == 0);
2104 goto new_bitmap;
2105 }
2106
2107 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
2108 bytes -= bytes_added;
2109 offset += bytes_added;
2110 added = 0;
2111
2112 if (!bytes) {
2113 ret = 1;
2114 goto out;
2115 } else
2116 goto again;
2117
2118new_bitmap:
2119 if (info && info->bitmap) {
2120 add_new_bitmap(ctl, info, offset);
2121 added = 1;
2122 info = NULL;
2123 goto again;
2124 } else {
2125 spin_unlock(&ctl->tree_lock);
2126
2127 /* no pre-allocated info, allocate a new one */
2128 if (!info) {
2129 info = kmem_cache_zalloc(btrfs_free_space_cachep,
2130 GFP_NOFS);
2131 if (!info) {
2132 spin_lock(&ctl->tree_lock);
2133 ret = -ENOMEM;
2134 goto out;
2135 }
2136 }
2137
2138 /* allocate the bitmap */
2139 info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
2140 GFP_NOFS);
2141 spin_lock(&ctl->tree_lock);
2142 if (!info->bitmap) {
2143 ret = -ENOMEM;
2144 goto out;
2145 }
2146 goto again;
2147 }
2148
2149out:
2150 if (info) {
2151 if (info->bitmap)
2152 kmem_cache_free(btrfs_free_space_bitmap_cachep,
2153 info->bitmap);
2154 kmem_cache_free(btrfs_free_space_cachep, info);
2155 }
2156
2157 return ret;
2158}
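
/*
 * Note on the allocation pattern above (editorial): the new_bitmap path
 * drops ctl->tree_lock before the GFP_NOFS allocations and retakes it
 * afterwards, so the tree may have changed in between; that is why control
 * always flows back through the 'again' label to redo the bitmap lookup
 * rather than assuming the earlier search result is still valid.
 */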
2159
2160static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2161 struct btrfs_free_space *info, bool update_stat)
2162{
2163 struct btrfs_free_space *left_info;
2164 struct btrfs_free_space *right_info;
2165 bool merged = false;
2166 u64 offset = info->offset;
2167 u64 bytes = info->bytes;
2168
2169 /*
2170	 * First we want to see if there is free space adjacent to the range we
2171	 * are adding; if there is, remove that struct and add a new one to
2172	 * cover the entire range.
2173 */
2174 right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
2175 if (right_info && rb_prev(&right_info->offset_index))
2176 left_info = rb_entry(rb_prev(&right_info->offset_index),
2177 struct btrfs_free_space, offset_index);
2178 else
2179 left_info = tree_search_offset(ctl, offset - 1, 0, 0);
2180
2181 if (right_info && !right_info->bitmap) {
2182 if (update_stat)
2183 unlink_free_space(ctl, right_info);
2184 else
2185 __unlink_free_space(ctl, right_info);
2186 info->bytes += right_info->bytes;
2187 kmem_cache_free(btrfs_free_space_cachep, right_info);
2188 merged = true;
2189 }
2190
2191 if (left_info && !left_info->bitmap &&
2192 left_info->offset + left_info->bytes == offset) {
2193 if (update_stat)
2194 unlink_free_space(ctl, left_info);
2195 else
2196 __unlink_free_space(ctl, left_info);
2197 info->offset = left_info->offset;
2198 info->bytes += left_info->bytes;
2199 kmem_cache_free(btrfs_free_space_cachep, left_info);
2200 merged = true;
2201 }
2202
2203 return merged;
2204}
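
/*
 * Worked example for try_merge_free_space() (editorial, hypothetical
 * offsets): with extent entries [0, 4K) and [8K, 12K) in the tree, adding
 * [4K, 8K) finds [8K, 12K) as right_info (looked up at offset + bytes = 8K)
 * and [0, 4K) as left_info; both are unlinked and folded in, leaving the
 * caller to link a single [0, 12K) entry.
 */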
2205
2206static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
2207 struct btrfs_free_space *info,
2208 bool update_stat)
2209{
2210 struct btrfs_free_space *bitmap;
2211 unsigned long i;
2212 unsigned long j;
2213 const u64 end = info->offset + info->bytes;
2214 const u64 bitmap_offset = offset_to_bitmap(ctl, end);
2215 u64 bytes;
2216
2217 bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2218 if (!bitmap)
2219 return false;
2220
2221 i = offset_to_bit(bitmap->offset, ctl->unit, end);
2222 j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
2223 if (j == i)
2224 return false;
2225 bytes = (j - i) * ctl->unit;
2226 info->bytes += bytes;
2227
2228 if (update_stat)
2229 bitmap_clear_bits(ctl, bitmap, end, bytes);
2230 else
2231 __bitmap_clear_bits(ctl, bitmap, end, bytes);
2232
2233 if (!bitmap->bytes)
2234 free_bitmap(ctl, bitmap);
2235
2236 return true;
2237}
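
/*
 * Sketch of steal_from_bitmap_to_end() (editorial, hypothetical numbers,
 * 4K unit assumed): for an extent entry ending at 'end' whose following
 * free space lives in a bitmap, bit i maps to 'end' and j is the next
 * clear bit; if bits i..j-1 are set, say 8 of them, (j - i) * unit = 32K
 * is cleared from the bitmap and appended to the extent entry instead.
 */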
2238
2239static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
2240 struct btrfs_free_space *info,
2241 bool update_stat)
2242{
2243 struct btrfs_free_space *bitmap;
2244 u64 bitmap_offset;
2245 unsigned long i;
2246 unsigned long j;
2247 unsigned long prev_j;
2248 u64 bytes;
2249
2250 bitmap_offset = offset_to_bitmap(ctl, info->offset);
2251 /* If we're on a boundary, try the previous logical bitmap. */
2252 if (bitmap_offset == info->offset) {
2253 if (info->offset == 0)
2254 return false;
2255 bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
2256 }
2257
2258 bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2259 if (!bitmap)
2260 return false;
2261
2262 i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
2263 j = 0;
2264 prev_j = (unsigned long)-1;
2265 for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
2266 if (j > i)
2267 break;
2268 prev_j = j;
2269 }
2270 if (prev_j == i)
2271 return false;
2272
2273 if (prev_j == (unsigned long)-1)
2274 bytes = (i + 1) * ctl->unit;
2275 else
2276 bytes = (i - prev_j) * ctl->unit;
2277
2278 info->offset -= bytes;
2279 info->bytes += bytes;
2280
2281 if (update_stat)
2282 bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
2283 else
2284 __bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
2285
2286 if (!bitmap->bytes)
2287 free_bitmap(ctl, bitmap);
2288
2289 return true;
2290}
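
/*
 * Sketch of steal_from_bitmap_to_front() (editorial): i is the bit just
 * before info->offset and prev_j tracks the last clear bit at or below i.
 * If prev_j == i, the bit immediately before the extent is clear and there
 * is nothing to steal; if no clear bit exists (prev_j stays -1), the whole
 * run 0..i is set and (i + 1) * ctl->unit bytes are absorbed; otherwise the
 * contiguous set run of (i - prev_j) * ctl->unit bytes is moved onto the
 * front of the extent.
 */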
2291
2292/*
2293 * We always prefer to allocate from extent entries, both for clustered and
2294 * non-clustered allocation requests. So when attempting to add a new extent
2295 * entry, try to see if there's adjacent free space in bitmap entries, and if
2296 * there is, migrate that space from the bitmaps to the extent.
2297 * Like this we get better chances of satisfying space allocation requests
2298 * because we attempt to satisfy them based on a single cache entry, and never
2299 * on 2 or more entries - even if the entries represent a contiguous free space
2300 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2301 * ends).
2302 */
2303static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
2304 struct btrfs_free_space *info,
2305 bool update_stat)
2306{
2307 /*
2308 * Only work with disconnected entries, as we can change their offset,
2309	 * and they must be extent entries.
2310 */
2311 ASSERT(!info->bitmap);
2312 ASSERT(RB_EMPTY_NODE(&info->offset_index));
2313
2314 if (ctl->total_bitmaps > 0) {
2315 bool stole_end;
2316 bool stole_front = false;
2317
2318 stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
2319 if (ctl->total_bitmaps > 0)
2320 stole_front = steal_from_bitmap_to_front(ctl, info,
2321 update_stat);
2322
2323 if (stole_end || stole_front)
2324 try_merge_free_space(ctl, info, update_stat);
2325 }
2326}
2327
2328int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
2329 struct btrfs_free_space_ctl *ctl,
2330 u64 offset, u64 bytes)
2331{
2332 struct btrfs_free_space *info;
2333 int ret = 0;
2334
2335 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
2336 if (!info)
2337 return -ENOMEM;
2338
2339 info->offset = offset;
2340 info->bytes = bytes;
2341 RB_CLEAR_NODE(&info->offset_index);
2342
2343 spin_lock(&ctl->tree_lock);
2344
2345 if (try_merge_free_space(ctl, info, true))
2346 goto link;
2347
2348 /*
2349	 * There was no extent directly to the left or right of this new extent,
2350	 * so we know we're going to have to allocate a new extent; before we do
2351	 * that, see if we need to drop this into a bitmap.
2352 */
2353 ret = insert_into_bitmap(ctl, info);
2354 if (ret < 0) {
2355 goto out;
2356 } else if (ret) {
2357 ret = 0;
2358 goto out;
2359 }
2360link:
2361 /*
2362 * Only steal free space from adjacent bitmaps if we're sure we're not
2363 * going to add the new free space to existing bitmap entries - because
2364 * that would mean unnecessary work that would be reverted. Therefore
2365 * attempt to steal space from bitmaps if we're adding an extent entry.
2366 */
2367 steal_from_bitmap(ctl, info, true);
2368
2369 ret = link_free_space(ctl, info);
2370 if (ret)
2371 kmem_cache_free(btrfs_free_space_cachep, info);
2372out:
2373 spin_unlock(&ctl->tree_lock);
2374
2375 if (ret) {
2376		btrfs_crit(fs_info, "unable to add free space: %d", ret);
2377 ASSERT(ret != -EEXIST);
2378 }
2379
2380 return ret;
2381}
2382
2383int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
2384 u64 bytenr, u64 size)
2385{
2386 return __btrfs_add_free_space(block_group->fs_info,
2387 block_group->free_space_ctl,
2388 bytenr, size);
2389}
2390
2391int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
2392 u64 offset, u64 bytes)
2393{
2394 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2395 struct btrfs_free_space *info;
2396 int ret;
2397 bool re_search = false;
2398
2399 spin_lock(&ctl->tree_lock);
2400
2401again:
2402 ret = 0;
2403 if (!bytes)
2404 goto out_lock;
2405
2406 info = tree_search_offset(ctl, offset, 0, 0);
2407 if (!info) {
2408 /*
2409		 * We didn't find an extent that matched the space we wanted to
2410		 * remove; look for a bitmap instead.
2411 */
2412 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2413 1, 0);
2414 if (!info) {
2415 /*
2416 * If we found a partial bit of our free space in a
2417 * bitmap but then couldn't find the other part this may
2418 * be a problem, so WARN about it.
2419 */
2420 WARN_ON(re_search);
2421 goto out_lock;
2422 }
2423 }
2424
2425 re_search = false;
2426 if (!info->bitmap) {
2427 unlink_free_space(ctl, info);
2428 if (offset == info->offset) {
2429 u64 to_free = min(bytes, info->bytes);
2430
2431 info->bytes -= to_free;
2432 info->offset += to_free;
2433 if (info->bytes) {
2434 ret = link_free_space(ctl, info);
2435 WARN_ON(ret);
2436 } else {
2437 kmem_cache_free(btrfs_free_space_cachep, info);
2438 }
2439
2440 offset += to_free;
2441 bytes -= to_free;
2442 goto again;
2443 } else {
2444 u64 old_end = info->bytes + info->offset;
2445
2446 info->bytes = offset - info->offset;
2447 ret = link_free_space(ctl, info);
2448 WARN_ON(ret);
2449 if (ret)
2450 goto out_lock;
2451
2452 /* Not enough bytes in this entry to satisfy us */
2453 if (old_end < offset + bytes) {
2454 bytes -= old_end - offset;
2455 offset = old_end;
2456 goto again;
2457 } else if (old_end == offset + bytes) {
2458 /* all done */
2459 goto out_lock;
2460 }
2461 spin_unlock(&ctl->tree_lock);
2462
2463 ret = btrfs_add_free_space(block_group, offset + bytes,
2464 old_end - (offset + bytes));
2465 WARN_ON(ret);
2466 goto out;
2467 }
2468 }
2469
2470 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
2471 if (ret == -EAGAIN) {
2472 re_search = true;
2473 goto again;
2474 }
2475out_lock:
2476 spin_unlock(&ctl->tree_lock);
2477out:
2478 return ret;
2479}
2480
2481void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
2482 u64 bytes)
2483{
2484 struct btrfs_fs_info *fs_info = block_group->fs_info;
2485 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2486 struct btrfs_free_space *info;
2487 struct rb_node *n;
2488 int count = 0;
2489
2490 spin_lock(&ctl->tree_lock);
2491 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2492 info = rb_entry(n, struct btrfs_free_space, offset_index);
2493 if (info->bytes >= bytes && !block_group->ro)
2494 count++;
2495 btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
2496 info->offset, info->bytes,
2497 (info->bitmap) ? "yes" : "no");
2498 }
2499 spin_unlock(&ctl->tree_lock);
2500 btrfs_info(fs_info, "block group has cluster?: %s",
2501 list_empty(&block_group->cluster_list) ? "no" : "yes");
2502 btrfs_info(fs_info,
2503 "%d blocks of free space at or bigger than bytes is", count);
2504}
2505
2506void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
2507{
2508 struct btrfs_fs_info *fs_info = block_group->fs_info;
2509 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2510
2511 spin_lock_init(&ctl->tree_lock);
2512 ctl->unit = fs_info->sectorsize;
2513 ctl->start = block_group->key.objectid;
2514 ctl->private = block_group;
2515 ctl->op = &free_space_op;
2516 INIT_LIST_HEAD(&ctl->trimming_ranges);
2517 mutex_init(&ctl->cache_writeout_mutex);
2518
2519 /*
2520	 * we only want to have 32k of RAM per block group for keeping
2521 * track of free space, and if we pass 1/2 of that we want to
2522 * start converting things over to using bitmaps
2523 */
2524 ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
2525}
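
/*
 * Worked arithmetic for the threshold above (editorial; the struct size is
 * an assumption, roughly 72 bytes per btrfs_free_space on common 64-bit
 * builds): (SZ_32K / 2) / 72 comes to about 227, so a block group starts
 * considering bitmaps once a couple hundred extent entries exist, keeping
 * the per-group cache footprint near the 32K target.
 */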
2526
2527/*
2528 * for a given cluster, put all of its extents back into the free
2529 * space cache. If the block group passed doesn't match the block group
2530 * pointed to by the cluster, someone else raced in and freed the
2531 * cluster already. In that case, we just return without changing anything
2532 */
2533static int
2534__btrfs_return_cluster_to_free_space(
2535 struct btrfs_block_group_cache *block_group,
2536 struct btrfs_free_cluster *cluster)
2537{
2538 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2539 struct btrfs_free_space *entry;
2540 struct rb_node *node;
2541
2542 spin_lock(&cluster->lock);
2543 if (cluster->block_group != block_group)
2544 goto out;
2545
2546 cluster->block_group = NULL;
2547 cluster->window_start = 0;
2548 list_del_init(&cluster->block_group_list);
2549
2550 node = rb_first(&cluster->root);
2551 while (node) {
2552 bool bitmap;
2553
2554 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2555 node = rb_next(&entry->offset_index);
2556 rb_erase(&entry->offset_index, &cluster->root);
2557 RB_CLEAR_NODE(&entry->offset_index);
2558
2559 bitmap = (entry->bitmap != NULL);
2560 if (!bitmap) {
2561 try_merge_free_space(ctl, entry, false);
2562 steal_from_bitmap(ctl, entry, false);
2563 }
2564 tree_insert_offset(&ctl->free_space_offset,
2565 entry->offset, &entry->offset_index, bitmap);
2566 }
2567 cluster->root = RB_ROOT;
2568
2569out:
2570 spin_unlock(&cluster->lock);
2571 btrfs_put_block_group(block_group);
2572 return 0;
2573}
2574
2575static void __btrfs_remove_free_space_cache_locked(
2576 struct btrfs_free_space_ctl *ctl)
2577{
2578 struct btrfs_free_space *info;
2579 struct rb_node *node;
2580
2581 while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2582 info = rb_entry(node, struct btrfs_free_space, offset_index);
2583 if (!info->bitmap) {
2584 unlink_free_space(ctl, info);
2585 kmem_cache_free(btrfs_free_space_cachep, info);
2586 } else {
2587 free_bitmap(ctl, info);
2588 }
2589
2590 cond_resched_lock(&ctl->tree_lock);
2591 }
2592}
2593
2594void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
2595{
2596 spin_lock(&ctl->tree_lock);
2597 __btrfs_remove_free_space_cache_locked(ctl);
2598 spin_unlock(&ctl->tree_lock);
2599}
2600
2601void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2602{
2603 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2604 struct btrfs_free_cluster *cluster;
2605 struct list_head *head;
2606
2607 spin_lock(&ctl->tree_lock);
2608 while ((head = block_group->cluster_list.next) !=
2609 &block_group->cluster_list) {
2610 cluster = list_entry(head, struct btrfs_free_cluster,
2611 block_group_list);
2612
2613 WARN_ON(cluster->block_group != block_group);
2614 __btrfs_return_cluster_to_free_space(block_group, cluster);
2615
2616 cond_resched_lock(&ctl->tree_lock);
2617 }
2618 __btrfs_remove_free_space_cache_locked(ctl);
2619 spin_unlock(&ctl->tree_lock);
2621}
2622
2623u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2624 u64 offset, u64 bytes, u64 empty_size,
2625 u64 *max_extent_size)
2626{
2627 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2628 struct btrfs_free_space *entry = NULL;
2629 u64 bytes_search = bytes + empty_size;
2630 u64 ret = 0;
2631 u64 align_gap = 0;
2632 u64 align_gap_len = 0;
2633
2634 spin_lock(&ctl->tree_lock);
2635 entry = find_free_space(ctl, &offset, &bytes_search,
2636 block_group->full_stripe_len, max_extent_size);
2637 if (!entry)
2638 goto out;
2639
2640 ret = offset;
2641 if (entry->bitmap) {
2642 bitmap_clear_bits(ctl, entry, offset, bytes);
2643 if (!entry->bytes)
2644 free_bitmap(ctl, entry);
2645 } else {
2646 unlink_free_space(ctl, entry);
2647 align_gap_len = offset - entry->offset;
2648 align_gap = entry->offset;
2649
2650 entry->offset = offset + bytes;
2651 WARN_ON(entry->bytes < bytes + align_gap_len);
2652
2653 entry->bytes -= bytes + align_gap_len;
2654 if (!entry->bytes)
2655 kmem_cache_free(btrfs_free_space_cachep, entry);
2656 else
2657 link_free_space(ctl, entry);
2658 }
2659out:
2660 spin_unlock(&ctl->tree_lock);
2661
2662 if (align_gap_len)
2663 __btrfs_add_free_space(block_group->fs_info, ctl,
2664 align_gap, align_gap_len);
2665 return ret;
2666}
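
/*
 * Example of the align_gap handling above (editorial, hypothetical values):
 * if find_free_space() lands inside an extent entry [1MiB + 4K, 2MiB) and
 * alignment pushes the returned offset up to 1MiB + 64K, the 60K gap
 * [1MiB + 4K, 1MiB + 64K) is remembered and re-added to the tree once
 * ctl->tree_lock has been dropped, so aligned allocations don't leak the
 * space they skip over.
 */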
2667
2668/*
2669 * given a cluster, put all of its extents back into the free space
2670 * cache. If a block group is passed, this function will only free
2671 * a cluster that belongs to the passed block group.
2672 *
2673 * Otherwise, it'll get a reference on the block group pointed to by the
2674 * cluster and remove the cluster from it.
2675 */
2676int btrfs_return_cluster_to_free_space(
2677 struct btrfs_block_group_cache *block_group,
2678 struct btrfs_free_cluster *cluster)
2679{
2680 struct btrfs_free_space_ctl *ctl;
2681 int ret;
2682
2683 /* first, get a safe pointer to the block group */
2684 spin_lock(&cluster->lock);
2685 if (!block_group) {
2686 block_group = cluster->block_group;
2687 if (!block_group) {
2688 spin_unlock(&cluster->lock);
2689 return 0;
2690 }
2691 } else if (cluster->block_group != block_group) {
2692		/* someone else has already freed it, don't redo their work */
2693 spin_unlock(&cluster->lock);
2694 return 0;
2695 }
2696 atomic_inc(&block_group->count);
2697 spin_unlock(&cluster->lock);
2698
2699 ctl = block_group->free_space_ctl;
2700
2701 /* now return any extents the cluster had on it */
2702 spin_lock(&ctl->tree_lock);
2703 ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
2704 spin_unlock(&ctl->tree_lock);
2705
2706 /* finally drop our ref */
2707 btrfs_put_block_group(block_group);
2708 return ret;
2709}
2710
2711static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2712 struct btrfs_free_cluster *cluster,
2713 struct btrfs_free_space *entry,
2714 u64 bytes, u64 min_start,
2715 u64 *max_extent_size)
2716{
2717 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2718 int err;
2719	u64 search_start = min_start;
2720	u64 search_bytes = bytes;
2721	u64 ret = 0;
2725
2726 err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
2727 if (err) {
2728 *max_extent_size = max(get_max_extent_size(entry),
2729 *max_extent_size);
2730 return 0;
2731 }
2732
2733 ret = search_start;
2734 __bitmap_clear_bits(ctl, entry, ret, bytes);
2735
2736 return ret;
2737}
2738
2739/*
2740 * given a cluster, try to allocate 'bytes' from it; returns 0
2741 * if it couldn't find anything suitably large, or a logical disk offset
2742 * if things worked out
2743 */
2744u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2745 struct btrfs_free_cluster *cluster, u64 bytes,
2746 u64 min_start, u64 *max_extent_size)
2747{
2748 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2749 struct btrfs_free_space *entry = NULL;
2750 struct rb_node *node;
2751 u64 ret = 0;
2752
2753 spin_lock(&cluster->lock);
2754 if (bytes > cluster->max_size)
2755 goto out;
2756
2757 if (cluster->block_group != block_group)
2758 goto out;
2759
2760 node = rb_first(&cluster->root);
2761 if (!node)
2762 goto out;
2763
2764 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2765 while (1) {
2766 if (entry->bytes < bytes)
2767 *max_extent_size = max(get_max_extent_size(entry),
2768 *max_extent_size);
2769
2770 if (entry->bytes < bytes ||
2771 (!entry->bitmap && entry->offset < min_start)) {
2772 node = rb_next(&entry->offset_index);
2773 if (!node)
2774 break;
2775 entry = rb_entry(node, struct btrfs_free_space,
2776 offset_index);
2777 continue;
2778 }
2779
2780 if (entry->bitmap) {
2781 ret = btrfs_alloc_from_bitmap(block_group,
2782 cluster, entry, bytes,
2783 cluster->window_start,
2784 max_extent_size);
2785 if (ret == 0) {
2786 node = rb_next(&entry->offset_index);
2787 if (!node)
2788 break;
2789 entry = rb_entry(node, struct btrfs_free_space,
2790 offset_index);
2791 continue;
2792 }
2793 cluster->window_start += bytes;
2794 } else {
2795 ret = entry->offset;
2796
2797 entry->offset += bytes;
2798 entry->bytes -= bytes;
2799 }
2800
2801 if (entry->bytes == 0)
2802 rb_erase(&entry->offset_index, &cluster->root);
2803 break;
2804 }
2805out:
2806 spin_unlock(&cluster->lock);
2807
2808 if (!ret)
2809 return 0;
2810
2811 spin_lock(&ctl->tree_lock);
2812
2813 ctl->free_space -= bytes;
2814 if (entry->bytes == 0) {
2815 ctl->free_extents--;
2816 if (entry->bitmap) {
2817 kmem_cache_free(btrfs_free_space_bitmap_cachep,
2818 entry->bitmap);
2819 ctl->total_bitmaps--;
2820 ctl->op->recalc_thresholds(ctl);
2821 }
2822 kmem_cache_free(btrfs_free_space_cachep, entry);
2823 }
2824
2825 spin_unlock(&ctl->tree_lock);
2826
2827 return ret;
2828}
2829
2830static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2831 struct btrfs_free_space *entry,
2832 struct btrfs_free_cluster *cluster,
2833 u64 offset, u64 bytes,
2834 u64 cont1_bytes, u64 min_bytes)
2835{
2836 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2837 unsigned long next_zero;
2838 unsigned long i;
2839 unsigned long want_bits;
2840 unsigned long min_bits;
2841 unsigned long found_bits;
2842 unsigned long max_bits = 0;
2843 unsigned long start = 0;
2844 unsigned long total_found = 0;
2845 int ret;
2846
2847 i = offset_to_bit(entry->offset, ctl->unit,
2848 max_t(u64, offset, entry->offset));
2849 want_bits = bytes_to_bits(bytes, ctl->unit);
2850 min_bits = bytes_to_bits(min_bytes, ctl->unit);
2851
2852 /*
2853 * Don't bother looking for a cluster in this bitmap if it's heavily
2854 * fragmented.
2855 */
2856 if (entry->max_extent_size &&
2857 entry->max_extent_size < cont1_bytes)
2858 return -ENOSPC;
2859again:
2860 found_bits = 0;
2861 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
2862 next_zero = find_next_zero_bit(entry->bitmap,
2863 BITS_PER_BITMAP, i);
2864 if (next_zero - i >= min_bits) {
2865 found_bits = next_zero - i;
2866 if (found_bits > max_bits)
2867 max_bits = found_bits;
2868 break;
2869 }
2870 if (next_zero - i > max_bits)
2871 max_bits = next_zero - i;
2872 i = next_zero;
2873 }
2874
2875 if (!found_bits) {
2876 entry->max_extent_size = (u64)max_bits * ctl->unit;
2877 return -ENOSPC;
2878 }
2879
2880 if (!total_found) {
2881 start = i;
2882 cluster->max_size = 0;
2883 }
2884
2885 total_found += found_bits;
2886
2887 if (cluster->max_size < found_bits * ctl->unit)
2888 cluster->max_size = found_bits * ctl->unit;
2889
2890 if (total_found < want_bits || cluster->max_size < cont1_bytes) {
2891 i = next_zero + 1;
2892 goto again;
2893 }
2894
2895 cluster->window_start = start * ctl->unit + entry->offset;
2896 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2897 ret = tree_insert_offset(&cluster->root, entry->offset,
2898 &entry->offset_index, 1);
2899 ASSERT(!ret); /* -EEXIST; Logic error */
2900
2901 trace_btrfs_setup_cluster(block_group, cluster,
2902 total_found * ctl->unit, 1);
2903 return 0;
2904}
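
/*
 * Numeric sketch for btrfs_bitmap_cluster() (editorial, 4K unit assumed):
 * asking for bytes = 1MiB with min_bytes = 64K means want_bits = 256 and
 * min_bits = 16; the scan collects runs of at least 16 set bits, retrying
 * from the next zero bit until total_found >= 256 and the largest run
 * covers cont1_bytes, then the whole bitmap entry migrates from the free
 * space tree into the cluster's private rbtree.
 */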
2905
2906/*
2907 * This searches the block group for just extents to fill the cluster with.
2908 * Try to find a cluster with at least bytes total bytes, at least one
2909 * extent of cont1_bytes, and other clusters of at least min_bytes.
2910 */
2911static noinline int
2912setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2913 struct btrfs_free_cluster *cluster,
2914 struct list_head *bitmaps, u64 offset, u64 bytes,
2915 u64 cont1_bytes, u64 min_bytes)
2916{
2917 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2918 struct btrfs_free_space *first = NULL;
2919 struct btrfs_free_space *entry = NULL;
2920 struct btrfs_free_space *last;
2921 struct rb_node *node;
2922 u64 window_free;
2923 u64 max_extent;
2924 u64 total_size = 0;
2925
2926 entry = tree_search_offset(ctl, offset, 0, 1);
2927 if (!entry)
2928 return -ENOSPC;
2929
2930 /*
2931 * We don't want bitmaps, so just move along until we find a normal
2932 * extent entry.
2933 */
2934 while (entry->bitmap || entry->bytes < min_bytes) {
2935 if (entry->bitmap && list_empty(&entry->list))
2936 list_add_tail(&entry->list, bitmaps);
2937 node = rb_next(&entry->offset_index);
2938 if (!node)
2939 return -ENOSPC;
2940 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2941 }
2942
2943 window_free = entry->bytes;
2944 max_extent = entry->bytes;
2945 first = entry;
2946 last = entry;
2947
2948 for (node = rb_next(&entry->offset_index); node;
2949 node = rb_next(&entry->offset_index)) {
2950 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2951
2952 if (entry->bitmap) {
2953 if (list_empty(&entry->list))
2954 list_add_tail(&entry->list, bitmaps);
2955 continue;
2956 }
2957
2958 if (entry->bytes < min_bytes)
2959 continue;
2960
2961 last = entry;
2962 window_free += entry->bytes;
2963 if (entry->bytes > max_extent)
2964 max_extent = entry->bytes;
2965 }
2966
2967 if (window_free < bytes || max_extent < cont1_bytes)
2968 return -ENOSPC;
2969
2970 cluster->window_start = first->offset;
2971
2972 node = &first->offset_index;
2973
2974 /*
2975 * now we've found our entries, pull them out of the free space
2976 * cache and put them into the cluster rbtree
2977 */
2978 do {
2979 int ret;
2980
2981 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2982 node = rb_next(&entry->offset_index);
2983 if (entry->bitmap || entry->bytes < min_bytes)
2984 continue;
2985
2986 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2987 ret = tree_insert_offset(&cluster->root, entry->offset,
2988 &entry->offset_index, 0);
2989 total_size += entry->bytes;
2990 ASSERT(!ret); /* -EEXIST; Logic error */
2991 } while (node && entry != last);
2992
2993 cluster->max_size = max_extent;
2994 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
2995 return 0;
2996}
2997
2998/*
2999 * This specifically looks for bitmaps that may work in the cluster, we assume
3000 * that we have already failed to find extents that will work.
3001 */
3002static noinline int
3003setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
3004 struct btrfs_free_cluster *cluster,
3005 struct list_head *bitmaps, u64 offset, u64 bytes,
3006 u64 cont1_bytes, u64 min_bytes)
3007{
3008 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3009 struct btrfs_free_space *entry = NULL;
3010 int ret = -ENOSPC;
3011 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
3012
3013 if (ctl->total_bitmaps == 0)
3014 return -ENOSPC;
3015
3016 /*
3017 * The bitmap that covers offset won't be in the list unless offset
3018 * is just its start offset.
3019 */
3020 if (!list_empty(bitmaps))
3021 entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
3022
3023 if (!entry || entry->offset != bitmap_offset) {
3024 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
3025 if (entry && list_empty(&entry->list))
3026 list_add(&entry->list, bitmaps);
3027 }
3028
3029 list_for_each_entry(entry, bitmaps, list) {
3030 if (entry->bytes < bytes)
3031 continue;
3032 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
3033 bytes, cont1_bytes, min_bytes);
3034 if (!ret)
3035 return 0;
3036 }
3037
3038 /*
3039 * The bitmaps list has all the bitmaps that record free space
3040 * starting after offset, so no more search is required.
3041 */
3042 return -ENOSPC;
3043}
3044
3045/*
3046 * here we try to find a cluster of blocks in a block group. The goal
3047 * is to find at least bytes+empty_size.
3048 * We might not find them all in one contiguous area.
3049 *
3050 * returns zero and sets up cluster if things worked out, otherwise
3051 * it returns -ENOSPC
3052 */
3053int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group,
3054 struct btrfs_free_cluster *cluster,
3055 u64 offset, u64 bytes, u64 empty_size)
3056{
3057 struct btrfs_fs_info *fs_info = block_group->fs_info;
3058 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3059 struct btrfs_free_space *entry, *tmp;
3060 LIST_HEAD(bitmaps);
3061 u64 min_bytes;
3062 u64 cont1_bytes;
3063 int ret;
3064
3065 /*
3066 * Choose the minimum extent size we'll require for this
3067 * cluster. For SSD_SPREAD, don't allow any fragmentation.
3068 * For metadata, allow allocates with smaller extents. For
3069 * data, keep it dense.
3070 */
3071 if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
3072 cont1_bytes = min_bytes = bytes + empty_size;
3073 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
3074 cont1_bytes = bytes;
3075 min_bytes = fs_info->sectorsize;
3076 } else {
3077 cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
3078 min_bytes = fs_info->sectorsize;
3079 }
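	/*
	 * Example of the sizing above (editorial, hypothetical numbers): for
	 * a data block group with bytes = 1MiB and empty_size = 7MiB this
	 * picks cont1_bytes = max(1MiB, 8MiB >> 2) = 2MiB, i.e. at least a
	 * quarter of the request must be contiguous, while min_bytes drops to
	 * a single sector so small extents can still pad out the window.
	 */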
3080
3081 spin_lock(&ctl->tree_lock);
3082
3083 /*
3084	 * If we know we don't have enough space to make a cluster, don't even
3085 * bother doing all the work to try and find one.
3086 */
3087 if (ctl->free_space < bytes) {
3088 spin_unlock(&ctl->tree_lock);
3089 return -ENOSPC;
3090 }
3091
3092 spin_lock(&cluster->lock);
3093
3094 /* someone already found a cluster, hooray */
3095 if (cluster->block_group) {
3096 ret = 0;
3097 goto out;
3098 }
3099
3100 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
3101 min_bytes);
3102
3103 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
3104 bytes + empty_size,
3105 cont1_bytes, min_bytes);
3106 if (ret)
3107 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
3108 offset, bytes + empty_size,
3109 cont1_bytes, min_bytes);
3110
3111 /* Clear our temporary list */
3112 list_for_each_entry_safe(entry, tmp, &bitmaps, list)
3113 list_del_init(&entry->list);
3114
3115 if (!ret) {
3116 atomic_inc(&block_group->count);
3117 list_add_tail(&cluster->block_group_list,
3118 &block_group->cluster_list);
3119 cluster->block_group = block_group;
3120 } else {
3121 trace_btrfs_failed_cluster_setup(block_group);
3122 }
3123out:
3124 spin_unlock(&cluster->lock);
3125 spin_unlock(&ctl->tree_lock);
3126
3127 return ret;
3128}
3129
3130/*
3131 * simple code to zero out a cluster
3132 */
3133void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
3134{
3135 spin_lock_init(&cluster->lock);
3136 spin_lock_init(&cluster->refill_lock);
3137 cluster->root = RB_ROOT;
3138 cluster->max_size = 0;
3139 cluster->fragmented = false;
3140 INIT_LIST_HEAD(&cluster->block_group_list);
3141 cluster->block_group = NULL;
3142}
3143
3144static int do_trimming(struct btrfs_block_group_cache *block_group,
3145 u64 *total_trimmed, u64 start, u64 bytes,
3146 u64 reserved_start, u64 reserved_bytes,
3147 struct btrfs_trim_range *trim_entry)
3148{
3149 struct btrfs_space_info *space_info = block_group->space_info;
3150 struct btrfs_fs_info *fs_info = block_group->fs_info;
3151 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3152 int ret;
3153 int update = 0;
3154 u64 trimmed = 0;
3155
3156 spin_lock(&space_info->lock);
3157 spin_lock(&block_group->lock);
3158 if (!block_group->ro) {
3159 block_group->reserved += reserved_bytes;
3160 space_info->bytes_reserved += reserved_bytes;
3161 update = 1;
3162 }
3163 spin_unlock(&block_group->lock);
3164 spin_unlock(&space_info->lock);
3165
3166 ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
3167 if (!ret)
3168 *total_trimmed += trimmed;
3169
3170 mutex_lock(&ctl->cache_writeout_mutex);
3171 btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
3172 list_del(&trim_entry->list);
3173 mutex_unlock(&ctl->cache_writeout_mutex);
3174
3175 if (update) {
3176 spin_lock(&space_info->lock);
3177 spin_lock(&block_group->lock);
3178 if (block_group->ro)
3179 space_info->bytes_readonly += reserved_bytes;
3180 block_group->reserved -= reserved_bytes;
3181 space_info->bytes_reserved -= reserved_bytes;
3182 spin_unlock(&block_group->lock);
3183 spin_unlock(&space_info->lock);
3184 }
3185
3186 return ret;
3187}
3188
3189static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
3190 u64 *total_trimmed, u64 start, u64 end, u64 minlen)
3191{
3192 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3193 struct btrfs_free_space *entry;
3194 struct rb_node *node;
3195 int ret = 0;
3196 u64 extent_start;
3197 u64 extent_bytes;
3198 u64 bytes;
3199
3200 while (start < end) {
3201 struct btrfs_trim_range trim_entry;
3202
3203 mutex_lock(&ctl->cache_writeout_mutex);
3204 spin_lock(&ctl->tree_lock);
3205
3206 if (ctl->free_space < minlen) {
3207 spin_unlock(&ctl->tree_lock);
3208 mutex_unlock(&ctl->cache_writeout_mutex);
3209 break;
3210 }
3211
3212 entry = tree_search_offset(ctl, start, 0, 1);
3213 if (!entry) {
3214 spin_unlock(&ctl->tree_lock);
3215 mutex_unlock(&ctl->cache_writeout_mutex);
3216 break;
3217 }
3218
3219 /* skip bitmaps */
3220 while (entry->bitmap) {
3221 node = rb_next(&entry->offset_index);
3222 if (!node) {
3223 spin_unlock(&ctl->tree_lock);
3224 mutex_unlock(&ctl->cache_writeout_mutex);
3225 goto out;
3226 }
3227 entry = rb_entry(node, struct btrfs_free_space,
3228 offset_index);
3229 }
3230
3231 if (entry->offset >= end) {
3232 spin_unlock(&ctl->tree_lock);
3233 mutex_unlock(&ctl->cache_writeout_mutex);
3234 break;
3235 }
3236
3237 extent_start = entry->offset;
3238 extent_bytes = entry->bytes;
3239 start = max(start, extent_start);
3240 bytes = min(extent_start + extent_bytes, end) - start;
3241 if (bytes < minlen) {
3242 spin_unlock(&ctl->tree_lock);
3243 mutex_unlock(&ctl->cache_writeout_mutex);
3244 goto next;
3245 }
3246
3247 unlink_free_space(ctl, entry);
3248 kmem_cache_free(btrfs_free_space_cachep, entry);
3249
3250 spin_unlock(&ctl->tree_lock);
3251 trim_entry.start = extent_start;
3252 trim_entry.bytes = extent_bytes;
3253 list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3254 mutex_unlock(&ctl->cache_writeout_mutex);
3255
3256 ret = do_trimming(block_group, total_trimmed, start, bytes,
3257 extent_start, extent_bytes, &trim_entry);
3258 if (ret)
3259 break;
3260next:
3261 start += bytes;
3262
3263 if (fatal_signal_pending(current)) {
3264 ret = -ERESTARTSYS;
3265 break;
3266 }
3267
3268 cond_resched();
3269 }
3270out:
3271 return ret;
3272}
3273
3274static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
3275 u64 *total_trimmed, u64 start, u64 end, u64 minlen)
3276{
3277 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3278 struct btrfs_free_space *entry;
3279 int ret = 0;
3280 int ret2;
3281 u64 bytes;
3282 u64 offset = offset_to_bitmap(ctl, start);
3283
3284 while (offset < end) {
3285 bool next_bitmap = false;
3286 struct btrfs_trim_range trim_entry;
3287
3288 mutex_lock(&ctl->cache_writeout_mutex);
3289 spin_lock(&ctl->tree_lock);
3290
3291 if (ctl->free_space < minlen) {
3292 spin_unlock(&ctl->tree_lock);
3293 mutex_unlock(&ctl->cache_writeout_mutex);
3294 break;
3295 }
3296
3297 entry = tree_search_offset(ctl, offset, 1, 0);
3298 if (!entry) {
3299 spin_unlock(&ctl->tree_lock);
3300 mutex_unlock(&ctl->cache_writeout_mutex);
3301 next_bitmap = true;
3302 goto next;
3303 }
3304
3305 bytes = minlen;
3306 ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
3307 if (ret2 || start >= end) {
3308 spin_unlock(&ctl->tree_lock);
3309 mutex_unlock(&ctl->cache_writeout_mutex);
3310 next_bitmap = true;
3311 goto next;
3312 }
3313
3314 bytes = min(bytes, end - start);
3315 if (bytes < minlen) {
3316 spin_unlock(&ctl->tree_lock);
3317 mutex_unlock(&ctl->cache_writeout_mutex);
3318 goto next;
3319 }
3320
3321 bitmap_clear_bits(ctl, entry, start, bytes);
3322 if (entry->bytes == 0)
3323 free_bitmap(ctl, entry);
3324
3325 spin_unlock(&ctl->tree_lock);
3326 trim_entry.start = start;
3327 trim_entry.bytes = bytes;
3328 list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3329 mutex_unlock(&ctl->cache_writeout_mutex);
3330
3331 ret = do_trimming(block_group, total_trimmed, start, bytes,
3332 start, bytes, &trim_entry);
3333 if (ret)
3334 break;
3335next:
3336 if (next_bitmap) {
3337 offset += BITS_PER_BITMAP * ctl->unit;
3338 } else {
3339 start += bytes;
3340 if (start >= offset + BITS_PER_BITMAP * ctl->unit)
3341 offset += BITS_PER_BITMAP * ctl->unit;
3342 }
3343
3344 if (fatal_signal_pending(current)) {
3345 ret = -ERESTARTSYS;
3346 break;
3347 }
3348
3349 cond_resched();
3350 }
3351
3352 return ret;
3353}
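
/*
 * Stepping note for trim_bitmaps() (editorial, assuming 4K pages and a 4K
 * unit): within one bitmap, start advances by however many bytes were just
 * trimmed; once a bitmap is exhausted (or missing), offset jumps by
 * BITS_PER_BITMAP * ctl->unit = 128MiB to the region the next bitmap entry
 * would cover, so the walk visits each bitmap-sized window exactly once.
 */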
3354
3355void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
3356{
3357 atomic_inc(&cache->trimming);
3358}
3359
3360void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
3361{
3362 struct btrfs_fs_info *fs_info = block_group->fs_info;
3363 struct extent_map_tree *em_tree;
3364 struct extent_map *em;
3365 bool cleanup;
3366
3367 spin_lock(&block_group->lock);
3368 cleanup = (atomic_dec_and_test(&block_group->trimming) &&
3369 block_group->removed);
3370 spin_unlock(&block_group->lock);
3371
3372 if (cleanup) {
3373 mutex_lock(&fs_info->chunk_mutex);
3374 em_tree = &fs_info->mapping_tree;
3375 write_lock(&em_tree->lock);
3376 em = lookup_extent_mapping(em_tree, block_group->key.objectid,
3377 1);
3378 BUG_ON(!em); /* logic error, can't happen */
3379 remove_extent_mapping(em_tree, em);
3380 write_unlock(&em_tree->lock);
3381 mutex_unlock(&fs_info->chunk_mutex);
3382
3383 /* once for us and once for the tree */
3384 free_extent_map(em);
3385 free_extent_map(em);
3386
3387 /*
3388 * We've left one free space entry and other tasks trimming
3389		 * this block group have each left one entry. Free them.
3390 */
3391 __btrfs_remove_free_space_cache(block_group->free_space_ctl);
3392 }
3393}
3394
3395int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
3396 u64 *trimmed, u64 start, u64 end, u64 minlen)
3397{
3398 int ret;
3399
3400 *trimmed = 0;
3401
3402 spin_lock(&block_group->lock);
3403 if (block_group->removed) {
3404 spin_unlock(&block_group->lock);
3405 return 0;
3406 }
3407 btrfs_get_block_group_trimming(block_group);
3408 spin_unlock(&block_group->lock);
3409
3410 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
3411 if (ret)
3412 goto out;
3413
3414 ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
3415out:
3416 btrfs_put_block_group_trimming(block_group);
3417 return ret;
3418}
3419
3420/*
3421 * Find the left-most item in the cache tree, and then return the
3422 * smallest inode number in the item.
3423 *
3424 * Note: the returned inode number may not be the smallest one in
3425 * the tree, if the left-most item is a bitmap.
3426 */
3427u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
3428{
3429 struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
3430 struct btrfs_free_space *entry = NULL;
3431 u64 ino = 0;
3432
3433 spin_lock(&ctl->tree_lock);
3434
3435 if (RB_EMPTY_ROOT(&ctl->free_space_offset))
3436 goto out;
3437
3438 entry = rb_entry(rb_first(&ctl->free_space_offset),
3439 struct btrfs_free_space, offset_index);
3440
3441 if (!entry->bitmap) {
3442 ino = entry->offset;
3443
3444 unlink_free_space(ctl, entry);
3445 entry->offset++;
3446 entry->bytes--;
3447 if (!entry->bytes)
3448 kmem_cache_free(btrfs_free_space_cachep, entry);
3449 else
3450 link_free_space(ctl, entry);
3451 } else {
3452 u64 offset = 0;
3453 u64 count = 1;
3454 int ret;
3455
3456 ret = search_bitmap(ctl, entry, &offset, &count, true);
3457		/* Logic error; should be empty if it can't find anything */
3458 ASSERT(!ret);
3459
3460 ino = offset;
3461 bitmap_clear_bits(ctl, entry, offset, 1);
3462 if (entry->bytes == 0)
3463 free_bitmap(ctl, entry);
3464 }
3465out:
3466 spin_unlock(&ctl->tree_lock);
3467
3468 return ino;
3469}
3470
3471struct inode *lookup_free_ino_inode(struct btrfs_root *root,
3472 struct btrfs_path *path)
3473{
3474 struct inode *inode = NULL;
3475
3476 spin_lock(&root->ino_cache_lock);
3477 if (root->ino_cache_inode)
3478 inode = igrab(root->ino_cache_inode);
3479 spin_unlock(&root->ino_cache_lock);
3480 if (inode)
3481 return inode;
3482
3483 inode = __lookup_free_space_inode(root, path, 0);
3484 if (IS_ERR(inode))
3485 return inode;
3486
3487 spin_lock(&root->ino_cache_lock);
3488 if (!btrfs_fs_closing(root->fs_info))
3489 root->ino_cache_inode = igrab(inode);
3490 spin_unlock(&root->ino_cache_lock);
3491
3492 return inode;
3493}
3494
3495int create_free_ino_inode(struct btrfs_root *root,
3496 struct btrfs_trans_handle *trans,
3497 struct btrfs_path *path)
3498{
3499 return __create_free_space_inode(root, trans, path,
3500 BTRFS_FREE_INO_OBJECTID, 0);
3501}
3502
3503int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
3504{
3505 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
3506 struct btrfs_path *path;
3507 struct inode *inode;
3508 int ret = 0;
3509 u64 root_gen = btrfs_root_generation(&root->root_item);
3510
3511 if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
3512 return 0;
3513
3514 /*
3515 * If we're unmounting then just return, since this does a search on the
3516 * normal root and not the commit root and we could deadlock.
3517 */
3518 if (btrfs_fs_closing(fs_info))
3519 return 0;
3520
3521 path = btrfs_alloc_path();
3522 if (!path)
3523 return 0;
3524
3525 inode = lookup_free_ino_inode(root, path);
3526 if (IS_ERR(inode))
3527 goto out;
3528
3529 if (root_gen != BTRFS_I(inode)->generation)
3530 goto out_put;
3531
3532 ret = __load_free_space_cache(root, inode, ctl, path, 0);
3533
3534 if (ret < 0)
3535 btrfs_err(fs_info,
3536 "failed to load free ino cache for root %llu",
3537 root->root_key.objectid);
3538out_put:
3539 iput(inode);
3540out:
3541 btrfs_free_path(path);
3542 return ret;
3543}
3544
3545int btrfs_write_out_ino_cache(struct btrfs_root *root,
3546 struct btrfs_trans_handle *trans,
3547 struct btrfs_path *path,
3548 struct inode *inode)
3549{
3550 struct btrfs_fs_info *fs_info = root->fs_info;
3551 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
3552 int ret;
3553 struct btrfs_io_ctl io_ctl;
3554 bool release_metadata = true;
3555
3556 if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
3557 return 0;
3558
3559 memset(&io_ctl, 0, sizeof(io_ctl));
3560 ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, trans);
3561 if (!ret) {
3562 /*
3563 * At this point writepages() didn't error out, so our metadata
3564 * reservation is released when the writeback finishes, at
3565		 * inode.c:btrfs_finish_ordered_io(), regardless of whether it
3566		 * finishes with or without an error.
3567 */
3568 release_metadata = false;
3569 ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
3570 }
3571
3572 if (ret) {
3573 if (release_metadata)
3574 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3575 inode->i_size, true);
3576#ifdef DEBUG
3577 btrfs_err(fs_info,
3578 "failed to write free ino cache for root %llu",
3579 root->root_key.objectid);
3580#endif
3581 }
3582
3583 return ret;
3584}
3585
3586#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3587/*
3588 * Use this if you need to make a bitmap or extent entry specifically; it
3589 * doesn't do any of the merging that add_free_space does. This acts a lot like
3590 * how the free space cache loading stuff works, so you can get really weird
3591 * configurations.
3592 */
3593int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
3594 u64 offset, u64 bytes, bool bitmap)
3595{
3596 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
3597 struct btrfs_free_space *info = NULL, *bitmap_info;
3598 void *map = NULL;
3599 u64 bytes_added;
3600 int ret;
3601
3602again:
3603 if (!info) {
3604 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
3605 if (!info)
3606 return -ENOMEM;
3607 }
3608
3609 if (!bitmap) {
3610 spin_lock(&ctl->tree_lock);
3611 info->offset = offset;
3612 info->bytes = bytes;
3613 info->max_extent_size = 0;
3614 ret = link_free_space(ctl, info);
3615 spin_unlock(&ctl->tree_lock);
3616 if (ret)
3617 kmem_cache_free(btrfs_free_space_cachep, info);
3618 return ret;
3619 }
3620
3621 if (!map) {
3622 map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
3623 if (!map) {
3624 kmem_cache_free(btrfs_free_space_cachep, info);
3625 return -ENOMEM;
3626 }
3627 }
3628
3629 spin_lock(&ctl->tree_lock);
3630 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
3631 1, 0);
3632 if (!bitmap_info) {
3633 info->bitmap = map;
3634 map = NULL;
3635 add_new_bitmap(ctl, info, offset);
3636 bitmap_info = info;
3637 info = NULL;
3638 }
3639
3640 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
3641
3642 bytes -= bytes_added;
3643 offset += bytes_added;
3644 spin_unlock(&ctl->tree_lock);
3645
3646 if (bytes)
3647 goto again;
3648
3649 if (info)
3650 kmem_cache_free(btrfs_free_space_cachep, info);
3651 if (map)
3652 kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
3653 return 0;
3654}
3655
3656/*
3657 * Checks to see if the given range is in the free space cache. This is really
3658 * just used to check the absence of space, so if there is free space in the
3659 * range at all we will return 1.
3660 */
3661int test_check_exists(struct btrfs_block_group_cache *cache,
3662 u64 offset, u64 bytes)
3663{
3664 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
3665 struct btrfs_free_space *info;
3666 int ret = 0;
3667
3668 spin_lock(&ctl->tree_lock);
3669 info = tree_search_offset(ctl, offset, 0, 0);
3670 if (!info) {
3671 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
3672 1, 0);
3673 if (!info)
3674 goto out;
3675 }
3676
3677have_info:
3678 if (info->bitmap) {
3679 u64 bit_off, bit_bytes;
3680 struct rb_node *n;
3681 struct btrfs_free_space *tmp;
3682
3683 bit_off = offset;
3684 bit_bytes = ctl->unit;
3685 ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
3686 if (!ret) {
3687 if (bit_off == offset) {
3688 ret = 1;
3689 goto out;
3690 } else if (bit_off > offset &&
3691 offset + bytes > bit_off) {
3692 ret = 1;
3693 goto out;
3694 }
3695 }
3696
3697 n = rb_prev(&info->offset_index);
3698 while (n) {
3699 tmp = rb_entry(n, struct btrfs_free_space,
3700 offset_index);
3701 if (tmp->offset + tmp->bytes < offset)
3702 break;
3703 if (offset + bytes < tmp->offset) {
3704 n = rb_prev(&tmp->offset_index);
3705 continue;
3706 }
3707 info = tmp;
3708 goto have_info;
3709 }
3710
3711 n = rb_next(&info->offset_index);
3712 while (n) {
3713 tmp = rb_entry(n, struct btrfs_free_space,
3714 offset_index);
3715 if (offset + bytes < tmp->offset)
3716 break;
3717 if (tmp->offset + tmp->bytes < offset) {
3718 n = rb_next(&tmp->offset_index);
3719 continue;
3720 }
3721 info = tmp;
3722 goto have_info;
3723 }
3724
3725 ret = 0;
3726 goto out;
3727 }
3728
3729 if (info->offset == offset) {
3730 ret = 1;
3731 goto out;
3732 }
3733
3734 if (offset > info->offset && offset < info->offset + info->bytes)
3735 ret = 1;
3736out:
3737 spin_unlock(&ctl->tree_lock);
3738 return ret;
3739}
3740#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */
1/*
2 * Copyright (C) 2008 Red Hat. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/pagemap.h>
20#include <linux/sched.h>
21#include <linux/slab.h>
22#include <linux/math64.h>
23#include <linux/ratelimit.h>
24#include "ctree.h"
25#include "free-space-cache.h"
26#include "transaction.h"
27#include "disk-io.h"
28#include "extent_io.h"
29#include "inode-map.h"
30#include "volumes.h"
31
32#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
33#define MAX_CACHE_BYTES_PER_GIG SZ_32K
34
35struct btrfs_trim_range {
36 u64 start;
37 u64 bytes;
38 struct list_head list;
39};
40
41static int link_free_space(struct btrfs_free_space_ctl *ctl,
42 struct btrfs_free_space *info);
43static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
44 struct btrfs_free_space *info);
45static int btrfs_wait_cache_io_root(struct btrfs_root *root,
46 struct btrfs_trans_handle *trans,
47 struct btrfs_io_ctl *io_ctl,
48 struct btrfs_path *path);
49
50static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
51 struct btrfs_path *path,
52 u64 offset)
53{
54 struct btrfs_fs_info *fs_info = root->fs_info;
55 struct btrfs_key key;
56 struct btrfs_key location;
57 struct btrfs_disk_key disk_key;
58 struct btrfs_free_space_header *header;
59 struct extent_buffer *leaf;
60 struct inode *inode = NULL;
61 int ret;
62
63 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
64 key.offset = offset;
65 key.type = 0;
66
67 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
68 if (ret < 0)
69 return ERR_PTR(ret);
70 if (ret > 0) {
71 btrfs_release_path(path);
72 return ERR_PTR(-ENOENT);
73 }
74
75 leaf = path->nodes[0];
76 header = btrfs_item_ptr(leaf, path->slots[0],
77 struct btrfs_free_space_header);
78 btrfs_free_space_key(leaf, header, &disk_key);
79 btrfs_disk_key_to_cpu(&location, &disk_key);
80 btrfs_release_path(path);
81
82 inode = btrfs_iget(fs_info->sb, &location, root, NULL);
83 if (IS_ERR(inode))
84 return inode;
85 if (is_bad_inode(inode)) {
86 iput(inode);
87 return ERR_PTR(-ENOENT);
88 }
89
90 mapping_set_gfp_mask(inode->i_mapping,
91 mapping_gfp_constraint(inode->i_mapping,
92 ~(__GFP_FS | __GFP_HIGHMEM)));
93
94 return inode;
95}
96
97struct inode *lookup_free_space_inode(struct btrfs_root *root,
98 struct btrfs_block_group_cache
99 *block_group, struct btrfs_path *path)
100{
101 struct inode *inode = NULL;
102 struct btrfs_fs_info *fs_info = root->fs_info;
103 u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
104
105 spin_lock(&block_group->lock);
106 if (block_group->inode)
107 inode = igrab(block_group->inode);
108 spin_unlock(&block_group->lock);
109 if (inode)
110 return inode;
111
112 inode = __lookup_free_space_inode(root, path,
113 block_group->key.objectid);
114 if (IS_ERR(inode))
115 return inode;
116
117 spin_lock(&block_group->lock);
118 if (!((BTRFS_I(inode)->flags & flags) == flags)) {
119 btrfs_info(fs_info, "Old style space inode found, converting.");
120 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
121 BTRFS_INODE_NODATACOW;
122 block_group->disk_cache_state = BTRFS_DC_CLEAR;
123 }
124
125 if (!block_group->iref) {
126 block_group->inode = igrab(inode);
127 block_group->iref = 1;
128 }
129 spin_unlock(&block_group->lock);
130
131 return inode;
132}
133
134static int __create_free_space_inode(struct btrfs_root *root,
135 struct btrfs_trans_handle *trans,
136 struct btrfs_path *path,
137 u64 ino, u64 offset)
138{
139 struct btrfs_key key;
140 struct btrfs_disk_key disk_key;
141 struct btrfs_free_space_header *header;
142 struct btrfs_inode_item *inode_item;
143 struct extent_buffer *leaf;
144 u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
145 int ret;
146
147 ret = btrfs_insert_empty_inode(trans, root, path, ino);
148 if (ret)
149 return ret;
150
151 /* We inline crc's for the free disk space cache */
152 if (ino != BTRFS_FREE_INO_OBJECTID)
153 flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
154
155 leaf = path->nodes[0];
156 inode_item = btrfs_item_ptr(leaf, path->slots[0],
157 struct btrfs_inode_item);
158 btrfs_item_key(leaf, &disk_key, path->slots[0]);
159 memzero_extent_buffer(leaf, (unsigned long)inode_item,
160 sizeof(*inode_item));
161 btrfs_set_inode_generation(leaf, inode_item, trans->transid);
162 btrfs_set_inode_size(leaf, inode_item, 0);
163 btrfs_set_inode_nbytes(leaf, inode_item, 0);
164 btrfs_set_inode_uid(leaf, inode_item, 0);
165 btrfs_set_inode_gid(leaf, inode_item, 0);
166 btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
167 btrfs_set_inode_flags(leaf, inode_item, flags);
168 btrfs_set_inode_nlink(leaf, inode_item, 1);
169 btrfs_set_inode_transid(leaf, inode_item, trans->transid);
170 btrfs_set_inode_block_group(leaf, inode_item, offset);
171 btrfs_mark_buffer_dirty(leaf);
172 btrfs_release_path(path);
173
174 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
175 key.offset = offset;
176 key.type = 0;
177 ret = btrfs_insert_empty_item(trans, root, path, &key,
178 sizeof(struct btrfs_free_space_header));
179 if (ret < 0) {
180 btrfs_release_path(path);
181 return ret;
182 }
183
184 leaf = path->nodes[0];
185 header = btrfs_item_ptr(leaf, path->slots[0],
186 struct btrfs_free_space_header);
187 memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
188 btrfs_set_free_space_key(leaf, header, &disk_key);
189 btrfs_mark_buffer_dirty(leaf);
190 btrfs_release_path(path);
191
192 return 0;
193}
194
195int create_free_space_inode(struct btrfs_root *root,
196 struct btrfs_trans_handle *trans,
197 struct btrfs_block_group_cache *block_group,
198 struct btrfs_path *path)
199{
200 int ret;
201 u64 ino;
202
203 ret = btrfs_find_free_objectid(root, &ino);
204 if (ret < 0)
205 return ret;
206
207 return __create_free_space_inode(root, trans, path, ino,
208 block_group->key.objectid);
209}
210
211int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
212 struct btrfs_block_rsv *rsv)
213{
214 u64 needed_bytes;
215 int ret;
216
217 /* 1 for slack space, 1 for updating the inode */
218 needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) +
219 btrfs_calc_trans_metadata_size(fs_info, 1);
220
221 spin_lock(&rsv->lock);
222 if (rsv->reserved < needed_bytes)
223 ret = -ENOSPC;
224 else
225 ret = 0;
226 spin_unlock(&rsv->lock);
227 return ret;
228}
229
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode)
{
	int ret = 0;
	struct btrfs_path *path = btrfs_alloc_path();
	bool locked = false;

	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}

	if (block_group) {
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(trans, block_group, path);
			btrfs_put_block_group(block_group);
		}

		/*
		 * Now that we've truncated the cache away, it's no longer
		 * set up or written.
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
	}
	btrfs_free_path(path);

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions, and we don't need to check
	 * for -EAGAIN because we're a free space cache inode.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}

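/*
 * Prepare an io_ctl for streaming the cache file's pages. We inline one
 * crc32c per page for every cache inode except the free-ino cache, and all
 * of those crcs must fit in the first page alongside the generation, which
 * bounds how large a cache file we are willing to write.
 */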
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32)) >= PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->fs_info = btrfs_sb(inode->i_sb);
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	io_ctl->cur = NULL;
	io_ctl->orig = NULL;
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_SIZE);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

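/*
 * First-page layout implied by the helpers below, when crcs are enabled:
 *
 *	[crc page 0][crc page 1]...[crc page N-1][le64 generation][entries...]
 *
 * Without crcs, only the generation sits at the front of the first page.
 */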
static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas. If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area. If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
			le64_to_cpu(*gen), generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_SIZE - offset);
	btrfs_csum_final(crc, (u8 *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_SIZE - offset);
	btrfs_csum_final(crc, (u8 *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->fs_info,
			"csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

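/*
 * Append one free space entry (extent or bitmap header) to the stream,
 * crc'ing the current page and mapping the next one whenever the current
 * page fills up. Returns -ENOSPC only if no page is currently mapped.
 */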
static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

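/*
 * Bitmap payloads are written as whole pages: if we are mid-page, close out
 * (crc) the current page and copy the PAGE_SIZE bitmap into a fresh one.
 */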
static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous
 * sections of free space that are split up in entries. This poses a problem
 * with the tree logging stuff since it could have allocated across what
 * appears to be 2 entries since we would have merged the entries when adding
 * the pinned extents back to the free space cache. So run through the space
 * cache that we just loaded and merge contiguous entries. This will make the
 * log replay stuff not blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}

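/*
 * Read one cache file back into the in-memory free space ctl: validate the
 * generation stored in the file against the inode, then link every extent
 * entry and bitmap entry found. Returns 1 on success, 0 when the cache is
 * unusable (the caller falls back to scanning), or a negative errno.
 */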
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(fs_info,
			   "the free space cache file (%llu) is invalid, skip it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(fs_info,
			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
			  BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, 0);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * The bitmap payloads are stored after all the entry records in the
	 * cache file, so we can only read them back once every entry has been
	 * added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

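/*
 * Write all extent entries and bitmap headers to the stream, starting with
 * any cluster attached to this block group (whose rbtree is walked under the
 * cluster's lock) and finishing with ranges that are temporarily off the
 * rbtree because they are being trimmed.
 */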
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group_cache *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state,
			   struct list_head *bitmap_list)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state,
			     GFP_NOFS);
}

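/*
 * Wait for the cache file's ordered IO to complete, then update the free
 * space header item so the cache is considered valid. On failure the inode's
 * generation is zeroed and the block group is flipped to BTRFS_DC_ERROR so
 * the cache will be rebuilt rather than trusted.
 */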
static int __btrfs_wait_cache_io(struct btrfs_root *root,
				 struct btrfs_trans_handle *trans,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_io_ctl *io_ctl,
				 struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;
	struct btrfs_fs_info *fs_info;

	if (!inode)
		return 0;

	fs_info = btrfs_sb(inode->i_sb);

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group) {
#ifdef DEBUG
			btrfs_err(fs_info,
				  "failed to write free space cache for block group %llu",
				  block_group->key.objectid);
#endif
		}
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO. Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}

static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
}

int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
				     block_group, &block_group->io_ctl,
				     path, block_group->key.objectid);
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @inode - the cache inode we are writing to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @io_ctl - the io_ctl that tracks the pages while they are under IO
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount. This will return 0 if it was successful in writing the
 * cache out, or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
	if (ret)
		goto out;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some space freed in the current transaction is pinned; it will be
	 * added to the free space cache only after the transaction is
	 * committed, so we must not lose it here.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it. No locking needed.
	 */
	ret = write_pinned_extent_entries(fs_info, block_group,
					  io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
				i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	/*
	 * At this point the pages are under IO and we're happy. The caller is
	 * responsible for waiting on them and updating the cache and the
	 * inode.
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	if (must_iput)
		iput(inode);
	return ret;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}

int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
				      &block_group->io_ctl, trans,
				      path, block_group->key.objectid);
	if (ret) {
#ifdef DEBUG
		btrfs_err(fs_info,
			  "failed to write free space cache for block group %llu",
			  block_group->key.objectid);
#endif
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * If ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode.
	 */

	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

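/*
 * Round an offset down to the start of the bitmap window covering it. Each
 * bitmap spans BITS_PER_BITMAP * ctl->unit bytes; with 4KiB pages and a 4KiB
 * unit, for example, that is 32768 * 4KiB = 128MiB per bitmap.
 */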
static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset. If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap. So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically. If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

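/*
 * Recompute how many extent entries we may keep before new free space is
 * pushed into bitmaps, aiming to keep the cache's memory use at roughly
 * MAX_CACHE_BYTES_PER_GIG (32KiB) per 1GiB of block group, split between
 * bitmap pages and extent entries.
 */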
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u64, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

/*
 * If we cannot find a suitable extent, use *bytes to report back the size of
 * the largest contiguous run we did find in the bitmap.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	/*
	 * Skip searching the bitmap if we don't have a contiguous section that
	 * is large enough for this allocation.
	 */
	if (for_alloc &&
	    bitmap_info->max_extent_size &&
	    bitmap_info->max_extent_size < *bytes) {
		*bytes = bitmap_info->max_extent_size;
		return -1;
	}

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		if (for_alloc && bits == 1) {
			found_bits = 1;
			break;
		}
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	bitmap_info->max_extent_size = *bytes;
	return -1;
}

/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		/*
		 * make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size, true);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else if (size > *max_extent_size) {
				*max_extent_size = size;
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

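/*
 * Clear a byte range out of one or more bitmaps. The range may spill past
 * the end of the starting bitmap, in which case we walk into the following
 * bitmap entries; -EAGAIN tells the caller the rest of the range is covered
 * by extent entries and must be removed through that path instead.
 */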
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info,
			u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap. We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and
	 * then go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
			    false);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes, false);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}

static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	/*
	 * We set some bytes, so we no longer know what the max extent size
	 * is; it will be recomputed on the next search.
	 */
	info->max_extent_size = 0;

	return bytes_to_set;
}

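/*
 * Decide whether a new chunk of free space should be tracked in a bitmap
 * instead of an extent entry: small extents get pushed into bitmaps once we
 * run low on extent slots, while block groups much smaller than a single
 * bitmap's coverage always use extent entries.
 */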
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	bool forced = false;

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group))
		forced = true;
#endif

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap.
	 */
	if (!forced && ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents. However, if we have
		 * plenty of cache left then go ahead and add them; there is no
		 * sense in adding the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= fs_info->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries. However,
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory
	 * limit, so still allow those block groups to have a bitmap entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
}

static const struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds = recalculate_thresholds,
	.use_bitmap = use_bitmap,
};

static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so, and it has our bitmap, we need to
	 * add the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
				 struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * First we want to see if there is free space adjacent to the range we
	 * are adding; if there is, remove that struct and add a new one to
	 * cover the entire range.
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

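/*
 * Try to grow an extent entry forward by stealing the run of set bits that
 * immediately follows it in the covering bitmap; returns true if the entry
 * was extended.
 */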
static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

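/*
 * Mirror of steal_from_bitmap_to_end(): walk backwards from the entry's
 * start and steal the run of set bits that ends right before it, moving the
 * entry's offset down accordingly.
 */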
static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

2286/*
2287 * We always prefer to allocate from extent entries, both for clustered and
2288 * non-clustered allocation requests. So when attempting to add a new extent
2289 * entry, try to see if there's adjacent free space in bitmap entries, and if
2290 * there is, migrate that space from the bitmaps to the extent.
2291 * This way we have a better chance of satisfying space allocation requests,
2292 * because we attempt to satisfy them based on a single cache entry and never
2293 * on 2 or more entries - even if the entries represent a contiguous free space
2294 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2295 * ends).
2296 */
2297static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
2298 struct btrfs_free_space *info,
2299 bool update_stat)
2300{
2301	/*
2302	 * Only work with disconnected entries, as we can change their offset,
2303	 * and they must be extent entries.
2304	 */
2305 ASSERT(!info->bitmap);
2306 ASSERT(RB_EMPTY_NODE(&info->offset_index));
2307
2308 if (ctl->total_bitmaps > 0) {
2309 bool stole_end;
2310 bool stole_front = false;
2311
2312 stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
2313 if (ctl->total_bitmaps > 0)
2314 stole_front = steal_from_bitmap_to_front(ctl, info,
2315 update_stat);
2316
2317 if (stole_end || stole_front)
2318 try_merge_free_space(ctl, info, update_stat);
2319 }
2320}
2321
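/*
 * Add a free space range to the in-memory cache behind @ctl. The range is
 * first merged with any adjacent extent entries; if nothing could be merged,
 * it may be accounted in a bitmap instead. Otherwise the (possibly merged)
 * extent entry is linked into the tree, stealing adjacent bitmap space first.
 * Returns 0 on success or a negative errno (e.g. -ENOMEM) on failure.
 */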
2322int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
2323 struct btrfs_free_space_ctl *ctl,
2324 u64 offset, u64 bytes)
2325{
2326 struct btrfs_free_space *info;
2327 int ret = 0;
2328
2329 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
2330 if (!info)
2331 return -ENOMEM;
2332
2333 info->offset = offset;
2334 info->bytes = bytes;
2335 RB_CLEAR_NODE(&info->offset_index);
2336
2337 spin_lock(&ctl->tree_lock);
2338
2339 if (try_merge_free_space(ctl, info, true))
2340 goto link;
2341
2342	/*
2343	 * There was no extent directly to the left or right of this new
2344	 * extent, so we know we're going to have to allocate a new extent;
2345	 * before we do that, see if we need to drop this into a bitmap.
2346	 */
2347 ret = insert_into_bitmap(ctl, info);
2348 if (ret < 0) {
2349 goto out;
2350 } else if (ret) {
2351 ret = 0;
2352 goto out;
2353 }
2354link:
2355 /*
2356 * Only steal free space from adjacent bitmaps if we're sure we're not
2357 * going to add the new free space to existing bitmap entries - because
2358 * that would mean unnecessary work that would be reverted. Therefore
2359 * attempt to steal space from bitmaps if we're adding an extent entry.
2360 */
2361 steal_from_bitmap(ctl, info, true);
2362
2363 ret = link_free_space(ctl, info);
2364 if (ret)
2365 kmem_cache_free(btrfs_free_space_cachep, info);
2366out:
2367 spin_unlock(&ctl->tree_lock);
2368
2369 if (ret) {
2370		btrfs_crit(fs_info, "unable to add free space: %d", ret);
2371 ASSERT(ret != -EEXIST);
2372 }
2373
2374 return ret;
2375}
2376
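/*
 * Remove the range [offset, offset + bytes) from the free space cache of
 * @block_group. The range may span multiple extent and bitmap entries, so
 * extent entries are trimmed or split as needed and the loop restarts until
 * the whole range has been removed.
 */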
2377int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
2378 u64 offset, u64 bytes)
2379{
2380 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2381 struct btrfs_free_space *info;
2382 int ret;
2383 bool re_search = false;
2384
2385 spin_lock(&ctl->tree_lock);
2386
2387again:
2388 ret = 0;
2389 if (!bytes)
2390 goto out_lock;
2391
2392 info = tree_search_offset(ctl, offset, 0, 0);
2393 if (!info) {
2394		/*
2395		 * Oops, we didn't find an extent that matched the space we
2396		 * wanted to remove; look for a bitmap instead.
2397		 */
2398 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2399 1, 0);
2400 if (!info) {
2401 /*
2402 * If we found a partial bit of our free space in a
2403			 * bitmap but then couldn't find the other part, this may
2404 * be a problem, so WARN about it.
2405 */
2406 WARN_ON(re_search);
2407 goto out_lock;
2408 }
2409 }
2410
2411 re_search = false;
2412 if (!info->bitmap) {
2413 unlink_free_space(ctl, info);
2414 if (offset == info->offset) {
2415 u64 to_free = min(bytes, info->bytes);
2416
2417 info->bytes -= to_free;
2418 info->offset += to_free;
2419 if (info->bytes) {
2420 ret = link_free_space(ctl, info);
2421 WARN_ON(ret);
2422 } else {
2423 kmem_cache_free(btrfs_free_space_cachep, info);
2424 }
2425
2426 offset += to_free;
2427 bytes -= to_free;
2428 goto again;
2429 } else {
2430 u64 old_end = info->bytes + info->offset;
2431
2432 info->bytes = offset - info->offset;
2433 ret = link_free_space(ctl, info);
2434 WARN_ON(ret);
2435 if (ret)
2436 goto out_lock;
2437
2438 /* Not enough bytes in this entry to satisfy us */
2439 if (old_end < offset + bytes) {
2440 bytes -= old_end - offset;
2441 offset = old_end;
2442 goto again;
2443 } else if (old_end == offset + bytes) {
2444 /* all done */
2445 goto out_lock;
2446 }
2447 spin_unlock(&ctl->tree_lock);
2448
2449 ret = btrfs_add_free_space(block_group, offset + bytes,
2450 old_end - (offset + bytes));
2451 WARN_ON(ret);
2452 goto out;
2453 }
2454 }
2455
2456 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
2457 if (ret == -EAGAIN) {
2458 re_search = true;
2459 goto again;
2460 }
2461out_lock:
2462 spin_unlock(&ctl->tree_lock);
2463out:
2464 return ret;
2465}
2466
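/*
 * Debugging helper: dump every free space entry of @block_group to the log
 * and report how many of them are at least @bytes in size.
 */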
2467void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
2468 u64 bytes)
2469{
2470 struct btrfs_fs_info *fs_info = block_group->fs_info;
2471 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2472 struct btrfs_free_space *info;
2473 struct rb_node *n;
2474 int count = 0;
2475
2476 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2477 info = rb_entry(n, struct btrfs_free_space, offset_index);
2478 if (info->bytes >= bytes && !block_group->ro)
2479 count++;
2480 btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
2481 info->offset, info->bytes,
2482 (info->bitmap) ? "yes" : "no");
2483 }
2484 btrfs_info(fs_info, "block group has cluster?: %s",
2485 list_empty(&block_group->cluster_list) ? "no" : "yes");
2486	btrfs_info(fs_info,
2487		   "%d blocks of free space at or bigger than %llu bytes", count, bytes);
2488}
2489
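/*
 * Initialize the free space ctl embedded in @block_group: the locks, the
 * offset the cache starts at, the allocation unit and the threshold at which
 * we start converting extent entries into bitmaps.
 */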
2490void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
2491{
2492 struct btrfs_fs_info *fs_info = block_group->fs_info;
2493 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2494
2495 spin_lock_init(&ctl->tree_lock);
2496 ctl->unit = fs_info->sectorsize;
2497 ctl->start = block_group->key.objectid;
2498 ctl->private = block_group;
2499 ctl->op = &free_space_op;
2500 INIT_LIST_HEAD(&ctl->trimming_ranges);
2501 mutex_init(&ctl->cache_writeout_mutex);
2502
2503	/*
2504	 * We only want to have 32k of RAM per block group for keeping
2505	 * track of free space, and if we pass 1/2 of that we want to
2506	 * start converting things over to using bitmaps.
2507	 */
2508 ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
2509}
2510
2511/*
2512 * For a given cluster, put all of its extents back into the free
2513 * space cache. If the block group passed doesn't match the block group
2514 * pointed to by the cluster, someone else raced in and freed the
2515 * cluster already. In that case, we just return without changing anything.
2516 */
2517static int
2518__btrfs_return_cluster_to_free_space(
2519 struct btrfs_block_group_cache *block_group,
2520 struct btrfs_free_cluster *cluster)
2521{
2522 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2523 struct btrfs_free_space *entry;
2524 struct rb_node *node;
2525
2526 spin_lock(&cluster->lock);
2527 if (cluster->block_group != block_group)
2528 goto out;
2529
2530 cluster->block_group = NULL;
2531 cluster->window_start = 0;
2532 list_del_init(&cluster->block_group_list);
2533
2534 node = rb_first(&cluster->root);
2535 while (node) {
2536 bool bitmap;
2537
2538 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2539 node = rb_next(&entry->offset_index);
2540 rb_erase(&entry->offset_index, &cluster->root);
2541 RB_CLEAR_NODE(&entry->offset_index);
2542
2543 bitmap = (entry->bitmap != NULL);
2544 if (!bitmap) {
2545 try_merge_free_space(ctl, entry, false);
2546 steal_from_bitmap(ctl, entry, false);
2547 }
2548 tree_insert_offset(&ctl->free_space_offset,
2549 entry->offset, &entry->offset_index, bitmap);
2550 }
2551 cluster->root = RB_ROOT;
2552
2553out:
2554 spin_unlock(&cluster->lock);
2555 btrfs_put_block_group(block_group);
2556 return 0;
2557}
2558
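/*
 * Free every entry in the cache tree. The caller must hold ctl->tree_lock;
 * the lock is dropped and reacquired periodically so that we don't hog the
 * CPU on large trees.
 */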
2559static void __btrfs_remove_free_space_cache_locked(
2560 struct btrfs_free_space_ctl *ctl)
2561{
2562 struct btrfs_free_space *info;
2563 struct rb_node *node;
2564
2565 while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2566 info = rb_entry(node, struct btrfs_free_space, offset_index);
2567 if (!info->bitmap) {
2568 unlink_free_space(ctl, info);
2569 kmem_cache_free(btrfs_free_space_cachep, info);
2570 } else {
2571 free_bitmap(ctl, info);
2572 }
2573
2574 cond_resched_lock(&ctl->tree_lock);
2575 }
2576}
2577
2578void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
2579{
2580 spin_lock(&ctl->tree_lock);
2581 __btrfs_remove_free_space_cache_locked(ctl);
2582 spin_unlock(&ctl->tree_lock);
2583}
2584
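/*
 * Empty the free space cache of @block_group, first returning any clusters
 * that still point to it back into the cache so their entries are freed too.
 */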
2585void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2586{
2587 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2588 struct btrfs_free_cluster *cluster;
2589 struct list_head *head;
2590
2591 spin_lock(&ctl->tree_lock);
2592 while ((head = block_group->cluster_list.next) !=
2593 &block_group->cluster_list) {
2594 cluster = list_entry(head, struct btrfs_free_cluster,
2595 block_group_list);
2596
2597 WARN_ON(cluster->block_group != block_group);
2598 __btrfs_return_cluster_to_free_space(block_group, cluster);
2599
2600 cond_resched_lock(&ctl->tree_lock);
2601 }
2602 __btrfs_remove_free_space_cache_locked(ctl);
2603 spin_unlock(&ctl->tree_lock);
2604}
2606
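/*
 * Find and remove @bytes (plus @empty_size) of free space from the cache,
 * using @offset as a search hint. Returns the start of the allocated range,
 * or 0 if nothing large enough was found, in which case *max_extent_size is
 * updated with the largest extent seen. Space skipped because of stripe
 * alignment is given back to the cache.
 */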
2607u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2608 u64 offset, u64 bytes, u64 empty_size,
2609 u64 *max_extent_size)
2610{
2611 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2612 struct btrfs_free_space *entry = NULL;
2613 u64 bytes_search = bytes + empty_size;
2614 u64 ret = 0;
2615 u64 align_gap = 0;
2616 u64 align_gap_len = 0;
2617
2618 spin_lock(&ctl->tree_lock);
2619 entry = find_free_space(ctl, &offset, &bytes_search,
2620 block_group->full_stripe_len, max_extent_size);
2621 if (!entry)
2622 goto out;
2623
2624 ret = offset;
2625 if (entry->bitmap) {
2626 bitmap_clear_bits(ctl, entry, offset, bytes);
2627 if (!entry->bytes)
2628 free_bitmap(ctl, entry);
2629 } else {
2630 unlink_free_space(ctl, entry);
2631 align_gap_len = offset - entry->offset;
2632 align_gap = entry->offset;
2633
2634 entry->offset = offset + bytes;
2635 WARN_ON(entry->bytes < bytes + align_gap_len);
2636
2637 entry->bytes -= bytes + align_gap_len;
2638 if (!entry->bytes)
2639 kmem_cache_free(btrfs_free_space_cachep, entry);
2640 else
2641 link_free_space(ctl, entry);
2642 }
2643out:
2644 spin_unlock(&ctl->tree_lock);
2645
2646 if (align_gap_len)
2647 __btrfs_add_free_space(block_group->fs_info, ctl,
2648 align_gap, align_gap_len);
2649 return ret;
2650}
2651
2652/*
2653 * given a cluster, put all of its extents back into the free space
2654 * cache. If a block group is passed, this function will only free
2655 * a cluster that belongs to the passed block group.
2656 *
2657 * Otherwise, it'll get a reference on the block group pointed to by the
2658 * cluster and remove the cluster from it.
2659 */
2660int btrfs_return_cluster_to_free_space(
2661 struct btrfs_block_group_cache *block_group,
2662 struct btrfs_free_cluster *cluster)
2663{
2664 struct btrfs_free_space_ctl *ctl;
2665 int ret;
2666
2667 /* first, get a safe pointer to the block group */
2668 spin_lock(&cluster->lock);
2669 if (!block_group) {
2670 block_group = cluster->block_group;
2671 if (!block_group) {
2672 spin_unlock(&cluster->lock);
2673 return 0;
2674 }
2675 } else if (cluster->block_group != block_group) {
2676		/* someone else has already freed it, don't redo their work */
2677 spin_unlock(&cluster->lock);
2678 return 0;
2679 }
2680 atomic_inc(&block_group->count);
2681 spin_unlock(&cluster->lock);
2682
2683 ctl = block_group->free_space_ctl;
2684
2685 /* now return any extents the cluster had on it */
2686 spin_lock(&ctl->tree_lock);
2687 ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
2688 spin_unlock(&ctl->tree_lock);
2689
2690 /* finally drop our ref */
2691 btrfs_put_block_group(block_group);
2692 return ret;
2693}
2694
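/*
 * Allocate @bytes from the given bitmap entry on behalf of a cluster.
 * Returns the start offset of the allocation, or 0 on failure, in which
 * case *max_extent_size is updated with the largest free run found.
 */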
2695static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2696 struct btrfs_free_cluster *cluster,
2697 struct btrfs_free_space *entry,
2698 u64 bytes, u64 min_start,
2699 u64 *max_extent_size)
2700{
2701 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2702 int err;
2703	u64 search_start = min_start;
2704	u64 search_bytes = bytes;
2705	u64 ret = 0;
2709
2710 err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
2711 if (err) {
2712 if (search_bytes > *max_extent_size)
2713 *max_extent_size = search_bytes;
2714 return 0;
2715 }
2716
2717 ret = search_start;
2718 __bitmap_clear_bits(ctl, entry, ret, bytes);
2719
2720 return ret;
2721}
2722
2723/*
2724 * given a cluster, try to allocate 'bytes' from it, returns 0
2725 * if it couldn't find anything suitably large, or a logical disk offset
2726 * if things worked out
2727 */
2728u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2729 struct btrfs_free_cluster *cluster, u64 bytes,
2730 u64 min_start, u64 *max_extent_size)
2731{
2732 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2733 struct btrfs_free_space *entry = NULL;
2734 struct rb_node *node;
2735 u64 ret = 0;
2736
2737 spin_lock(&cluster->lock);
2738 if (bytes > cluster->max_size)
2739 goto out;
2740
2741 if (cluster->block_group != block_group)
2742 goto out;
2743
2744 node = rb_first(&cluster->root);
2745 if (!node)
2746 goto out;
2747
2748 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2749 while (1) {
2750 if (entry->bytes < bytes && entry->bytes > *max_extent_size)
2751 *max_extent_size = entry->bytes;
2752
2753 if (entry->bytes < bytes ||
2754 (!entry->bitmap && entry->offset < min_start)) {
2755 node = rb_next(&entry->offset_index);
2756 if (!node)
2757 break;
2758 entry = rb_entry(node, struct btrfs_free_space,
2759 offset_index);
2760 continue;
2761 }
2762
2763 if (entry->bitmap) {
2764 ret = btrfs_alloc_from_bitmap(block_group,
2765 cluster, entry, bytes,
2766 cluster->window_start,
2767 max_extent_size);
2768 if (ret == 0) {
2769 node = rb_next(&entry->offset_index);
2770 if (!node)
2771 break;
2772 entry = rb_entry(node, struct btrfs_free_space,
2773 offset_index);
2774 continue;
2775 }
2776 cluster->window_start += bytes;
2777 } else {
2778 ret = entry->offset;
2779
2780 entry->offset += bytes;
2781 entry->bytes -= bytes;
2782 }
2783
2784 if (entry->bytes == 0)
2785 rb_erase(&entry->offset_index, &cluster->root);
2786 break;
2787 }
2788out:
2789 spin_unlock(&cluster->lock);
2790
2791 if (!ret)
2792 return 0;
2793
2794 spin_lock(&ctl->tree_lock);
2795
2796 ctl->free_space -= bytes;
2797 if (entry->bytes == 0) {
2798 ctl->free_extents--;
2799 if (entry->bitmap) {
2800 kfree(entry->bitmap);
2801 ctl->total_bitmaps--;
2802 ctl->op->recalc_thresholds(ctl);
2803 }
2804 kmem_cache_free(btrfs_free_space_cachep, entry);
2805 }
2806
2807 spin_unlock(&ctl->tree_lock);
2808
2809 return ret;
2810}
2811
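/*
 * Try to build a cluster out of a single bitmap entry: scan the bitmap for
 * runs of at least min_bits set bits until want_bits have been collected in
 * total, with at least one run covering cont1_bytes. On success the entry is
 * moved from the free space tree into the cluster's rbtree; otherwise
 * -ENOSPC is returned.
 */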
2812static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2813 struct btrfs_free_space *entry,
2814 struct btrfs_free_cluster *cluster,
2815 u64 offset, u64 bytes,
2816 u64 cont1_bytes, u64 min_bytes)
2817{
2818 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2819 unsigned long next_zero;
2820 unsigned long i;
2821 unsigned long want_bits;
2822 unsigned long min_bits;
2823 unsigned long found_bits;
2824 unsigned long max_bits = 0;
2825 unsigned long start = 0;
2826 unsigned long total_found = 0;
2827 int ret;
2828
2829 i = offset_to_bit(entry->offset, ctl->unit,
2830 max_t(u64, offset, entry->offset));
2831 want_bits = bytes_to_bits(bytes, ctl->unit);
2832 min_bits = bytes_to_bits(min_bytes, ctl->unit);
2833
2834 /*
2835 * Don't bother looking for a cluster in this bitmap if it's heavily
2836 * fragmented.
2837 */
2838 if (entry->max_extent_size &&
2839 entry->max_extent_size < cont1_bytes)
2840 return -ENOSPC;
2841again:
2842 found_bits = 0;
2843 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
2844 next_zero = find_next_zero_bit(entry->bitmap,
2845 BITS_PER_BITMAP, i);
2846 if (next_zero - i >= min_bits) {
2847 found_bits = next_zero - i;
2848 if (found_bits > max_bits)
2849 max_bits = found_bits;
2850 break;
2851 }
2852 if (next_zero - i > max_bits)
2853 max_bits = next_zero - i;
2854 i = next_zero;
2855 }
2856
2857 if (!found_bits) {
2858 entry->max_extent_size = (u64)max_bits * ctl->unit;
2859 return -ENOSPC;
2860 }
2861
2862 if (!total_found) {
2863 start = i;
2864 cluster->max_size = 0;
2865 }
2866
2867 total_found += found_bits;
2868
2869 if (cluster->max_size < found_bits * ctl->unit)
2870 cluster->max_size = found_bits * ctl->unit;
2871
2872 if (total_found < want_bits || cluster->max_size < cont1_bytes) {
2873 i = next_zero + 1;
2874 goto again;
2875 }
2876
2877 cluster->window_start = start * ctl->unit + entry->offset;
2878 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2879 ret = tree_insert_offset(&cluster->root, entry->offset,
2880 &entry->offset_index, 1);
2881 ASSERT(!ret); /* -EEXIST; Logic error */
2882
2883 trace_btrfs_setup_cluster(block_group, cluster,
2884 total_found * ctl->unit, 1);
2885 return 0;
2886}
2887
2888/*
2889 * This searches the block group for just extents to fill the cluster with.
2890 * Try to find a cluster with at least bytes total bytes, at least one
2891 * extent of cont1_bytes, and other clusters of at least min_bytes.
2892 */
2893static noinline int
2894setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2895 struct btrfs_free_cluster *cluster,
2896 struct list_head *bitmaps, u64 offset, u64 bytes,
2897 u64 cont1_bytes, u64 min_bytes)
2898{
2899 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2900 struct btrfs_free_space *first = NULL;
2901 struct btrfs_free_space *entry = NULL;
2902 struct btrfs_free_space *last;
2903 struct rb_node *node;
2904 u64 window_free;
2905 u64 max_extent;
2906 u64 total_size = 0;
2907
2908 entry = tree_search_offset(ctl, offset, 0, 1);
2909 if (!entry)
2910 return -ENOSPC;
2911
2912 /*
2913 * We don't want bitmaps, so just move along until we find a normal
2914 * extent entry.
2915 */
2916 while (entry->bitmap || entry->bytes < min_bytes) {
2917 if (entry->bitmap && list_empty(&entry->list))
2918 list_add_tail(&entry->list, bitmaps);
2919 node = rb_next(&entry->offset_index);
2920 if (!node)
2921 return -ENOSPC;
2922 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2923 }
2924
2925 window_free = entry->bytes;
2926 max_extent = entry->bytes;
2927 first = entry;
2928 last = entry;
2929
2930 for (node = rb_next(&entry->offset_index); node;
2931 node = rb_next(&entry->offset_index)) {
2932 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2933
2934 if (entry->bitmap) {
2935 if (list_empty(&entry->list))
2936 list_add_tail(&entry->list, bitmaps);
2937 continue;
2938 }
2939
2940 if (entry->bytes < min_bytes)
2941 continue;
2942
2943 last = entry;
2944 window_free += entry->bytes;
2945 if (entry->bytes > max_extent)
2946 max_extent = entry->bytes;
2947 }
2948
2949 if (window_free < bytes || max_extent < cont1_bytes)
2950 return -ENOSPC;
2951
2952 cluster->window_start = first->offset;
2953
2954 node = &first->offset_index;
2955
2956 /*
2957 * now we've found our entries, pull them out of the free space
2958 * cache and put them into the cluster rbtree
2959 */
2960 do {
2961 int ret;
2962
2963 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2964 node = rb_next(&entry->offset_index);
2965 if (entry->bitmap || entry->bytes < min_bytes)
2966 continue;
2967
2968 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2969 ret = tree_insert_offset(&cluster->root, entry->offset,
2970 &entry->offset_index, 0);
2971 total_size += entry->bytes;
2972 ASSERT(!ret); /* -EEXIST; Logic error */
2973 } while (node && entry != last);
2974
2975 cluster->max_size = max_extent;
2976 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
2977 return 0;
2978}
2979
2980/*
2981 * This specifically looks for bitmaps that may work in the cluster; we assume
2982 * that we have already failed to find extents that will work.
2983 */
2984static noinline int
2985setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2986 struct btrfs_free_cluster *cluster,
2987 struct list_head *bitmaps, u64 offset, u64 bytes,
2988 u64 cont1_bytes, u64 min_bytes)
2989{
2990 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2991 struct btrfs_free_space *entry = NULL;
2992 int ret = -ENOSPC;
2993 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2994
2995 if (ctl->total_bitmaps == 0)
2996 return -ENOSPC;
2997
2998 /*
2999 * The bitmap that covers offset won't be in the list unless offset
3000 * is just its start offset.
3001 */
3002 if (!list_empty(bitmaps))
3003 entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
3004
3005 if (!entry || entry->offset != bitmap_offset) {
3006 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
3007 if (entry && list_empty(&entry->list))
3008 list_add(&entry->list, bitmaps);
3009 }
3010
3011 list_for_each_entry(entry, bitmaps, list) {
3012 if (entry->bytes < bytes)
3013 continue;
3014 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
3015 bytes, cont1_bytes, min_bytes);
3016 if (!ret)
3017 return 0;
3018 }
3019
3020 /*
3021 * The bitmaps list has all the bitmaps that record free space
3022 * starting after offset, so no more search is required.
3023 */
3024 return -ENOSPC;
3025}
3026
3027/*
3028 * Here we try to find a cluster of blocks in a block group. The goal
3029 * is to find at least bytes + empty_size.
3030 * We might not find them all in one contiguous area.
3031 *
3032 * Returns zero and sets up the cluster if things worked out, otherwise
3033 * it returns -ENOSPC.
3034 */
3035int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
3036 struct btrfs_block_group_cache *block_group,
3037 struct btrfs_free_cluster *cluster,
3038 u64 offset, u64 bytes, u64 empty_size)
3039{
3040 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3041 struct btrfs_free_space *entry, *tmp;
3042 LIST_HEAD(bitmaps);
3043 u64 min_bytes;
3044 u64 cont1_bytes;
3045 int ret;
3046
3047	/*
3048	 * Choose the minimum extent size we'll require for this
3049	 * cluster. For SSD_SPREAD, don't allow any fragmentation.
3050	 * For metadata, allow allocations with smaller extents. For
3051	 * data, keep it dense.
3052	 */
3053 if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
3054 cont1_bytes = min_bytes = bytes + empty_size;
3055 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
3056 cont1_bytes = bytes;
3057 min_bytes = fs_info->sectorsize;
3058 } else {
3059 cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
3060 min_bytes = fs_info->sectorsize;
3061 }
3062
3063 spin_lock(&ctl->tree_lock);
3064
3065 /*
3066	 * If we know we don't have enough space to make a cluster, don't even
3067 * bother doing all the work to try and find one.
3068 */
3069 if (ctl->free_space < bytes) {
3070 spin_unlock(&ctl->tree_lock);
3071 return -ENOSPC;
3072 }
3073
3074 spin_lock(&cluster->lock);
3075
3076 /* someone already found a cluster, hooray */
3077 if (cluster->block_group) {
3078 ret = 0;
3079 goto out;
3080 }
3081
3082 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
3083 min_bytes);
3084
3085 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
3086 bytes + empty_size,
3087 cont1_bytes, min_bytes);
3088 if (ret)
3089 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
3090 offset, bytes + empty_size,
3091 cont1_bytes, min_bytes);
3092
3093 /* Clear our temporary list */
3094 list_for_each_entry_safe(entry, tmp, &bitmaps, list)
3095 list_del_init(&entry->list);
3096
3097 if (!ret) {
3098 atomic_inc(&block_group->count);
3099 list_add_tail(&cluster->block_group_list,
3100 &block_group->cluster_list);
3101 cluster->block_group = block_group;
3102 } else {
3103 trace_btrfs_failed_cluster_setup(block_group);
3104 }
3105out:
3106 spin_unlock(&cluster->lock);
3107 spin_unlock(&ctl->tree_lock);
3108
3109 return ret;
3110}
3111
3112/*
3113 * simple code to zero out a cluster
3114 */
3115void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
3116{
3117 spin_lock_init(&cluster->lock);
3118 spin_lock_init(&cluster->refill_lock);
3119 cluster->root = RB_ROOT;
3120 cluster->max_size = 0;
3121 cluster->fragmented = false;
3122 INIT_LIST_HEAD(&cluster->block_group_list);
3123 cluster->block_group = NULL;
3124}
3125
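/*
 * Discard the range [start, start + bytes) and give the reserved range back
 * to the free space cache when done. While the discard is in flight, the
 * reserved range is accounted as reserved in the space_info so that the
 * allocator won't hand it out again.
 */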
3126static int do_trimming(struct btrfs_block_group_cache *block_group,
3127 u64 *total_trimmed, u64 start, u64 bytes,
3128 u64 reserved_start, u64 reserved_bytes,
3129 struct btrfs_trim_range *trim_entry)
3130{
3131 struct btrfs_space_info *space_info = block_group->space_info;
3132 struct btrfs_fs_info *fs_info = block_group->fs_info;
3133 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3134 int ret;
3135 int update = 0;
3136 u64 trimmed = 0;
3137
3138 spin_lock(&space_info->lock);
3139 spin_lock(&block_group->lock);
3140 if (!block_group->ro) {
3141 block_group->reserved += reserved_bytes;
3142 space_info->bytes_reserved += reserved_bytes;
3143 update = 1;
3144 }
3145 spin_unlock(&block_group->lock);
3146 spin_unlock(&space_info->lock);
3147
3148 ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
3149 if (!ret)
3150 *total_trimmed += trimmed;
3151
3152 mutex_lock(&ctl->cache_writeout_mutex);
3153 btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
3154 list_del(&trim_entry->list);
3155 mutex_unlock(&ctl->cache_writeout_mutex);
3156
3157 if (update) {
3158 spin_lock(&space_info->lock);
3159 spin_lock(&block_group->lock);
3160 if (block_group->ro)
3161 space_info->bytes_readonly += reserved_bytes;
3162 block_group->reserved -= reserved_bytes;
3163 space_info->bytes_reserved -= reserved_bytes;
3164 spin_unlock(&space_info->lock);
3165 spin_unlock(&block_group->lock);
3166 }
3167
3168 return ret;
3169}
3170
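/*
 * Walk the extent entries of @block_group within [start, end) and discard
 * every free extent of at least @minlen bytes. Each extent is unlinked from
 * the cache while the discard runs and re-added by do_trimming() when it
 * completes.
 */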
3171static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
3172 u64 *total_trimmed, u64 start, u64 end, u64 minlen)
3173{
3174 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3175 struct btrfs_free_space *entry;
3176 struct rb_node *node;
3177 int ret = 0;
3178 u64 extent_start;
3179 u64 extent_bytes;
3180 u64 bytes;
3181
3182 while (start < end) {
3183 struct btrfs_trim_range trim_entry;
3184
3185 mutex_lock(&ctl->cache_writeout_mutex);
3186 spin_lock(&ctl->tree_lock);
3187
3188 if (ctl->free_space < minlen) {
3189 spin_unlock(&ctl->tree_lock);
3190 mutex_unlock(&ctl->cache_writeout_mutex);
3191 break;
3192 }
3193
3194 entry = tree_search_offset(ctl, start, 0, 1);
3195 if (!entry) {
3196 spin_unlock(&ctl->tree_lock);
3197 mutex_unlock(&ctl->cache_writeout_mutex);
3198 break;
3199 }
3200
3201 /* skip bitmaps */
3202 while (entry->bitmap) {
3203 node = rb_next(&entry->offset_index);
3204 if (!node) {
3205 spin_unlock(&ctl->tree_lock);
3206 mutex_unlock(&ctl->cache_writeout_mutex);
3207 goto out;
3208 }
3209 entry = rb_entry(node, struct btrfs_free_space,
3210 offset_index);
3211 }
3212
3213 if (entry->offset >= end) {
3214 spin_unlock(&ctl->tree_lock);
3215 mutex_unlock(&ctl->cache_writeout_mutex);
3216 break;
3217 }
3218
3219 extent_start = entry->offset;
3220 extent_bytes = entry->bytes;
3221 start = max(start, extent_start);
3222 bytes = min(extent_start + extent_bytes, end) - start;
3223 if (bytes < minlen) {
3224 spin_unlock(&ctl->tree_lock);
3225 mutex_unlock(&ctl->cache_writeout_mutex);
3226 goto next;
3227 }
3228
3229 unlink_free_space(ctl, entry);
3230 kmem_cache_free(btrfs_free_space_cachep, entry);
3231
3232 spin_unlock(&ctl->tree_lock);
3233 trim_entry.start = extent_start;
3234 trim_entry.bytes = extent_bytes;
3235 list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3236 mutex_unlock(&ctl->cache_writeout_mutex);
3237
3238 ret = do_trimming(block_group, total_trimmed, start, bytes,
3239 extent_start, extent_bytes, &trim_entry);
3240 if (ret)
3241 break;
3242next:
3243 start += bytes;
3244
3245 if (fatal_signal_pending(current)) {
3246 ret = -ERESTARTSYS;
3247 break;
3248 }
3249
3250 cond_resched();
3251 }
3252out:
3253 return ret;
3254}
3255
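/*
 * Same as trim_no_bitmap() but for bitmap entries: walk the bitmaps covering
 * [start, end) and discard every run of free bits of at least @minlen bytes.
 */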
3256static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
3257 u64 *total_trimmed, u64 start, u64 end, u64 minlen)
3258{
3259 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3260 struct btrfs_free_space *entry;
3261 int ret = 0;
3262 int ret2;
3263 u64 bytes;
3264 u64 offset = offset_to_bitmap(ctl, start);
3265
3266 while (offset < end) {
3267 bool next_bitmap = false;
3268 struct btrfs_trim_range trim_entry;
3269
3270 mutex_lock(&ctl->cache_writeout_mutex);
3271 spin_lock(&ctl->tree_lock);
3272
3273 if (ctl->free_space < minlen) {
3274 spin_unlock(&ctl->tree_lock);
3275 mutex_unlock(&ctl->cache_writeout_mutex);
3276 break;
3277 }
3278
3279 entry = tree_search_offset(ctl, offset, 1, 0);
3280 if (!entry) {
3281 spin_unlock(&ctl->tree_lock);
3282 mutex_unlock(&ctl->cache_writeout_mutex);
3283 next_bitmap = true;
3284 goto next;
3285 }
3286
3287 bytes = minlen;
3288 ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
3289 if (ret2 || start >= end) {
3290 spin_unlock(&ctl->tree_lock);
3291 mutex_unlock(&ctl->cache_writeout_mutex);
3292 next_bitmap = true;
3293 goto next;
3294 }
3295
3296 bytes = min(bytes, end - start);
3297 if (bytes < minlen) {
3298 spin_unlock(&ctl->tree_lock);
3299 mutex_unlock(&ctl->cache_writeout_mutex);
3300 goto next;
3301 }
3302
3303 bitmap_clear_bits(ctl, entry, start, bytes);
3304 if (entry->bytes == 0)
3305 free_bitmap(ctl, entry);
3306
3307 spin_unlock(&ctl->tree_lock);
3308 trim_entry.start = start;
3309 trim_entry.bytes = bytes;
3310 list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3311 mutex_unlock(&ctl->cache_writeout_mutex);
3312
3313 ret = do_trimming(block_group, total_trimmed, start, bytes,
3314 start, bytes, &trim_entry);
3315 if (ret)
3316 break;
3317next:
3318 if (next_bitmap) {
3319 offset += BITS_PER_BITMAP * ctl->unit;
3320 } else {
3321 start += bytes;
3322 if (start >= offset + BITS_PER_BITMAP * ctl->unit)
3323 offset += BITS_PER_BITMAP * ctl->unit;
3324 }
3325
3326 if (fatal_signal_pending(current)) {
3327 ret = -ERESTARTSYS;
3328 break;
3329 }
3330
3331 cond_resched();
3332 }
3333
3334 return ret;
3335}
3336
3337void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
3338{
3339 atomic_inc(&cache->trimming);
3340}
3341
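/*
 * Drop a trimming reference on @block_group. If this was the last reference
 * and the block group was removed in the meantime, finish the cleanup that
 * removal deferred: drop the chunk's extent map and free any remaining free
 * space entries.
 */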
3342void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
3343{
3344 struct btrfs_fs_info *fs_info = block_group->fs_info;
3345 struct extent_map_tree *em_tree;
3346 struct extent_map *em;
3347 bool cleanup;
3348
3349 spin_lock(&block_group->lock);
3350 cleanup = (atomic_dec_and_test(&block_group->trimming) &&
3351 block_group->removed);
3352 spin_unlock(&block_group->lock);
3353
3354 if (cleanup) {
3355 mutex_lock(&fs_info->chunk_mutex);
3356 em_tree = &fs_info->mapping_tree.map_tree;
3357 write_lock(&em_tree->lock);
3358 em = lookup_extent_mapping(em_tree, block_group->key.objectid,
3359 1);
3360 BUG_ON(!em); /* logic error, can't happen */
3361 /*
3362 * remove_extent_mapping() will delete us from the pinned_chunks
3363 * list, which is protected by the chunk mutex.
3364 */
3365 remove_extent_mapping(em_tree, em);
3366 write_unlock(&em_tree->lock);
3367 mutex_unlock(&fs_info->chunk_mutex);
3368
3369 /* once for us and once for the tree */
3370 free_extent_map(em);
3371 free_extent_map(em);
3372
3373 /*
3374 * We've left one free space entry and other tasks trimming
3375		 * this block group have left 1 entry each. Free them.
3376 */
3377 __btrfs_remove_free_space_cache(block_group->free_space_ctl);
3378 }
3379}
3380
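/*
 * Discard all free space of @block_group within [start, end) that is at
 * least @minlen bytes long, first from extent entries and then from bitmaps.
 * A trimming reference pins the block group while we work on it.
 */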
3381int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
3382 u64 *trimmed, u64 start, u64 end, u64 minlen)
3383{
3384 int ret;
3385
3386 *trimmed = 0;
3387
3388 spin_lock(&block_group->lock);
3389 if (block_group->removed) {
3390 spin_unlock(&block_group->lock);
3391 return 0;
3392 }
3393 btrfs_get_block_group_trimming(block_group);
3394 spin_unlock(&block_group->lock);
3395
3396 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
3397 if (ret)
3398 goto out;
3399
3400 ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
3401out:
3402 btrfs_put_block_group_trimming(block_group);
3403 return ret;
3404}
3405
3406/*
3407 * Find the left-most item in the cache tree, and then return the
3408 * smallest inode number in the item.
3409 *
3410 * Note: the returned inode number may not be the smallest one in
3411 * the tree, if the left-most item is a bitmap.
3412 */
3413u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
3414{
3415 struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
3416 struct btrfs_free_space *entry = NULL;
3417 u64 ino = 0;
3418
3419 spin_lock(&ctl->tree_lock);
3420
3421 if (RB_EMPTY_ROOT(&ctl->free_space_offset))
3422 goto out;
3423
3424 entry = rb_entry(rb_first(&ctl->free_space_offset),
3425 struct btrfs_free_space, offset_index);
3426
3427 if (!entry->bitmap) {
3428 ino = entry->offset;
3429
3430 unlink_free_space(ctl, entry);
3431 entry->offset++;
3432 entry->bytes--;
3433 if (!entry->bytes)
3434 kmem_cache_free(btrfs_free_space_cachep, entry);
3435 else
3436 link_free_space(ctl, entry);
3437 } else {
3438 u64 offset = 0;
3439 u64 count = 1;
3440 int ret;
3441
3442 ret = search_bitmap(ctl, entry, &offset, &count, true);
3443		/* Logic error; should be empty if it can't find anything */
3444 ASSERT(!ret);
3445
3446 ino = offset;
3447 bitmap_clear_bits(ctl, entry, offset, 1);
3448 if (entry->bytes == 0)
3449 free_bitmap(ctl, entry);
3450 }
3451out:
3452 spin_unlock(&ctl->tree_lock);
3453
3454 return ino;
3455}
3456
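/*
 * Return the inode backing the free inode number cache of @root, reusing
 * the one cached in root->ino_cache_inode when possible and looking it up
 * on disk otherwise.
 */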
3457struct inode *lookup_free_ino_inode(struct btrfs_root *root,
3458 struct btrfs_path *path)
3459{
3460 struct inode *inode = NULL;
3461
3462 spin_lock(&root->ino_cache_lock);
3463 if (root->ino_cache_inode)
3464 inode = igrab(root->ino_cache_inode);
3465 spin_unlock(&root->ino_cache_lock);
3466 if (inode)
3467 return inode;
3468
3469 inode = __lookup_free_space_inode(root, path, 0);
3470 if (IS_ERR(inode))
3471 return inode;
3472
3473 spin_lock(&root->ino_cache_lock);
3474 if (!btrfs_fs_closing(root->fs_info))
3475 root->ino_cache_inode = igrab(inode);
3476 spin_unlock(&root->ino_cache_lock);
3477
3478 return inode;
3479}
3480
3481int create_free_ino_inode(struct btrfs_root *root,
3482 struct btrfs_trans_handle *trans,
3483 struct btrfs_path *path)
3484{
3485 return __create_free_space_inode(root, trans, path,
3486 BTRFS_FREE_INO_OBJECTID, 0);
3487}
3488
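/*
 * Load the free inode number cache of @root from disk into root->free_ino_ctl.
 * This is best effort: a stale cache (generation mismatch), a filesystem that
 * is being unmounted or a disabled INODE_MAP_CACHE mount option all make us
 * skip the load quietly.
 */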
3489int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
3490{
3491 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
3492 struct btrfs_path *path;
3493 struct inode *inode;
3494 int ret = 0;
3495 u64 root_gen = btrfs_root_generation(&root->root_item);
3496
3497 if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
3498 return 0;
3499
3500 /*
3501 * If we're unmounting then just return, since this does a search on the
3502 * normal root and not the commit root and we could deadlock.
3503 */
3504 if (btrfs_fs_closing(fs_info))
3505 return 0;
3506
3507 path = btrfs_alloc_path();
3508 if (!path)
3509 return 0;
3510
3511 inode = lookup_free_ino_inode(root, path);
3512 if (IS_ERR(inode))
3513 goto out;
3514
3515 if (root_gen != BTRFS_I(inode)->generation)
3516 goto out_put;
3517
3518 ret = __load_free_space_cache(root, inode, ctl, path, 0);
3519
3520 if (ret < 0)
3521 btrfs_err(fs_info,
3522 "failed to load free ino cache for root %llu",
3523 root->root_key.objectid);
3524out_put:
3525 iput(inode);
3526out:
3527 btrfs_free_path(path);
3528 return ret;
3529}
3530
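/*
 * Write the free inode number cache of @root back to disk. Once writeback has
 * been started, the metadata reservation is released by the ordered io
 * completion path instead of here.
 */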
3531int btrfs_write_out_ino_cache(struct btrfs_root *root,
3532 struct btrfs_trans_handle *trans,
3533 struct btrfs_path *path,
3534 struct inode *inode)
3535{
3536 struct btrfs_fs_info *fs_info = root->fs_info;
3537 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
3538 int ret;
3539 struct btrfs_io_ctl io_ctl;
3540 bool release_metadata = true;
3541
3542 if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
3543 return 0;
3544
3545 memset(&io_ctl, 0, sizeof(io_ctl));
3546 ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
3547 trans, path, 0);
3548 if (!ret) {
3549 /*
3550 * At this point writepages() didn't error out, so our metadata
3551 * reservation is released when the writeback finishes, at
3552 * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
3553 * with or without an error.
3554 */
3555 release_metadata = false;
3556 ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
3557 }
3558
3559 if (ret) {
3560		if (release_metadata)
3561			btrfs_delalloc_release_metadata(BTRFS_I(inode), inode->i_size, true);
3562#ifdef DEBUG
3563 btrfs_err(fs_info,
3564 "failed to write free ino cache for root %llu",
3565 root->root_key.objectid);
3566#endif
3567 }
3568
3569 return ret;
3570}
3571
3572#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3573/*
3574 * Use this if you need to make a bitmap or extent entry specifically. It
3575 * doesn't do any of the merging that add_free_space does; this acts a lot
3576 * like how the free space cache loading stuff works, so you can get really
3577 * weird configurations.
3578 */
3579int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
3580 u64 offset, u64 bytes, bool bitmap)
3581{
3582 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
3583 struct btrfs_free_space *info = NULL, *bitmap_info;
3584 void *map = NULL;
3585 u64 bytes_added;
3586 int ret;
3587
3588again:
3589 if (!info) {
3590 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
3591 if (!info)
3592 return -ENOMEM;
3593 }
3594
3595 if (!bitmap) {
3596 spin_lock(&ctl->tree_lock);
3597 info->offset = offset;
3598 info->bytes = bytes;
3599 info->max_extent_size = 0;
3600 ret = link_free_space(ctl, info);
3601 spin_unlock(&ctl->tree_lock);
3602 if (ret)
3603 kmem_cache_free(btrfs_free_space_cachep, info);
3604 return ret;
3605 }
3606
3607 if (!map) {
3608 map = kzalloc(PAGE_SIZE, GFP_NOFS);
3609 if (!map) {
3610 kmem_cache_free(btrfs_free_space_cachep, info);
3611 return -ENOMEM;
3612 }
3613 }
3614
3615 spin_lock(&ctl->tree_lock);
3616 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
3617 1, 0);
3618 if (!bitmap_info) {
3619 info->bitmap = map;
3620 map = NULL;
3621 add_new_bitmap(ctl, info, offset);
3622 bitmap_info = info;
3623 info = NULL;
3624 }
3625
3626 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
3627
3628 bytes -= bytes_added;
3629 offset += bytes_added;
3630 spin_unlock(&ctl->tree_lock);
3631
3632 if (bytes)
3633 goto again;
3634
3635 if (info)
3636 kmem_cache_free(btrfs_free_space_cachep, info);
3637	kfree(map);
3639 return 0;
3640}
3641
3642/*
3643 * Checks to see if the given range is in the free space cache. This is really
3644 * just used to check the absence of space, so if there is free space in the
3645 * range at all we will return 1.
3646 */
3647int test_check_exists(struct btrfs_block_group_cache *cache,
3648 u64 offset, u64 bytes)
3649{
3650 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
3651 struct btrfs_free_space *info;
3652 int ret = 0;
3653
3654 spin_lock(&ctl->tree_lock);
3655 info = tree_search_offset(ctl, offset, 0, 0);
3656 if (!info) {
3657 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
3658 1, 0);
3659 if (!info)
3660 goto out;
3661 }
3662
3663have_info:
3664 if (info->bitmap) {
3665 u64 bit_off, bit_bytes;
3666 struct rb_node *n;
3667 struct btrfs_free_space *tmp;
3668
3669 bit_off = offset;
3670 bit_bytes = ctl->unit;
3671 ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
3672 if (!ret) {
3673 if (bit_off == offset) {
3674 ret = 1;
3675 goto out;
3676 } else if (bit_off > offset &&
3677 offset + bytes > bit_off) {
3678 ret = 1;
3679 goto out;
3680 }
3681 }
3682
3683 n = rb_prev(&info->offset_index);
3684 while (n) {
3685 tmp = rb_entry(n, struct btrfs_free_space,
3686 offset_index);
3687 if (tmp->offset + tmp->bytes < offset)
3688 break;
3689 if (offset + bytes < tmp->offset) {
3690 n = rb_prev(&tmp->offset_index);
3691 continue;
3692 }
3693 info = tmp;
3694 goto have_info;
3695 }
3696
3697 n = rb_next(&info->offset_index);
3698 while (n) {
3699 tmp = rb_entry(n, struct btrfs_free_space,
3700 offset_index);
3701 if (offset + bytes < tmp->offset)
3702 break;
3703 if (tmp->offset + tmp->bytes < offset) {
3704 n = rb_next(&tmp->offset_index);
3705 continue;
3706 }
3707 info = tmp;
3708 goto have_info;
3709 }
3710
3711 ret = 0;
3712 goto out;
3713 }
3714
3715 if (info->offset == offset) {
3716 ret = 1;
3717 goto out;
3718 }
3719
3720 if (offset > info->offset && offset < info->offset + info->bytes)
3721 ret = 1;
3722out:
3723 spin_unlock(&ctl->tree_lock);
3724 return ret;
3725}
3726#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */