// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"
#include "raid-stripe-tree.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

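/*
 * Mapping geometry of an I/O within a chunk: the stripe the logical address
 * falls into, the offset inside that stripe, and the mirror/parity limits
 * implied by the chunk's RAID profile.
 */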
struct btrfs_io_geometry {
	u32 stripe_index;
	u32 stripe_nr;
	int mirror_num;
	int num_stripes;
	u64 stripe_offset;
	u64 raid56_full_stripe_start;
	int max_errors;
	enum btrfs_map_op op;
};

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity	= 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity	= 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as an index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a sufficiently
	 * large buffer.
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * Allocate new btrfs_fs_devices structure identified by a fsid.
 *
 * @fsid:	if not NULL, copy the UUID to fs_devices::fsid and to
 *		fs_devices::metadata_uuid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);

	if (fsid) {
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
	}

	return fs_devs;
}

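/*
 * Free one btrfs_device including its name, allocation state tracking and
 * zone info. The device must not be on a transaction's post_commit_list.
 */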
static void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

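/*
 * Free an unopened btrfs_fs_devices together with all btrfs_device structures
 * still linked on its device list.
 */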
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

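/* Module exit helper: release every fs_devices left on the global list. */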
void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

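/*
 * Check whether @fs_devices matches @fsid, and @metadata_fsid too when the
 * latter is not NULL (NULL means fsid-only matching).
 */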
static bool match_fsid_fs_devices(const struct btrfs_fs_devices *fs_devices,
				  const u8 *fsid, const u8 *metadata_fsid)
{
	if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0)
		return false;

	if (!metadata_fsid)
		return true;

	if (memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0)
		return false;

	return true;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (match_fsid_fs_devices(fs_devices, fsid, metadata_fsid))
			return fs_devices;
	}
	return NULL;
}

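/*
 * Open the block device at @device_path, optionally flush it, set the btrfs
 * block size and read the primary super block. On success the caller owns
 * the bdev handle and the super block reference.
 */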
static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
		      int flush, struct bdev_handle **bdev_handle,
		      struct btrfs_super_block **disk_super)
{
	struct block_device *bdev;
	int ret;

	*bdev_handle = bdev_open_by_path(device_path, flags, holder, NULL);

	if (IS_ERR(*bdev_handle)) {
		ret = PTR_ERR(*bdev_handle);
		goto error;
	}
	bdev = (*bdev_handle)->bdev;

	if (flush)
		sync_blockdev(bdev);
	ret = set_blocksize(bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		bdev_release(*bdev_handle);
		goto error;
	}
	invalidate_bdev(bdev);
	*disk_super = btrfs_read_dev_super(bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		bdev_release(*bdev_handle);
		goto error;
	}

	return 0;

error:
	*bdev_handle = NULL;
	return ret;
}

/*
 * Search and remove all stale devices (which are not mounted). When both
 * inputs are NULL, it will search and release all stale devices.
 *
 * @devt:	 Optional. When provided, release only unmounted devices
 *		 matching this devt.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *		 devices.
 *
 * Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret;
	bool freed = false;

	lockdep_assert_held(&uuid_mutex);

	/* Return good status if there is no instance of devt. */
	ret = 0;
	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				if (devt)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			freed = true;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	/* If there is at least one freed device return 0. */
	if (freed)
		return 0;

	return ret;
}

static struct btrfs_fs_devices *find_fsid_by_device(
					struct btrfs_super_block *disk_super,
					dev_t devt, bool *same_fsid_diff_dev)
{
	struct btrfs_fs_devices *fsid_fs_devices;
	struct btrfs_fs_devices *devt_fs_devices;
	const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
					BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool found_by_devt = false;

	/* Find the fs_device by the usual method, if found use it. */
	fsid_fs_devices = find_fsid(disk_super->fsid,
		    has_metadata_uuid ? disk_super->metadata_uuid : NULL);

	/* The temp_fsid feature is supported only on single-device filesystems. */
	if (btrfs_super_num_devices(disk_super) != 1)
		return fsid_fs_devices;

	/*
	 * A seed device is an integral component of the sprout device, which
	 * functions as a multi-device filesystem, so the temp-fsid feature is
	 * not supported.
	 */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
		return fsid_fs_devices;

	/* Try to find a fs_devices by matching devt. */
	list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
		struct btrfs_device *device;

		list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
			if (device->devt == devt) {
				found_by_devt = true;
				break;
			}
		}
		if (found_by_devt)
			break;
	}

	if (found_by_devt) {
		/* Existing device. */
		if (fsid_fs_devices == NULL) {
			if (devt_fs_devices->opened == 0) {
				/* Stale device. */
				return NULL;
			} else {
				/* temp_fsid is mounting a subvol. */
				return devt_fs_devices;
			}
		} else {
			/* Regular or temp_fsid device mounting a subvol. */
			return devt_fs_devices;
		}
	} else {
		/* New device. */
		if (fsid_fs_devices == NULL) {
			return NULL;
		} else {
			/* sb::fsid is already used, create a new temp_fsid. */
			*same_fsid_diff_dev = true;
			return NULL;
		}
	}

	/* Not reached. */
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, blk_mode_t flags,
			void *holder)
{
	struct bdev_handle *bdev_handle;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev_handle, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev_handle->bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(bdev_handle->bdev))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(bdev_handle->bdev))
		fs_devices->discardable = true;

	device->bdev_handle = bdev_handle;
	device->bdev = bdev_handle->bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	bdev_release(bdev_handle);

	return -EINVAL;
}

713
714u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
715{
716 bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
717 BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
718
719 return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
720}
721
722/*
723 * Add new device to list of registered devices
724 *
725 * Returns:
726 * device pointer which was just added or updated when successful
727 * error pointer when failed
728 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool same_fsid_diff_dev = false;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
		btrfs_err(NULL,
"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
			  path);
		return ERR_PTR(-EAGAIN);
	}

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);

	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		if (has_metadata_uuid)
			memcpy(fs_devices->metadata_uuid,
			       disk_super->metadata_uuid, BTRFS_FSID_SIZE);

		if (same_fsid_diff_dev) {
			generate_random_uuid(fs_devices->fsid);
			fs_devices->temp_fsid = true;
			pr_info("BTRFS: device %s using temp-fsid %pU\n",
				path, fs_devices->fsid);
		}

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		if (found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
			       btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
"device %s belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
				  path, fs_devices->fsid, current->comm,
				  task_pid_nr(current));
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * the generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

937
938static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
939{
940 struct btrfs_fs_devices *fs_devices;
941 struct btrfs_device *device;
942 struct btrfs_device *orig_dev;
943 int ret = 0;
944
945 lockdep_assert_held(&uuid_mutex);
946
947 fs_devices = alloc_fs_devices(orig->fsid);
948 if (IS_ERR(fs_devices))
949 return fs_devices;
950
951 fs_devices->total_devices = orig->total_devices;
952
953 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
954 const char *dev_path = NULL;
955
956 /*
957 * This is ok to do without RCU read locked because we hold the
958 * uuid mutex so nothing we touch in here is going to disappear.
959 */
960 if (orig_dev->name)
961 dev_path = orig_dev->name->str;
962
963 device = btrfs_alloc_device(NULL, &orig_dev->devid,
964 orig_dev->uuid, dev_path);
965 if (IS_ERR(device)) {
966 ret = PTR_ERR(device);
967 goto error;
968 }
969
970 if (orig_dev->zone_info) {
971 struct btrfs_zoned_device_info *zone_info;
972
973 zone_info = btrfs_clone_dev_zone_info(orig_dev);
974 if (!zone_info) {
975 btrfs_free_device(device);
976 ret = -ENOMEM;
977 goto error;
978 }
979 device->zone_info = zone_info;
980 }
981
982 list_add(&device->dev_list, &fs_devices->devices);
983 device->fs_devices = fs_devices;
984 fs_devices->num_devices++;
985 }
986 return fs_devices;
987error:
988 free_fs_devices(fs_devices);
989 return ERR_PTR(ret);
990}
991
992static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
993 struct btrfs_device **latest_dev)
994{
995 struct btrfs_device *device, *next;
996
997 /* This is the initialized path, it is safe to release the devices. */
998 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
999 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
1000 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1001 &device->dev_state) &&
1002 !test_bit(BTRFS_DEV_STATE_MISSING,
1003 &device->dev_state) &&
1004 (!*latest_dev ||
1005 device->generation > (*latest_dev)->generation)) {
1006 *latest_dev = device;
1007 }
1008 continue;
1009 }
1010
1011 /*
1012 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
1013 * in btrfs_init_dev_replace() so just continue.
1014 */
1015 if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1016 continue;
1017
1018 if (device->bdev_handle) {
1019 bdev_release(device->bdev_handle);
1020 device->bdev = NULL;
1021 device->bdev_handle = NULL;
1022 fs_devices->open_devices--;
1023 }
1024 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1025 list_del_init(&device->dev_alloc_list);
1026 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1027 fs_devices->rw_devices--;
1028 }
1029 list_del_init(&device->dev_list);
1030 fs_devices->num_devices--;
1031 btrfs_free_device(device);
1032 }
1033
1034}
1035
/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove any device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

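/* Sync and invalidate a writeable device's block device, then release it. */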
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	bdev_release(device->bdev_handle);
}

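/*
 * Close one device and reset its state so the btrfs_device structure is back
 * in the pristine state expected before a (re)open.
 */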
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	WARN_ON(!list_empty(&device->dev_alloc_list));
	WARN_ON(!list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

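/*
 * Open all devices of @fs_devices. Devices for which no btrfs super block
 * can be read anymore (-ENODATA) are dropped from the list; the device with
 * the highest generation becomes fs_devices::latest_dev.
 */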
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   blk_mode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

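/* list_sort() comparator that orders devices by ascending devid. */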
static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

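/*
 * Read one super block copy at @bytenr through the page cache and perform
 * basic sanity checks. @bytenr_orig is the offset recorded inside the super
 * block itself; on zoned devices it differs from the physical @bytenr.
 */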
static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache.
 *
 * With @mount_arg_dev it's a scan during mount time that will always register
 * the device or return an error. Multi-device and seeding devices are registered
 * in both cases.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
					   bool mount_arg_dev)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct bdev_handle *bdev_handle;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */

	/*
	 * Avoid an exclusive open here, as systemd-udev may initiate a device
	 * scan which may race with the user's mount or mkfs command, resulting
	 * in failure.
	 * Since the device scan is solely for reading purposes, there is no
	 * need for an exclusive open. Additionally, the devices are read again
	 * during the mount process. It is ok to get some inconsistent
	 * values temporarily, as the device paths of the fsid are the only
	 * required information for assembling the volume.
	 */
	bdev_handle = bdev_open_by_path(path, flags, NULL, NULL);
	if (IS_ERR(bdev_handle))
		return ERR_CAST(bdev_handle);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev_handle->bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev_handle->bdev, bytenr,
					   bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
	    !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)) {
		dev_t devt;

		ret = lookup_bdev(path, &devt);
		if (ret)
			btrfs_warn(NULL, "lookup bdev failed for path %s: %d",
				   path, ret);
		else
			btrfs_free_stale_devices(devt, NULL);

		pr_debug("BTRFS: skip registering single non-seed device %s\n", path);
		device = NULL;
		goto free_disk_super;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

free_disk_super:
	btrfs_release_disk_super(disk_super);

error_bdev_put:
	bdev_release(bdev_handle);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (find_first_extent_bit(&device->alloc_state, *start,
				  &physical_start, &physical_end,
				  CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

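/* First device offset that chunk allocation may use for the given policy. */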
static u64 dev_extent_search_start(struct btrfs_device *device)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return BTRFS_DEVICE_RANGE_RESERVED;
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like the regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return 0;
	default:
		BUG();
	}
}

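/*
 * Zoned variant of the dev extent hole check: advance or shrink the hole
 * until it starts at allocatable zones that can be made empty for @num_bytes.
 * Returns true if the hole was modified.
 */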
static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * Find free space in the specified device.
 *
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find it. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
				u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 search_start;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size = 0;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device);
	max_hole_start = search_start;

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

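/*
 * Remove the dev extent item covering @start on @device from the device tree
 * and return the extent's length in @dev_extent_len.
 */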
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

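/* Logical start offset for a new chunk: the end of the last mapped chunk. */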
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	u64 ret = 0;

	read_lock(&fs_info->mapping_tree_lock);
	n = rb_last(&fs_info->mapping_tree.rb_root);
	if (n) {
		struct btrfs_chunk_map *map;

		map = rb_entry(n, struct btrfs_chunk_map, rb_node);
		ret = map->start + map->chunk_len;
	}
	read_unlock(&fs_info->mapping_tree_lock);

	return ret;
}

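/*
 * Find the devid for a new device: one past the highest devid currently
 * present in the chunk tree (or 1 if there are no dev items yet).
 */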
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	btrfs_reserve_chunk_metadata(trans, true);
	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	btrfs_trans_release_chunk_metadata(trans);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(trans, leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Update ctime/mtime for a given device path.
 * Mainly used by ctime/mtime-based probes like libblkid.
 *
 * We don't care about errors here, this is just to be kind to userspace.
 */
static void update_dev_time(const char *device_path)
{
	struct path path;
	int ret;

	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
	if (ret)
		return;

	inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION);
	path_put(&path);
}

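/* Remove the dev item of @device from the chunk tree. */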
1894static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
1895 struct btrfs_device *device)
1896{
1897 struct btrfs_root *root = device->fs_info->chunk_root;
1898 int ret;
1899 struct btrfs_path *path;
1900 struct btrfs_key key;
1901
1902 path = btrfs_alloc_path();
1903 if (!path)
1904 return -ENOMEM;
1905
1906 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1907 key.type = BTRFS_DEV_ITEM_KEY;
1908 key.offset = device->devid;
1909
1910 btrfs_reserve_chunk_metadata(trans, false);
1911 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1912 btrfs_trans_release_chunk_metadata(trans);
1913 if (ret) {
1914 if (ret > 0)
1915 ret = -ENOENT;
1916 goto out;
1917 }
1918
1919 ret = btrfs_del_item(trans, root, path);
1920out:
1921 btrfs_free_path(path);
1922 return ret;
1923}
1924
1925/*
1926 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1927 * filesystem. It's up to the caller to adjust that number regarding eg. device
1928 * replace.
1929 */
1930static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1931 u64 num_devices)
1932{
1933 u64 all_avail;
1934 unsigned seq;
1935 int i;
1936
1937 do {
1938 seq = read_seqbegin(&fs_info->profiles_lock);
1939
1940 all_avail = fs_info->avail_data_alloc_bits |
1941 fs_info->avail_system_alloc_bits |
1942 fs_info->avail_metadata_alloc_bits;
1943 } while (read_seqretry(&fs_info->profiles_lock, seq));
1944
1945 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1946 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1947 continue;
1948
1949 if (num_devices < btrfs_raid_array[i].devs_min)
1950 return btrfs_raid_array[i].mindev_error;
1951 }
1952
1953 return 0;
1954}
1955
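/*
 * Find any device in @fs_devs other than @device that is not missing and
 * has an open bdev. Returns NULL if there is no such device.
 */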
static struct btrfs_device *btrfs_find_next_active_device(
1957 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1958{
1959 struct btrfs_device *next_device;
1960
1961 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1962 if (next_device != device &&
1963 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1964 && next_device->bdev)
1965 return next_device;
1966 }
1967
1968 return NULL;
1969}
1970
/*
 * Helper to check if the given device is part of s_bdev / latest_dev and
 * replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another active
 * device (or next_device).
 */
1977void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1978 struct btrfs_device *next_device)
1979{
1980 struct btrfs_fs_info *fs_info = device->fs_info;
1981
1982 if (!next_device)
1983 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1984 device);
1985 ASSERT(next_device);
1986
1987 if (fs_info->sb->s_bdev &&
1988 (fs_info->sb->s_bdev == device->bdev))
1989 fs_info->sb->s_bdev = next_device->bdev;
1990
1991 if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
1992 fs_info->fs_devices->latest_dev = next_device;
1993}
1994
1995/*
1996 * Return btrfs_fs_devices::num_devices excluding the device that's being
1997 * currently replaced.
1998 */
1999static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2000{
2001 u64 num_devices = fs_info->fs_devices->num_devices;
2002
2003 down_read(&fs_info->dev_replace.rwsem);
2004 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2005 ASSERT(num_devices > 1);
2006 num_devices--;
2007 }
2008 up_read(&fs_info->dev_replace.rwsem);
2009
2010 return num_devices;
2011}
2012
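/*
 * Wipe the magic of superblock copy @copy_num on @bdev and sync that
 * range, so the device is no longer recognized as a btrfs device on
 * subsequent scans. Errors only produce a warning.
 */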
2013static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
2014 struct block_device *bdev, int copy_num)
2015{
2016 struct btrfs_super_block *disk_super;
2017 const size_t len = sizeof(disk_super->magic);
2018 const u64 bytenr = btrfs_sb_offset(copy_num);
2019 int ret;
2020
2021 disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
2022 if (IS_ERR(disk_super))
2023 return;
2024
2025 memset(&disk_super->magic, 0, len);
2026 folio_mark_dirty(virt_to_folio(disk_super));
2027 btrfs_release_disk_super(disk_super);
2028
2029 ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
2030 if (ret)
2031 btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
2032 copy_num, ret);
2033}
2034
2035void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2036 struct block_device *bdev,
2037 const char *device_path)
2038{
2039 int copy_num;
2040
2041 if (!bdev)
2042 return;
2043
2044 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2045 if (bdev_is_zoned(bdev))
2046 btrfs_reset_sb_log_zones(bdev, copy_num);
2047 else
2048 btrfs_scratch_superblock(fs_info, bdev, copy_num);
2049 }
2050
2051 /* Notify udev that device has changed */
2052 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2053
2054 /* Update ctime/mtime for device path for libblkid */
2055 update_dev_time(device_path);
2056}
2057
2058int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2059 struct btrfs_dev_lookup_args *args,
2060 struct bdev_handle **bdev_handle)
2061{
2062 struct btrfs_trans_handle *trans;
2063 struct btrfs_device *device;
2064 struct btrfs_fs_devices *cur_devices;
2065 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2066 u64 num_devices;
2067 int ret = 0;
2068
2069 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
2070 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet");
2071 return -EINVAL;
2072 }
2073
2074 /*
2075 * The device list in fs_devices is accessed without locks (neither
2076 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2077 * filesystem and another device rm cannot run.
2078 */
2079 num_devices = btrfs_num_devices(fs_info);
2080
2081 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2082 if (ret)
2083 return ret;
2084
2085 device = btrfs_find_device(fs_info->fs_devices, args);
2086 if (!device) {
2087 if (args->missing)
2088 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2089 else
2090 ret = -ENOENT;
2091 return ret;
2092 }
2093
2094 if (btrfs_pinned_by_swapfile(fs_info, device)) {
2095 btrfs_warn_in_rcu(fs_info,
2096 "cannot remove device %s (devid %llu) due to active swapfile",
2097 btrfs_dev_name(device), device->devid);
2098 return -ETXTBSY;
2099 }
2100
2101 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2102 return BTRFS_ERROR_DEV_TGT_REPLACE;
2103
2104 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2105 fs_info->fs_devices->rw_devices == 1)
2106 return BTRFS_ERROR_DEV_ONLY_WRITABLE;
2107
2108 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2109 mutex_lock(&fs_info->chunk_mutex);
2110 list_del_init(&device->dev_alloc_list);
2111 device->fs_devices->rw_devices--;
2112 mutex_unlock(&fs_info->chunk_mutex);
2113 }
2114
2115 ret = btrfs_shrink_device(device, 0);
2116 if (ret)
2117 goto error_undo;
2118
2119 trans = btrfs_start_transaction(fs_info->chunk_root, 0);
2120 if (IS_ERR(trans)) {
2121 ret = PTR_ERR(trans);
2122 goto error_undo;
2123 }
2124
2125 ret = btrfs_rm_dev_item(trans, device);
2126 if (ret) {
2127 /* Any error in dev item removal is critical */
2128 btrfs_crit(fs_info,
2129 "failed to remove device item for devid %llu: %d",
2130 device->devid, ret);
2131 btrfs_abort_transaction(trans, ret);
2132 btrfs_end_transaction(trans);
2133 return ret;
2134 }
2135
2136 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2137 btrfs_scrub_cancel_dev(device);
2138
	/*
	 * The device list mutex makes sure that we don't change the device
	 * list while someone else is writing out all the device supers.
	 * Whoever is writing all supers should lock the device list mutex
	 * before getting the number of devices in the super block
	 * (super_copy). Conversely, whoever updates the number of devices
	 * in the super block (super_copy) should hold the device list
	 * mutex.
	 */
2148
	/*
	 * In normal cases cur_devices == fs_devices. But when deleting a
	 * seed device, cur_devices should point to the seed's own
	 * fs_devices, listed under fs_devices->seed_list.
	 */
2154 cur_devices = device->fs_devices;
2155 mutex_lock(&fs_devices->device_list_mutex);
2156 list_del_rcu(&device->dev_list);
2157
2158 cur_devices->num_devices--;
2159 cur_devices->total_devices--;
2160 /* Update total_devices of the parent fs_devices if it's seed */
2161 if (cur_devices != fs_devices)
2162 fs_devices->total_devices--;
2163
2164 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2165 cur_devices->missing_devices--;
2166
2167 btrfs_assign_next_active_device(device, NULL);
2168
2169 if (device->bdev_handle) {
2170 cur_devices->open_devices--;
2171 /* remove sysfs entry */
2172 btrfs_sysfs_remove_device(device);
2173 }
2174
2175 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2176 btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2177 mutex_unlock(&fs_devices->device_list_mutex);
2178
2179 /*
2180 * At this point, the device is zero sized and detached from the
2181 * devices list. All that's left is to zero out the old supers and
2182 * free the device.
2183 *
2184 * We cannot call btrfs_close_bdev() here because we're holding the sb
2185 * write lock, and bdev_release() will pull in the ->open_mutex on
	 * the block device and its dependencies. Instead just flush the
	 * device and let the caller do the final bdev_release().
2188 */
2189 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2190 btrfs_scratch_superblocks(fs_info, device->bdev,
2191 device->name->str);
2192 if (device->bdev) {
2193 sync_blockdev(device->bdev);
2194 invalidate_bdev(device->bdev);
2195 }
2196 }
2197
2198 *bdev_handle = device->bdev_handle;
2199 synchronize_rcu();
2200 btrfs_free_device(device);
2201
2202 /*
2203 * This can happen if cur_devices is the private seed devices list. We
2204 * cannot call close_fs_devices() here because it expects the uuid_mutex
	 * to be held, but in fact we don't need that for the private
	 * seed_devices; we can simply decrement cur_devices->opened and then
2207 * remove it from our list and free the fs_devices.
2208 */
2209 if (cur_devices->num_devices == 0) {
2210 list_del_init(&cur_devices->seed_list);
2211 ASSERT(cur_devices->opened == 1);
2212 cur_devices->opened--;
2213 free_fs_devices(cur_devices);
2214 }
2215
2216 ret = btrfs_commit_transaction(trans);
2217
2218 return ret;
2219
2220error_undo:
2221 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2222 mutex_lock(&fs_info->chunk_mutex);
2223 list_add(&device->dev_alloc_list,
2224 &fs_devices->alloc_list);
2225 device->fs_devices->rw_devices++;
2226 mutex_unlock(&fs_info->chunk_mutex);
2227 }
2228 return ret;
2229}
2230
2231void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2232{
2233 struct btrfs_fs_devices *fs_devices;
2234
2235 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2236
	/*
	 * In case of an fs with no seed, srcdev->fs_devices will point to
	 * the fs_devices of fs_info. However, when the dev being replaced
	 * is a seed dev, it will point to the seed's local fs_devices. In
	 * short, srcdev will have its correct fs_devices in both cases.
	 */
2243 fs_devices = srcdev->fs_devices;
2244
2245 list_del_rcu(&srcdev->dev_list);
2246 list_del(&srcdev->dev_alloc_list);
2247 fs_devices->num_devices--;
2248 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2249 fs_devices->missing_devices--;
2250
2251 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2252 fs_devices->rw_devices--;
2253
2254 if (srcdev->bdev)
2255 fs_devices->open_devices--;
2256}
2257
2258void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2259{
2260 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2261
2262 mutex_lock(&uuid_mutex);
2263
2264 btrfs_close_bdev(srcdev);
2265 synchronize_rcu();
2266 btrfs_free_device(srcdev);
2267
	/* If there are no devices left we'd rather delete the fs_devices. */
2269 if (!fs_devices->num_devices) {
		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. When a seed device is being replaced, the replace
		 * target is added to the sprout FS, so there will be no
		 * devices left under the seed FS.
		 */
2276 ASSERT(fs_devices->seeding);
2277
2278 list_del_init(&fs_devices->seed_list);
2279 close_fs_devices(fs_devices);
2280 free_fs_devices(fs_devices);
2281 }
2282 mutex_unlock(&uuid_mutex);
2283}
2284
2285void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2286{
2287 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2288
2289 mutex_lock(&fs_devices->device_list_mutex);
2290
2291 btrfs_sysfs_remove_device(tgtdev);
2292
2293 if (tgtdev->bdev)
2294 fs_devices->open_devices--;
2295
2296 fs_devices->num_devices--;
2297
2298 btrfs_assign_next_active_device(tgtdev, NULL);
2299
2300 list_del_rcu(&tgtdev->dev_list);
2301
2302 mutex_unlock(&fs_devices->device_list_mutex);
2303
2304 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2305 tgtdev->name->str);
2306
2307 btrfs_close_bdev(tgtdev);
2308 synchronize_rcu();
2309 btrfs_free_device(tgtdev);
2310}
2311
2312/*
2313 * Populate args from device at path.
2314 *
2315 * @fs_info: the filesystem
2316 * @args: the args to populate
2317 * @path: the path to the device
2318 *
2319 * This will read the super block of the device at @path and populate @args with
2320 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to
2321 * lookup a device to operate on, but need to do it before we take any locks.
2322 * This properly handles the special case of "missing" that a user may pass in,
2323 * and does some basic sanity checks. The caller must make sure that @path is
2324 * properly NUL terminated before calling in, and must call
2325 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2326 * uuid buffers.
2327 *
2328 * Return: 0 for success, -errno for failure
2329 */
2330int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2331 struct btrfs_dev_lookup_args *args,
2332 const char *path)
2333{
2334 struct btrfs_super_block *disk_super;
2335 struct bdev_handle *bdev_handle;
2336 int ret;
2337
2338 if (!path || !path[0])
2339 return -EINVAL;
2340 if (!strcmp(path, "missing")) {
2341 args->missing = true;
2342 return 0;
2343 }
2344
2345 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2346 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2347 if (!args->uuid || !args->fsid) {
2348 btrfs_put_dev_args_from_path(args);
2349 return -ENOMEM;
2350 }
2351
2352 ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0,
2353 &bdev_handle, &disk_super);
2354 if (ret) {
2355 btrfs_put_dev_args_from_path(args);
2356 return ret;
2357 }
2358
2359 args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2360 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2361 if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2362 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2363 else
2364 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2365 btrfs_release_disk_super(disk_super);
2366 bdev_release(bdev_handle);
2367 return 0;
2368}
2369
2370/*
 * Only use this jointly with btrfs_get_dev_args_from_path() because it
 * allocates the ->uuid and ->fsid pointers; everybody else uses local
 * variables that don't need to be freed.
2374 */
2375void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
2376{
2377 kfree(args->uuid);
2378 kfree(args->fsid);
2379 args->uuid = NULL;
2380 args->fsid = NULL;
2381}
2382
2383struct btrfs_device *btrfs_find_device_by_devspec(
2384 struct btrfs_fs_info *fs_info, u64 devid,
2385 const char *device_path)
2386{
2387 BTRFS_DEV_LOOKUP_ARGS(args);
2388 struct btrfs_device *device;
2389 int ret;
2390
2391 if (devid) {
2392 args.devid = devid;
2393 device = btrfs_find_device(fs_info->fs_devices, &args);
2394 if (!device)
2395 return ERR_PTR(-ENOENT);
2396 return device;
2397 }
2398
2399 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
2400 if (ret)
2401 return ERR_PTR(ret);
2402 device = btrfs_find_device(fs_info->fs_devices, &args);
2403 btrfs_put_dev_args_from_path(&args);
2404 if (!device)
2405 return ERR_PTR(-ENOENT);
2406 return device;
2407}
2408
2409static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
2410{
2411 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2412 struct btrfs_fs_devices *old_devices;
2413 struct btrfs_fs_devices *seed_devices;
2414
2415 lockdep_assert_held(&uuid_mutex);
2416 if (!fs_devices->seeding)
2417 return ERR_PTR(-EINVAL);
2418
2419 /*
2420 * Private copy of the seed devices, anchored at
2421 * fs_info->fs_devices->seed_list
2422 */
2423 seed_devices = alloc_fs_devices(NULL);
2424 if (IS_ERR(seed_devices))
2425 return seed_devices;
2426
2427 /*
2428 * It's necessary to retain a copy of the original seed fs_devices in
2429 * fs_uuids so that filesystems which have been seeded can successfully
	 * reference the seed device from open_seed_devices. This also supports
	 * seeding multiple filesystems from the same seed device.
2432 */
2433 old_devices = clone_fs_devices(fs_devices);
2434 if (IS_ERR(old_devices)) {
2435 kfree(seed_devices);
2436 return old_devices;
2437 }
2438
2439 list_add(&old_devices->fs_list, &fs_uuids);
2440
2441 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2442 seed_devices->opened = 1;
2443 INIT_LIST_HEAD(&seed_devices->devices);
2444 INIT_LIST_HEAD(&seed_devices->alloc_list);
2445 mutex_init(&seed_devices->device_list_mutex);
2446
2447 return seed_devices;
2448}
2449
2450/*
2451 * Splice seed devices into the sprout fs_devices.
2452 * Generate a new fsid for the sprouted read-write filesystem.
2453 */
2454static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
2455 struct btrfs_fs_devices *seed_devices)
2456{
2457 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2458 struct btrfs_super_block *disk_super = fs_info->super_copy;
2459 struct btrfs_device *device;
2460 u64 super_flags;
2461
2462 /*
	 * We are updating the fsid; a thread leading to device_list_add()
2464 * could race, so uuid_mutex is needed.
2465 */
2466 lockdep_assert_held(&uuid_mutex);
2467
2468 /*
2469 * The threads listed below may traverse dev_list but can do that without
2470 * device_list_mutex:
2471 * - All device ops and balance - as we are in btrfs_exclop_start.
2472 * - Various dev_list readers - are using RCU.
2473 * - btrfs_ioctl_fitrim() - is using RCU.
2474 *
2475 * For-read threads as below are using device_list_mutex:
2476 * - Readonly scrub btrfs_scrub_dev()
2477 * - Readonly scrub btrfs_scrub_progress()
2478 * - btrfs_get_dev_stats()
2479 */
2480 lockdep_assert_held(&fs_devices->device_list_mutex);
2481
2482 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2483 synchronize_rcu);
2484 list_for_each_entry(device, &seed_devices->devices, dev_list)
2485 device->fs_devices = seed_devices;
2486
2487 fs_devices->seeding = false;
2488 fs_devices->num_devices = 0;
2489 fs_devices->open_devices = 0;
2490 fs_devices->missing_devices = 0;
2491 fs_devices->rotating = false;
2492 list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2493
2494 generate_random_uuid(fs_devices->fsid);
2495 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2496 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2497
2498 super_flags = btrfs_super_flags(disk_super) &
2499 ~BTRFS_SUPER_FLAG_SEEDING;
2500 btrfs_set_super_flags(disk_super, super_flags);
2501}
2502
2503/*
2504 * Store the expected generation for seed devices in device items.
2505 */
2506static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2507{
2508 BTRFS_DEV_LOOKUP_ARGS(args);
2509 struct btrfs_fs_info *fs_info = trans->fs_info;
2510 struct btrfs_root *root = fs_info->chunk_root;
2511 struct btrfs_path *path;
2512 struct extent_buffer *leaf;
2513 struct btrfs_dev_item *dev_item;
2514 struct btrfs_device *device;
2515 struct btrfs_key key;
2516 u8 fs_uuid[BTRFS_FSID_SIZE];
2517 u8 dev_uuid[BTRFS_UUID_SIZE];
2518 int ret;
2519
2520 path = btrfs_alloc_path();
2521 if (!path)
2522 return -ENOMEM;
2523
2524 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2525 key.offset = 0;
2526 key.type = BTRFS_DEV_ITEM_KEY;
2527
2528 while (1) {
2529 btrfs_reserve_chunk_metadata(trans, false);
2530 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2531 btrfs_trans_release_chunk_metadata(trans);
2532 if (ret < 0)
2533 goto error;
2534
2535 leaf = path->nodes[0];
2536next_slot:
2537 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2538 ret = btrfs_next_leaf(root, path);
2539 if (ret > 0)
2540 break;
2541 if (ret < 0)
2542 goto error;
2543 leaf = path->nodes[0];
2544 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2545 btrfs_release_path(path);
2546 continue;
2547 }
2548
2549 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2550 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2551 key.type != BTRFS_DEV_ITEM_KEY)
2552 break;
2553
2554 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2555 struct btrfs_dev_item);
2556 args.devid = btrfs_device_id(leaf, dev_item);
2557 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2558 BTRFS_UUID_SIZE);
2559 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2560 BTRFS_FSID_SIZE);
2561 args.uuid = dev_uuid;
2562 args.fsid = fs_uuid;
2563 device = btrfs_find_device(fs_info->fs_devices, &args);
2564 BUG_ON(!device); /* Logic error */
2565
2566 if (device->fs_devices->seeding) {
2567 btrfs_set_device_generation(leaf, dev_item,
2568 device->generation);
2569 btrfs_mark_buffer_dirty(trans, leaf);
2570 }
2571
2572 path->slots[0]++;
2573 goto next_slot;
2574 }
2575 ret = 0;
2576error:
2577 btrfs_free_path(path);
2578 return ret;
2579}
2580
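/*
 * Add a new device at @device_path to a mounted filesystem. If the
 * filesystem is a seed, this sprouts a new read-write filesystem on top
 * of it: the seed devices are spliced onto a private list and a fresh
 * fsid is generated for the sprout.
 */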
2581int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2582{
2583 struct btrfs_root *root = fs_info->dev_root;
2584 struct btrfs_trans_handle *trans;
2585 struct btrfs_device *device;
2586 struct bdev_handle *bdev_handle;
2587 struct super_block *sb = fs_info->sb;
2588 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2589 struct btrfs_fs_devices *seed_devices = NULL;
2590 u64 orig_super_total_bytes;
2591 u64 orig_super_num_devices;
2592 int ret = 0;
2593 bool seeding_dev = false;
2594 bool locked = false;
2595
2596 if (sb_rdonly(sb) && !fs_devices->seeding)
2597 return -EROFS;
2598
2599 bdev_handle = bdev_open_by_path(device_path, BLK_OPEN_WRITE,
2600 fs_info->bdev_holder, NULL);
2601 if (IS_ERR(bdev_handle))
2602 return PTR_ERR(bdev_handle);
2603
2604 if (!btrfs_check_device_zone_type(fs_info, bdev_handle->bdev)) {
2605 ret = -EINVAL;
2606 goto error;
2607 }
2608
2609 if (fs_devices->seeding) {
2610 seeding_dev = true;
2611 down_write(&sb->s_umount);
2612 mutex_lock(&uuid_mutex);
2613 locked = true;
2614 }
2615
2616 sync_blockdev(bdev_handle->bdev);
2617
2618 rcu_read_lock();
2619 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2620 if (device->bdev == bdev_handle->bdev) {
2621 ret = -EEXIST;
2622 rcu_read_unlock();
2623 goto error;
2624 }
2625 }
2626 rcu_read_unlock();
2627
2628 device = btrfs_alloc_device(fs_info, NULL, NULL, device_path);
2629 if (IS_ERR(device)) {
2630 /* we can safely leave the fs_devices entry around */
2631 ret = PTR_ERR(device);
2632 goto error;
2633 }
2634
2635 device->fs_info = fs_info;
2636 device->bdev_handle = bdev_handle;
2637 device->bdev = bdev_handle->bdev;
2638 ret = lookup_bdev(device_path, &device->devt);
2639 if (ret)
2640 goto error_free_device;
2641
2642 ret = btrfs_get_dev_zone_info(device, false);
2643 if (ret)
2644 goto error_free_device;
2645
2646 trans = btrfs_start_transaction(root, 0);
2647 if (IS_ERR(trans)) {
2648 ret = PTR_ERR(trans);
2649 goto error_free_zone;
2650 }
2651
2652 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2653 device->generation = trans->transid;
2654 device->io_width = fs_info->sectorsize;
2655 device->io_align = fs_info->sectorsize;
2656 device->sector_size = fs_info->sectorsize;
2657 device->total_bytes =
2658 round_down(bdev_nr_bytes(device->bdev), fs_info->sectorsize);
2659 device->disk_total_bytes = device->total_bytes;
2660 device->commit_total_bytes = device->total_bytes;
2661 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2662 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2663 device->dev_stats_valid = 1;
2664 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2665
2666 if (seeding_dev) {
2667 btrfs_clear_sb_rdonly(sb);
2668
2669 /* GFP_KERNEL allocation must not be under device_list_mutex */
2670 seed_devices = btrfs_init_sprout(fs_info);
2671 if (IS_ERR(seed_devices)) {
2672 ret = PTR_ERR(seed_devices);
2673 btrfs_abort_transaction(trans, ret);
2674 goto error_trans;
2675 }
2676 }
2677
2678 mutex_lock(&fs_devices->device_list_mutex);
2679 if (seeding_dev) {
2680 btrfs_setup_sprout(fs_info, seed_devices);
2681 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
2682 device);
2683 }
2684
2685 device->fs_devices = fs_devices;
2686
2687 mutex_lock(&fs_info->chunk_mutex);
2688 list_add_rcu(&device->dev_list, &fs_devices->devices);
2689 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2690 fs_devices->num_devices++;
2691 fs_devices->open_devices++;
2692 fs_devices->rw_devices++;
2693 fs_devices->total_devices++;
2694 fs_devices->total_rw_bytes += device->total_bytes;
2695
2696 atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2697
2698 if (!bdev_nonrot(device->bdev))
2699 fs_devices->rotating = true;
2700
2701 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2702 btrfs_set_super_total_bytes(fs_info->super_copy,
2703 round_down(orig_super_total_bytes + device->total_bytes,
2704 fs_info->sectorsize));
2705
2706 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2707 btrfs_set_super_num_devices(fs_info->super_copy,
2708 orig_super_num_devices + 1);
2709
	/*
	 * We've got more storage, so clear any full flags on the
	 * space infos.
	 */
2714 btrfs_clear_space_info_full(fs_info);
2715
2716 mutex_unlock(&fs_info->chunk_mutex);
2717
2718 /* Add sysfs device entry */
2719 btrfs_sysfs_add_device(device);
2720
2721 mutex_unlock(&fs_devices->device_list_mutex);
2722
2723 if (seeding_dev) {
2724 mutex_lock(&fs_info->chunk_mutex);
2725 ret = init_first_rw_device(trans);
2726 mutex_unlock(&fs_info->chunk_mutex);
2727 if (ret) {
2728 btrfs_abort_transaction(trans, ret);
2729 goto error_sysfs;
2730 }
2731 }
2732
2733 ret = btrfs_add_dev_item(trans, device);
2734 if (ret) {
2735 btrfs_abort_transaction(trans, ret);
2736 goto error_sysfs;
2737 }
2738
2739 if (seeding_dev) {
2740 ret = btrfs_finish_sprout(trans);
2741 if (ret) {
2742 btrfs_abort_transaction(trans, ret);
2743 goto error_sysfs;
2744 }
2745
2746 /*
		 * fs_devices now represents the newly sprouted filesystem and
		 * its fsid has been changed by btrfs_setup_sprout().
2749 */
2750 btrfs_sysfs_update_sprout_fsid(fs_devices);
2751 }
2752
2753 ret = btrfs_commit_transaction(trans);
2754
2755 if (seeding_dev) {
2756 mutex_unlock(&uuid_mutex);
2757 up_write(&sb->s_umount);
2758 locked = false;
2759
2760 if (ret) /* transaction commit */
2761 return ret;
2762
2763 ret = btrfs_relocate_sys_chunks(fs_info);
2764 if (ret < 0)
2765 btrfs_handle_fs_error(fs_info, ret,
2766 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2767 trans = btrfs_attach_transaction(root);
2768 if (IS_ERR(trans)) {
2769 if (PTR_ERR(trans) == -ENOENT)
2770 return 0;
2771 ret = PTR_ERR(trans);
2772 trans = NULL;
2773 goto error_sysfs;
2774 }
2775 ret = btrfs_commit_transaction(trans);
2776 }
2777
2778 /*
	 * Now that we have written a new super block to this device, check
	 * all other fs_devices lists to see whether device_path alienates
	 * any other scanned device.
2782 * We can ignore the return value as it typically returns -EINVAL and
2783 * only succeeds if the device was an alien.
2784 */
2785 btrfs_forget_devices(device->devt);
2786
2787 /* Update ctime/mtime for blkid or udev */
2788 update_dev_time(device_path);
2789
2790 return ret;
2791
2792error_sysfs:
2793 btrfs_sysfs_remove_device(device);
2794 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2795 mutex_lock(&fs_info->chunk_mutex);
2796 list_del_rcu(&device->dev_list);
2797 list_del(&device->dev_alloc_list);
2798 fs_info->fs_devices->num_devices--;
2799 fs_info->fs_devices->open_devices--;
2800 fs_info->fs_devices->rw_devices--;
2801 fs_info->fs_devices->total_devices--;
2802 fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2803 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2804 btrfs_set_super_total_bytes(fs_info->super_copy,
2805 orig_super_total_bytes);
2806 btrfs_set_super_num_devices(fs_info->super_copy,
2807 orig_super_num_devices);
2808 mutex_unlock(&fs_info->chunk_mutex);
2809 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2810error_trans:
2811 if (seeding_dev)
2812 btrfs_set_sb_rdonly(sb);
2813 if (trans)
2814 btrfs_end_transaction(trans);
2815error_free_zone:
2816 btrfs_destroy_dev_zone_info(device);
2817error_free_device:
2818 btrfs_free_device(device);
2819error:
2820 bdev_release(bdev_handle);
2821 if (locked) {
2822 mutex_unlock(&uuid_mutex);
2823 up_write(&sb->s_umount);
2824 }
2825 return ret;
2826}
2827
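/*
 * Write back the in-memory sizes and parameters of @device into its dev
 * item in the chunk tree. The caller is responsible for any chunk
 * metadata reservation.
 */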
2828static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2829 struct btrfs_device *device)
2830{
2831 int ret;
2832 struct btrfs_path *path;
2833 struct btrfs_root *root = device->fs_info->chunk_root;
2834 struct btrfs_dev_item *dev_item;
2835 struct extent_buffer *leaf;
2836 struct btrfs_key key;
2837
2838 path = btrfs_alloc_path();
2839 if (!path)
2840 return -ENOMEM;
2841
2842 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2843 key.type = BTRFS_DEV_ITEM_KEY;
2844 key.offset = device->devid;
2845
2846 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2847 if (ret < 0)
2848 goto out;
2849
2850 if (ret > 0) {
2851 ret = -ENOENT;
2852 goto out;
2853 }
2854
2855 leaf = path->nodes[0];
2856 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2857
2858 btrfs_set_device_id(leaf, dev_item, device->devid);
2859 btrfs_set_device_type(leaf, dev_item, device->type);
2860 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2861 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2862 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2863 btrfs_set_device_total_bytes(leaf, dev_item,
2864 btrfs_device_get_disk_total_bytes(device));
2865 btrfs_set_device_bytes_used(leaf, dev_item,
2866 btrfs_device_get_bytes_used(device));
2867 btrfs_mark_buffer_dirty(trans, leaf);
2868
2869out:
2870 btrfs_free_path(path);
2871 return ret;
2872}
2873
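/*
 * Grow @device to @new_size (rounded down to the sector size), update
 * the superblock total size and the free chunk space accordingly, and
 * persist the new size through btrfs_update_device().
 */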
2874int btrfs_grow_device(struct btrfs_trans_handle *trans,
2875 struct btrfs_device *device, u64 new_size)
2876{
2877 struct btrfs_fs_info *fs_info = device->fs_info;
2878 struct btrfs_super_block *super_copy = fs_info->super_copy;
2879 u64 old_total;
2880 u64 diff;
2881 int ret;
2882
2883 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2884 return -EACCES;
2885
2886 new_size = round_down(new_size, fs_info->sectorsize);
2887
2888 mutex_lock(&fs_info->chunk_mutex);
2889 old_total = btrfs_super_total_bytes(super_copy);
2890 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2891
2892 if (new_size <= device->total_bytes ||
2893 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2894 mutex_unlock(&fs_info->chunk_mutex);
2895 return -EINVAL;
2896 }
2897
2898 btrfs_set_super_total_bytes(super_copy,
2899 round_down(old_total + diff, fs_info->sectorsize));
2900 device->fs_devices->total_rw_bytes += diff;
2901 atomic64_add(diff, &fs_info->free_chunk_space);
2902
2903 btrfs_device_set_total_bytes(device, new_size);
2904 btrfs_device_set_disk_total_bytes(device, new_size);
2905 btrfs_clear_space_info_full(device->fs_info);
2906 if (list_empty(&device->post_commit_list))
2907 list_add_tail(&device->post_commit_list,
2908 &trans->transaction->dev_update_list);
2909 mutex_unlock(&fs_info->chunk_mutex);
2910
2911 btrfs_reserve_chunk_metadata(trans, false);
2912 ret = btrfs_update_device(trans, device);
2913 btrfs_trans_release_chunk_metadata(trans);
2914
2915 return ret;
2916}
2917
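/*
 * Delete the chunk item at @chunk_offset from the chunk tree. A missing
 * item is treated as a logic error or corruption.
 */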
2918static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2919{
2920 struct btrfs_fs_info *fs_info = trans->fs_info;
2921 struct btrfs_root *root = fs_info->chunk_root;
2922 int ret;
2923 struct btrfs_path *path;
2924 struct btrfs_key key;
2925
2926 path = btrfs_alloc_path();
2927 if (!path)
2928 return -ENOMEM;
2929
2930 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2931 key.offset = chunk_offset;
2932 key.type = BTRFS_CHUNK_ITEM_KEY;
2933
2934 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2935 if (ret < 0)
2936 goto out;
2937 else if (ret > 0) { /* Logic error or corruption */
2938 btrfs_handle_fs_error(fs_info, -ENOENT,
2939 "Failed lookup while freeing chunk.");
2940 ret = -ENOENT;
2941 goto out;
2942 }
2943
2944 ret = btrfs_del_item(trans, root, path);
2945 if (ret < 0)
2946 btrfs_handle_fs_error(fs_info, ret,
2947 "Failed to delete chunk item.");
2948out:
2949 btrfs_free_path(path);
2950 return ret;
2951}
2952
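/*
 * Remove the chunk at @chunk_offset from the superblock's sys_chunk_array.
 * The array is a packed sequence of (disk key, chunk item) pairs, so on a
 * match the remainder is shifted down and the array size is reduced.
 */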
2953static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2954{
2955 struct btrfs_super_block *super_copy = fs_info->super_copy;
2956 struct btrfs_disk_key *disk_key;
2957 struct btrfs_chunk *chunk;
2958 u8 *ptr;
2959 int ret = 0;
2960 u32 num_stripes;
2961 u32 array_size;
2962 u32 len = 0;
2963 u32 cur;
2964 struct btrfs_key key;
2965
2966 lockdep_assert_held(&fs_info->chunk_mutex);
2967 array_size = btrfs_super_sys_array_size(super_copy);
2968
2969 ptr = super_copy->sys_chunk_array;
2970 cur = 0;
2971
2972 while (cur < array_size) {
2973 disk_key = (struct btrfs_disk_key *)ptr;
2974 btrfs_disk_key_to_cpu(&key, disk_key);
2975
2976 len = sizeof(*disk_key);
2977
2978 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2979 chunk = (struct btrfs_chunk *)(ptr + len);
2980 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2981 len += btrfs_chunk_item_size(num_stripes);
2982 } else {
2983 ret = -EIO;
2984 break;
2985 }
2986 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2987 key.offset == chunk_offset) {
2988 memmove(ptr, ptr + len, array_size - (cur + len));
2989 array_size -= len;
2990 btrfs_set_super_sys_array_size(super_copy, array_size);
2991 } else {
2992 ptr += len;
2993 cur += len;
2994 }
2995 }
2996 return ret;
2997}
2998
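/*
 * Search the mapping tree for a chunk map that overlaps the range
 * [@logical, @logical + @length). If the binary search does not land
 * inside a map, the neighbouring nodes are checked for an overlap.
 * The caller must hold fs_info->mapping_tree_lock; on success a
 * reference on the returned map is taken.
 */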
2999struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info,
3000 u64 logical, u64 length)
3001{
3002 struct rb_node *node = fs_info->mapping_tree.rb_root.rb_node;
3003 struct rb_node *prev = NULL;
3004 struct rb_node *orig_prev;
3005 struct btrfs_chunk_map *map;
3006 struct btrfs_chunk_map *prev_map = NULL;
3007
3008 while (node) {
3009 map = rb_entry(node, struct btrfs_chunk_map, rb_node);
3010 prev = node;
3011 prev_map = map;
3012
3013 if (logical < map->start) {
3014 node = node->rb_left;
3015 } else if (logical >= map->start + map->chunk_len) {
3016 node = node->rb_right;
3017 } else {
3018 refcount_inc(&map->refs);
3019 return map;
3020 }
3021 }
3022
3023 if (!prev)
3024 return NULL;
3025
3026 orig_prev = prev;
3027 while (prev && logical >= prev_map->start + prev_map->chunk_len) {
3028 prev = rb_next(prev);
3029 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
3030 }
3031
3032 if (!prev) {
3033 prev = orig_prev;
3034 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
3035 while (prev && logical < prev_map->start) {
3036 prev = rb_prev(prev);
3037 prev_map = rb_entry(prev, struct btrfs_chunk_map, rb_node);
3038 }
3039 }
3040
3041 if (prev) {
3042 u64 end = logical + length;
3043
3044 /*
3045 * Caller can pass a U64_MAX length when it wants to get any
		 * chunk starting at an offset of 'logical' or higher, so deal
		 * with overflow by resetting the end offset to U64_MAX.
3048 */
3049 if (end < logical)
3050 end = U64_MAX;
3051
3052 if (end > prev_map->start &&
3053 logical < prev_map->start + prev_map->chunk_len) {
3054 refcount_inc(&prev_map->refs);
3055 return prev_map;
3056 }
3057 }
3058
3059 return NULL;
3060}
3061
3062struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info,
3063 u64 logical, u64 length)
3064{
3065 struct btrfs_chunk_map *map;
3066
3067 read_lock(&fs_info->mapping_tree_lock);
3068 map = btrfs_find_chunk_map_nolock(fs_info, logical, length);
3069 read_unlock(&fs_info->mapping_tree_lock);
3070
3071 return map;
3072}
3073
3074/*
3075 * Find the mapping containing the given logical extent.
3076 *
3077 * @logical: Logical block offset in bytes.
3078 * @length: Length of extent in bytes.
3079 *
3080 * Return: Chunk mapping or ERR_PTR.
3081 */
3082struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
3083 u64 logical, u64 length)
3084{
3085 struct btrfs_chunk_map *map;
3086
3087 map = btrfs_find_chunk_map(fs_info, logical, length);
3088
3089 if (unlikely(!map)) {
3090 btrfs_crit(fs_info,
3091 "unable to find chunk map for logical %llu length %llu",
3092 logical, length);
3093 return ERR_PTR(-EINVAL);
3094 }
3095
3096 if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
3097 btrfs_crit(fs_info,
3098 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
3099 logical, logical + length, map->start,
3100 map->start + map->chunk_len);
3101 btrfs_free_chunk_map(map);
3102 return ERR_PTR(-EINVAL);
3103 }
3104
3105 /* Callers are responsible for dropping the reference. */
3106 return map;
3107}
3108
3109static int remove_chunk_item(struct btrfs_trans_handle *trans,
3110 struct btrfs_chunk_map *map, u64 chunk_offset)
3111{
3112 int i;
3113
3114 /*
3115 * Removing chunk items and updating the device items in the chunks btree
3116 * requires holding the chunk_mutex.
3117 * See the comment at btrfs_chunk_alloc() for the details.
3118 */
3119 lockdep_assert_held(&trans->fs_info->chunk_mutex);
3120
3121 for (i = 0; i < map->num_stripes; i++) {
3122 int ret;
3123
3124 ret = btrfs_update_device(trans, map->stripes[i].dev);
3125 if (ret)
3126 return ret;
3127 }
3128
3129 return btrfs_free_chunk(trans, chunk_offset);
3130}
3131
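/*
 * Remove the chunk at @chunk_offset: delete its device extents and update
 * the device items, delete the chunk item (and the sys_chunk_array entry
 * for SYSTEM chunks), and finally remove the block group.
 */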
3132int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3133{
3134 struct btrfs_fs_info *fs_info = trans->fs_info;
3135 struct btrfs_chunk_map *map;
3136 u64 dev_extent_len = 0;
3137 int i, ret = 0;
3138 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3139
3140 map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3141 if (IS_ERR(map)) {
3142 /*
3143 * This is a logic error, but we don't want to just rely on the
3144 * user having built with ASSERT enabled, so if ASSERT doesn't
3145 * do anything we still error out.
3146 */
3147 ASSERT(0);
3148 return PTR_ERR(map);
3149 }
3150
3151 /*
3152 * First delete the device extent items from the devices btree.
3153 * We take the device_list_mutex to avoid racing with the finishing phase
3154 * of a device replace operation. See the comment below before acquiring
3155 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3156 * because that can result in a deadlock when deleting the device extent
3157 * items from the devices btree - COWing an extent buffer from the btree
	 * may result in allocating a new metadata chunk, which would attempt
	 * to lock fs_info->chunk_mutex again.
3160 */
3161 mutex_lock(&fs_devices->device_list_mutex);
3162 for (i = 0; i < map->num_stripes; i++) {
3163 struct btrfs_device *device = map->stripes[i].dev;
3164 ret = btrfs_free_dev_extent(trans, device,
3165 map->stripes[i].physical,
3166 &dev_extent_len);
3167 if (ret) {
3168 mutex_unlock(&fs_devices->device_list_mutex);
3169 btrfs_abort_transaction(trans, ret);
3170 goto out;
3171 }
3172
3173 if (device->bytes_used > 0) {
3174 mutex_lock(&fs_info->chunk_mutex);
3175 btrfs_device_set_bytes_used(device,
3176 device->bytes_used - dev_extent_len);
3177 atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3178 btrfs_clear_space_info_full(fs_info);
3179 mutex_unlock(&fs_info->chunk_mutex);
3180 }
3181 }
3182 mutex_unlock(&fs_devices->device_list_mutex);
3183
3184 /*
3185 * We acquire fs_info->chunk_mutex for 2 reasons:
3186 *
3187 * 1) Just like with the first phase of the chunk allocation, we must
3188 * reserve system space, do all chunk btree updates and deletions, and
3189 * update the system chunk array in the superblock while holding this
3190 * mutex. This is for similar reasons as explained on the comment at
3191 * the top of btrfs_chunk_alloc();
3192 *
3193 * 2) Prevent races with the final phase of a device replace operation
3194 * that replaces the device object associated with the map's stripes,
3195 * because the device object's id can change at any time during that
3196 * final phase of the device replace operation
3197 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3198 * replaced device and then see it with an ID of
3199 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
	 * the device item, which does not exist in the chunk btree.
3201 * The finishing phase of device replace acquires both the
3202 * device_list_mutex and the chunk_mutex, in that order, so we are
3203 * safe by just acquiring the chunk_mutex.
3204 */
3205 trans->removing_chunk = true;
3206 mutex_lock(&fs_info->chunk_mutex);
3207
3208 check_system_chunk(trans, map->type);
3209
3210 ret = remove_chunk_item(trans, map, chunk_offset);
3211 /*
3212 * Normally we should not get -ENOSPC since we reserved space before
3213 * through the call to check_system_chunk().
3214 *
3215 * Despite our system space_info having enough free space, we may not
3216 * be able to allocate extents from its block groups, because all have
3217 * an incompatible profile, which will force us to allocate a new system
3218 * block group with the right profile, or right after we called
	 * check_system_chunk() above, a scrub turned the only system block group
3220 * with enough free space into RO mode.
3221 * This is explained with more detail at do_chunk_alloc().
3222 *
3223 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3224 */
3225 if (ret == -ENOSPC) {
3226 const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3227 struct btrfs_block_group *sys_bg;
3228
3229 sys_bg = btrfs_create_chunk(trans, sys_flags);
3230 if (IS_ERR(sys_bg)) {
3231 ret = PTR_ERR(sys_bg);
3232 btrfs_abort_transaction(trans, ret);
3233 goto out;
3234 }
3235
3236 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3237 if (ret) {
3238 btrfs_abort_transaction(trans, ret);
3239 goto out;
3240 }
3241
3242 ret = remove_chunk_item(trans, map, chunk_offset);
3243 if (ret) {
3244 btrfs_abort_transaction(trans, ret);
3245 goto out;
3246 }
3247 } else if (ret) {
3248 btrfs_abort_transaction(trans, ret);
3249 goto out;
3250 }
3251
3252 trace_btrfs_chunk_free(fs_info, map, chunk_offset, map->chunk_len);
3253
3254 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3255 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3256 if (ret) {
3257 btrfs_abort_transaction(trans, ret);
3258 goto out;
3259 }
3260 }
3261
3262 mutex_unlock(&fs_info->chunk_mutex);
3263 trans->removing_chunk = false;
3264
3265 /*
3266 * We are done with chunk btree updates and deletions, so release the
3267 * system space we previously reserved (with check_system_chunk()).
3268 */
3269 btrfs_trans_release_chunk_metadata(trans);
3270
3271 ret = btrfs_remove_block_group(trans, map);
3272 if (ret) {
3273 btrfs_abort_transaction(trans, ret);
3274 goto out;
3275 }
3276
3277out:
3278 if (trans->removing_chunk) {
3279 mutex_unlock(&fs_info->chunk_mutex);
3280 trans->removing_chunk = false;
3281 }
3282 /* once for us */
3283 btrfs_free_chunk_map(map);
3284 return ret;
3285}
3286
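/*
 * Relocate all extents of the chunk at @chunk_offset and then remove the
 * now empty chunk. The caller must hold fs_info->reclaim_bgs_lock.
 */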
3287int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3288{
3289 struct btrfs_root *root = fs_info->chunk_root;
3290 struct btrfs_trans_handle *trans;
3291 struct btrfs_block_group *block_group;
3292 u64 length;
3293 int ret;
3294
3295 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
3296 btrfs_err(fs_info,
3297 "relocate: not supported on extent tree v2 yet");
3298 return -EINVAL;
3299 }
3300
3301 /*
3302 * Prevent races with automatic removal of unused block groups.
3303 * After we relocate and before we remove the chunk with offset
3304 * chunk_offset, automatic removal of the block group can kick in,
3305 * resulting in a failure when calling btrfs_remove_chunk() below.
3306 *
3307 * Make sure to acquire this mutex before doing a tree search (dev
3308 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3309 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3310 * we release the path used to search the chunk/dev tree and before
3311 * the current task acquires this mutex and calls us.
3312 */
3313 lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3314
3315 /* step one, relocate all the extents inside this chunk */
3316 btrfs_scrub_pause(fs_info);
3317 ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3318 btrfs_scrub_continue(fs_info);
3319 if (ret) {
3320 /*
3321 * If we had a transaction abort, stop all running scrubs.
3322 * See transaction.c:cleanup_transaction() why we do it here.
3323 */
3324 if (BTRFS_FS_ERROR(fs_info))
3325 btrfs_scrub_cancel(fs_info);
3326 return ret;
3327 }
3328
3329 block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3330 if (!block_group)
3331 return -ENOENT;
3332 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3333 length = block_group->length;
3334 btrfs_put_block_group(block_group);
3335
3336 /*
3337 * On a zoned file system, discard the whole block group, this will
3338 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3339 * resetting the zone fails, don't treat it as a fatal problem from the
3340 * filesystem's point of view.
3341 */
3342 if (btrfs_is_zoned(fs_info)) {
3343 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3344 if (ret)
3345 btrfs_info(fs_info,
3346 "failed to reset zone %llu after relocation",
3347 chunk_offset);
3348 }
3349
3350 trans = btrfs_start_trans_remove_block_group(root->fs_info,
3351 chunk_offset);
3352 if (IS_ERR(trans)) {
3353 ret = PTR_ERR(trans);
3354 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3355 return ret;
3356 }
3357
3358 /*
3359 * step two, delete the device extents and the
3360 * chunk tree entries
3361 */
3362 ret = btrfs_remove_chunk(trans, chunk_offset);
3363 btrfs_end_transaction(trans);
3364 return ret;
3365}
3366
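/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk. Chunks
 * that fail with -ENOSPC are retried once after a full pass.
 */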
3367static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3368{
3369 struct btrfs_root *chunk_root = fs_info->chunk_root;
3370 struct btrfs_path *path;
3371 struct extent_buffer *leaf;
3372 struct btrfs_chunk *chunk;
3373 struct btrfs_key key;
3374 struct btrfs_key found_key;
3375 u64 chunk_type;
3376 bool retried = false;
3377 int failed = 0;
3378 int ret;
3379
3380 path = btrfs_alloc_path();
3381 if (!path)
3382 return -ENOMEM;
3383
3384again:
3385 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3386 key.offset = (u64)-1;
3387 key.type = BTRFS_CHUNK_ITEM_KEY;
3388
3389 while (1) {
3390 mutex_lock(&fs_info->reclaim_bgs_lock);
3391 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3392 if (ret < 0) {
3393 mutex_unlock(&fs_info->reclaim_bgs_lock);
3394 goto error;
3395 }
3396 BUG_ON(ret == 0); /* Corruption */
3397
3398 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3399 key.type);
3400 if (ret)
3401 mutex_unlock(&fs_info->reclaim_bgs_lock);
3402 if (ret < 0)
3403 goto error;
3404 if (ret > 0)
3405 break;
3406
3407 leaf = path->nodes[0];
3408 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3409
3410 chunk = btrfs_item_ptr(leaf, path->slots[0],
3411 struct btrfs_chunk);
3412 chunk_type = btrfs_chunk_type(leaf, chunk);
3413 btrfs_release_path(path);
3414
3415 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3416 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3417 if (ret == -ENOSPC)
3418 failed++;
3419 else
3420 BUG_ON(ret);
3421 }
3422 mutex_unlock(&fs_info->reclaim_bgs_lock);
3423
3424 if (found_key.offset == 0)
3425 break;
3426 key.offset = found_key.offset - 1;
3427 }
3428 ret = 0;
3429 if (failed && !retried) {
3430 failed = 0;
3431 retried = true;
3432 goto again;
3433 } else if (WARN_ON(failed && retried)) {
3434 ret = -ENOSPC;
3435 }
3436error:
3437 btrfs_free_path(path);
3438 return ret;
3439}
3440
/*
 * Return 1 : allocated a data chunk successfully,
 * return <0: error while allocating a data chunk,
 * return 0 : no need to allocate a data chunk.
 */
3446static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3447 u64 chunk_offset)
3448{
3449 struct btrfs_block_group *cache;
3450 u64 bytes_used;
3451 u64 chunk_type;
3452
3453 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3454 ASSERT(cache);
3455 chunk_type = cache->flags;
3456 btrfs_put_block_group(cache);
3457
3458 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3459 return 0;
3460
3461 spin_lock(&fs_info->data_sinfo->lock);
3462 bytes_used = fs_info->data_sinfo->bytes_used;
3463 spin_unlock(&fs_info->data_sinfo->lock);
3464
3465 if (!bytes_used) {
3466 struct btrfs_trans_handle *trans;
3467 int ret;
3468
3469 trans = btrfs_join_transaction(fs_info->tree_root);
3470 if (IS_ERR(trans))
3471 return PTR_ERR(trans);
3472
3473 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3474 btrfs_end_transaction(trans);
3475 if (ret < 0)
3476 return ret;
3477 return 1;
3478 }
3479
3480 return 0;
3481}
3482
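/*
 * Persist the balance item (BTRFS_BALANCE_OBJECTID) in the tree root so
 * that an interrupted balance can be resumed after a remount or crash.
 */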
3483static int insert_balance_item(struct btrfs_fs_info *fs_info,
3484 struct btrfs_balance_control *bctl)
3485{
3486 struct btrfs_root *root = fs_info->tree_root;
3487 struct btrfs_trans_handle *trans;
3488 struct btrfs_balance_item *item;
3489 struct btrfs_disk_balance_args disk_bargs;
3490 struct btrfs_path *path;
3491 struct extent_buffer *leaf;
3492 struct btrfs_key key;
3493 int ret, err;
3494
3495 path = btrfs_alloc_path();
3496 if (!path)
3497 return -ENOMEM;
3498
3499 trans = btrfs_start_transaction(root, 0);
3500 if (IS_ERR(trans)) {
3501 btrfs_free_path(path);
3502 return PTR_ERR(trans);
3503 }
3504
3505 key.objectid = BTRFS_BALANCE_OBJECTID;
3506 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3507 key.offset = 0;
3508
3509 ret = btrfs_insert_empty_item(trans, root, path, &key,
3510 sizeof(*item));
3511 if (ret)
3512 goto out;
3513
3514 leaf = path->nodes[0];
3515 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3516
3517 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3518
3519 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3520 btrfs_set_balance_data(leaf, item, &disk_bargs);
3521 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3522 btrfs_set_balance_meta(leaf, item, &disk_bargs);
3523 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3524 btrfs_set_balance_sys(leaf, item, &disk_bargs);
3525
3526 btrfs_set_balance_flags(leaf, item, bctl->flags);
3527
3528 btrfs_mark_buffer_dirty(trans, leaf);
3529out:
3530 btrfs_free_path(path);
3531 err = btrfs_commit_transaction(trans);
3532 if (err && !ret)
3533 ret = err;
3534 return ret;
3535}
3536
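/*
 * Delete the balance item from the tree root, typically once a balance
 * has completed or been canceled.
 */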
3537static int del_balance_item(struct btrfs_fs_info *fs_info)
3538{
3539 struct btrfs_root *root = fs_info->tree_root;
3540 struct btrfs_trans_handle *trans;
3541 struct btrfs_path *path;
3542 struct btrfs_key key;
3543 int ret, err;
3544
3545 path = btrfs_alloc_path();
3546 if (!path)
3547 return -ENOMEM;
3548
3549 trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3550 if (IS_ERR(trans)) {
3551 btrfs_free_path(path);
3552 return PTR_ERR(trans);
3553 }
3554
3555 key.objectid = BTRFS_BALANCE_OBJECTID;
3556 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3557 key.offset = 0;
3558
3559 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3560 if (ret < 0)
3561 goto out;
3562 if (ret > 0) {
3563 ret = -ENOENT;
3564 goto out;
3565 }
3566
3567 ret = btrfs_del_item(trans, root, path);
3568out:
3569 btrfs_free_path(path);
3570 err = btrfs_commit_transaction(trans);
3571 if (err && !ret)
3572 ret = err;
3573 return ret;
3574}
3575
3576/*
3577 * This is a heuristic used to reduce the number of chunks balanced on
3578 * resume after balance was interrupted.
3579 */
3580static void update_balance_args(struct btrfs_balance_control *bctl)
3581{
3582 /*
3583 * Turn on soft mode for chunk types that were being converted.
3584 */
3585 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3586 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3587 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3588 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3589 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3590 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3591
3592 /*
	 * Turn on the usage filter if it is not already used. The idea is
3594 * that chunks that we have already balanced should be
3595 * reasonably full. Don't do it for chunks that are being
3596 * converted - that will keep us from relocating unconverted
3597 * (albeit full) chunks.
3598 */
3599 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3600 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3601 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3602 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3603 bctl->data.usage = 90;
3604 }
3605 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3606 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3607 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3608 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3609 bctl->sys.usage = 90;
3610 }
3611 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3612 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3613 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3614 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3615 bctl->meta.usage = 90;
3616 }
3617}
3618
3619/*
3620 * Clear the balance status in fs_info and delete the balance item from disk.
3621 */
3622static void reset_balance_state(struct btrfs_fs_info *fs_info)
3623{
3624 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3625 int ret;
3626
3627 BUG_ON(!fs_info->balance_ctl);
3628
3629 spin_lock(&fs_info->balance_lock);
3630 fs_info->balance_ctl = NULL;
3631 spin_unlock(&fs_info->balance_lock);
3632
3633 kfree(bctl);
3634 ret = del_balance_item(fs_info);
3635 if (ret)
3636 btrfs_handle_fs_error(fs_info, ret, NULL);
3637}
3638
3639/*
3640 * Balance filters. Return 1 if chunk should be filtered out
3641 * (should not be balanced).
3642 */
3643static int chunk_profiles_filter(u64 chunk_type,
3644 struct btrfs_balance_args *bargs)
3645{
3646 chunk_type = chunk_to_extended(chunk_type) &
3647 BTRFS_EXTENDED_PROFILE_MASK;
3648
3649 if (bargs->profiles & chunk_type)
3650 return 0;
3651
3652 return 1;
3653}
3654
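/*
 * Filter out chunks whose used bytes fall outside the
 * [usage_min%, usage_max%) range of the chunk length. For example, with
 * usage_min=10 and usage_max=50 on a 1GiB chunk, only chunks using at
 * least ~102MiB and less than 512MiB are kept for balancing.
 */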
3655static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3656 struct btrfs_balance_args *bargs)
3657{
3658 struct btrfs_block_group *cache;
3659 u64 chunk_used;
3660 u64 user_thresh_min;
3661 u64 user_thresh_max;
3662 int ret = 1;
3663
3664 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3665 chunk_used = cache->used;
3666
3667 if (bargs->usage_min == 0)
3668 user_thresh_min = 0;
3669 else
3670 user_thresh_min = mult_perc(cache->length, bargs->usage_min);
3671
3672 if (bargs->usage_max == 0)
3673 user_thresh_max = 1;
3674 else if (bargs->usage_max > 100)
3675 user_thresh_max = cache->length;
3676 else
3677 user_thresh_max = mult_perc(cache->length, bargs->usage_max);
3678
3679 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3680 ret = 0;
3681
3682 btrfs_put_block_group(cache);
3683 return ret;
3684}
3685
3686static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3687 u64 chunk_offset, struct btrfs_balance_args *bargs)
3688{
3689 struct btrfs_block_group *cache;
3690 u64 chunk_used, user_thresh;
3691 int ret = 1;
3692
3693 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3694 chunk_used = cache->used;
3695
3696 if (bargs->usage_min == 0)
3697 user_thresh = 1;
3698 else if (bargs->usage > 100)
3699 user_thresh = cache->length;
3700 else
3701 user_thresh = mult_perc(cache->length, bargs->usage);
3702
3703 if (chunk_used < user_thresh)
3704 ret = 0;
3705
3706 btrfs_put_block_group(cache);
3707 return ret;
3708}
3709
3710static int chunk_devid_filter(struct extent_buffer *leaf,
3711 struct btrfs_chunk *chunk,
3712 struct btrfs_balance_args *bargs)
3713{
3714 struct btrfs_stripe *stripe;
3715 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3716 int i;
3717
3718 for (i = 0; i < num_stripes; i++) {
3719 stripe = btrfs_stripe_nr(chunk, i);
3720 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3721 return 0;
3722 }
3723
3724 return 1;
3725}
3726
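/*
 * Number of stripes that carry data: parity stripes are subtracted and
 * mirrored copies divided out. E.g. with the btrfs_raid_array values,
 * RAID10 with 4 stripes gives (4 - 0) / 2 = 2 data stripes, and RAID6
 * with 6 stripes gives (6 - 2) / 1 = 4.
 */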
3727static u64 calc_data_stripes(u64 type, int num_stripes)
3728{
3729 const int index = btrfs_bg_flags_to_raid_index(type);
3730 const int ncopies = btrfs_raid_array[index].ncopies;
3731 const int nparity = btrfs_raid_array[index].nparity;
3732
3733 return (num_stripes - nparity) / ncopies;
3734}
3735
3736/* [pstart, pend) */
3737static int chunk_drange_filter(struct extent_buffer *leaf,
3738 struct btrfs_chunk *chunk,
3739 struct btrfs_balance_args *bargs)
3740{
3741 struct btrfs_stripe *stripe;
3742 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3743 u64 stripe_offset;
3744 u64 stripe_length;
3745 u64 type;
3746 int factor;
3747 int i;
3748
3749 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3750 return 0;
3751
3752 type = btrfs_chunk_type(leaf, chunk);
3753 factor = calc_data_stripes(type, num_stripes);
3754
3755 for (i = 0; i < num_stripes; i++) {
3756 stripe = btrfs_stripe_nr(chunk, i);
3757 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3758 continue;
3759
3760 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3761 stripe_length = btrfs_chunk_length(leaf, chunk);
3762 stripe_length = div_u64(stripe_length, factor);
3763
3764 if (stripe_offset < bargs->pend &&
3765 stripe_offset + stripe_length > bargs->pstart)
3766 return 0;
3767 }
3768
3769 return 1;
3770}
3771
3772/* [vstart, vend) */
3773static int chunk_vrange_filter(struct extent_buffer *leaf,
3774 struct btrfs_chunk *chunk,
3775 u64 chunk_offset,
3776 struct btrfs_balance_args *bargs)
3777{
3778 if (chunk_offset < bargs->vend &&
3779 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3780 /* at least part of the chunk is inside this vrange */
3781 return 0;
3782
3783 return 1;
3784}
3785
3786static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3787 struct btrfs_chunk *chunk,
3788 struct btrfs_balance_args *bargs)
3789{
3790 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3791
3792 if (bargs->stripes_min <= num_stripes
3793 && num_stripes <= bargs->stripes_max)
3794 return 0;
3795
3796 return 1;
3797}
3798
3799static int chunk_soft_convert_filter(u64 chunk_type,
3800 struct btrfs_balance_args *bargs)
3801{
3802 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3803 return 0;
3804
3805 chunk_type = chunk_to_extended(chunk_type) &
3806 BTRFS_EXTENDED_PROFILE_MASK;
3807
3808 if (bargs->target == chunk_type)
3809 return 1;
3810
3811 return 0;
3812}
3813
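/*
 * Run the chunk through all configured balance filters. Returns 1 if the
 * chunk should be relocated, 0 if any filter rejects it.
 */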
3814static int should_balance_chunk(struct extent_buffer *leaf,
3815 struct btrfs_chunk *chunk, u64 chunk_offset)
3816{
3817 struct btrfs_fs_info *fs_info = leaf->fs_info;
3818 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3819 struct btrfs_balance_args *bargs = NULL;
3820 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3821
3822 /* type filter */
3823 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3824 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3825 return 0;
3826 }
3827
3828 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3829 bargs = &bctl->data;
3830 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3831 bargs = &bctl->sys;
3832 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3833 bargs = &bctl->meta;
3834
3835 /* profiles filter */
3836 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3837 chunk_profiles_filter(chunk_type, bargs)) {
3838 return 0;
3839 }
3840
3841 /* usage filter */
3842 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3843 chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3844 return 0;
3845 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3846 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3847 return 0;
3848 }
3849
3850 /* devid filter */
3851 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3852 chunk_devid_filter(leaf, chunk, bargs)) {
3853 return 0;
3854 }
3855
3856 /* drange filter, makes sense only with devid filter */
3857 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3858 chunk_drange_filter(leaf, chunk, bargs)) {
3859 return 0;
3860 }
3861
3862 /* vrange filter */
3863 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3864 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3865 return 0;
3866 }
3867
3868 /* stripes filter */
3869 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3870 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3871 return 0;
3872 }
3873
3874 /* soft profile changing mode */
3875 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3876 chunk_soft_convert_filter(chunk_type, bargs)) {
3877 return 0;
3878 }
3879
	/*
	 * Limited by count, must be the last filter.
	 */
3883 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3884 if (bargs->limit == 0)
3885 return 0;
3886 else
3887 bargs->limit--;
3888 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3889 /*
3890 * Same logic as the 'limit' filter; the minimum cannot be
3891 * determined here because we do not have the global information
3892 * about the count of all chunks that satisfy the filters.
3893 */
3894 if (bargs->limit_max == 0)
3895 return 0;
3896 else
3897 bargs->limit_max--;
3898 }
3899
3900 return 1;
3901}
3902
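/*
 * The main balance loop. Walks the chunk tree backwards in two passes: the
 * first pass only counts the chunks that pass the filters (for the stat
 * counters and the limit_min filter), the second pass relocates them.
 */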
3903static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3904{
3905 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3906 struct btrfs_root *chunk_root = fs_info->chunk_root;
3907 u64 chunk_type;
3908 struct btrfs_chunk *chunk;
3909 struct btrfs_path *path = NULL;
3910 struct btrfs_key key;
3911 struct btrfs_key found_key;
3912 struct extent_buffer *leaf;
3913 int slot;
3914 int ret;
3915 int enospc_errors = 0;
3916 bool counting = true;
	/*
	 * The single value limit and min/max limits use the same bytes in the
	 * balance args ('limit' is a union of the u64 single value and the
	 * u32 limit_min/limit_max pair), so save them here and restore them
	 * for the second pass.
	 */
3918 u64 limit_data = bctl->data.limit;
3919 u64 limit_meta = bctl->meta.limit;
3920 u64 limit_sys = bctl->sys.limit;
3921 u32 count_data = 0;
3922 u32 count_meta = 0;
3923 u32 count_sys = 0;
3924 int chunk_reserved = 0;
3925
3926 path = btrfs_alloc_path();
3927 if (!path) {
3928 ret = -ENOMEM;
3929 goto error;
3930 }
3931
3932 /* zero out stat counters */
3933 spin_lock(&fs_info->balance_lock);
3934 memset(&bctl->stat, 0, sizeof(bctl->stat));
3935 spin_unlock(&fs_info->balance_lock);
3936again:
3937 if (!counting) {
		/*
		 * The single value limit and min/max limits use the same bytes
		 * in the balance args, so restore the values saved before the
		 * counting pass.
		 */
3942 bctl->data.limit = limit_data;
3943 bctl->meta.limit = limit_meta;
3944 bctl->sys.limit = limit_sys;
3945 }
3946 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3947 key.offset = (u64)-1;
3948 key.type = BTRFS_CHUNK_ITEM_KEY;
3949
3950 while (1) {
3951 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3952 atomic_read(&fs_info->balance_cancel_req)) {
3953 ret = -ECANCELED;
3954 goto error;
3955 }
3956
3957 mutex_lock(&fs_info->reclaim_bgs_lock);
3958 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3959 if (ret < 0) {
3960 mutex_unlock(&fs_info->reclaim_bgs_lock);
3961 goto error;
3962 }
3963
		/*
		 * This shouldn't happen, it means the last relocation
		 * failed.
		 */
3968 if (ret == 0)
3969 BUG(); /* FIXME break ? */
3970
3971 ret = btrfs_previous_item(chunk_root, path, 0,
3972 BTRFS_CHUNK_ITEM_KEY);
3973 if (ret) {
3974 mutex_unlock(&fs_info->reclaim_bgs_lock);
3975 ret = 0;
3976 break;
3977 }
3978
3979 leaf = path->nodes[0];
3980 slot = path->slots[0];
3981 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3982
3983 if (found_key.objectid != key.objectid) {
3984 mutex_unlock(&fs_info->reclaim_bgs_lock);
3985 break;
3986 }
3987
3988 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3989 chunk_type = btrfs_chunk_type(leaf, chunk);
3990
3991 if (!counting) {
3992 spin_lock(&fs_info->balance_lock);
3993 bctl->stat.considered++;
3994 spin_unlock(&fs_info->balance_lock);
3995 }
3996
3997 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3998
3999 btrfs_release_path(path);
4000 if (!ret) {
4001 mutex_unlock(&fs_info->reclaim_bgs_lock);
4002 goto loop;
4003 }
4004
4005 if (counting) {
4006 mutex_unlock(&fs_info->reclaim_bgs_lock);
4007 spin_lock(&fs_info->balance_lock);
4008 bctl->stat.expected++;
4009 spin_unlock(&fs_info->balance_lock);
4010
4011 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
4012 count_data++;
4013 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
4014 count_sys++;
4015 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
4016 count_meta++;
4017
4018 goto loop;
4019 }
4020
		/*
		 * Apply the limit_min filter. No need to check whether the
		 * LIMITS filter is used, as limit_min is 0 by default.
		 */
4025 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
4026 count_data < bctl->data.limit_min)
4027 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
4028 count_meta < bctl->meta.limit_min)
4029 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
4030 count_sys < bctl->sys.limit_min)) {
4031 mutex_unlock(&fs_info->reclaim_bgs_lock);
4032 goto loop;
4033 }
4034
4035 if (!chunk_reserved) {
4036 /*
4037 * We may be relocating the only data chunk we have,
			 * which could potentially end up losing the data
			 * raid profile, so let's allocate an empty one in
			 * advance.
4041 */
4042 ret = btrfs_may_alloc_data_chunk(fs_info,
4043 found_key.offset);
4044 if (ret < 0) {
4045 mutex_unlock(&fs_info->reclaim_bgs_lock);
4046 goto error;
4047 } else if (ret == 1) {
4048 chunk_reserved = 1;
4049 }
4050 }
4051
4052 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
4053 mutex_unlock(&fs_info->reclaim_bgs_lock);
4054 if (ret == -ENOSPC) {
4055 enospc_errors++;
4056 } else if (ret == -ETXTBSY) {
4057 btrfs_info(fs_info,
4058 "skipping relocation of block group %llu due to active swapfile",
4059 found_key.offset);
4060 ret = 0;
4061 } else if (ret) {
4062 goto error;
4063 } else {
4064 spin_lock(&fs_info->balance_lock);
4065 bctl->stat.completed++;
4066 spin_unlock(&fs_info->balance_lock);
4067 }
4068loop:
4069 if (found_key.offset == 0)
4070 break;
4071 key.offset = found_key.offset - 1;
4072 }
4073
4074 if (counting) {
4075 btrfs_release_path(path);
4076 counting = false;
4077 goto again;
4078 }
4079error:
4080 btrfs_free_path(path);
4081 if (enospc_errors) {
4082 btrfs_info(fs_info, "%d enospc errors during balance",
4083 enospc_errors);
4084 if (!ret)
4085 ret = -ENOSPC;
4086 }
4087
4088 return ret;
4089}
4090
4091/*
4092 * See if a given profile is valid and reduced.
4093 *
4094 * @flags: profile to validate
4095 * @extended: if true @flags is treated as an extended profile
4096 */
4097static int alloc_profile_is_valid(u64 flags, int extended)
4098{
4099 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
4100 BTRFS_BLOCK_GROUP_PROFILE_MASK);
4101
4102 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
4103
4104 /* 1) check that all other bits are zeroed */
4105 if (flags & ~mask)
4106 return 0;
4107
4108 /* 2) see if profile is reduced */
4109 if (flags == 0)
4110 return !extended; /* "0" is valid for usual profiles */
4111
4112 return has_single_bit_set(flags);
4113}
4114
4115/*
4116 * Validate target profile against allowed profiles and return true if it's OK.
4117 * Otherwise print the error message and return false.
4118 */
4119static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
4120 const struct btrfs_balance_args *bargs,
4121 u64 allowed, const char *type)
4122{
4123 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4124 return true;
4125
4126 /* Profile is valid and does not have bits outside of the allowed set */
4127 if (alloc_profile_is_valid(bargs->target, 1) &&
4128 (bargs->target & ~allowed) == 0)
4129 return true;
4130
4131 btrfs_err(fs_info, "balance: invalid convert %s profile %s",
4132 type, btrfs_bg_type_to_raid_name(bargs->target));
4133 return false;
4134}
4135
4136/*
4137 * Fill @buf with textual description of balance filter flags @bargs, up to
4138 * @size_buf including the terminating null. The output may be trimmed if it
4139 * does not fit into the provided buffer.
4140 */
4141static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4142 u32 size_buf)
4143{
4144 int ret;
4145 u32 size_bp = size_buf;
4146 char *bp = buf;
4147 u64 flags = bargs->flags;
4148 char tmp_buf[128] = {'\0'};
4149
4150 if (!flags)
4151 return;
4152
4153#define CHECK_APPEND_NOARG(a) \
4154 do { \
4155 ret = snprintf(bp, size_bp, (a)); \
4156 if (ret < 0 || ret >= size_bp) \
4157 goto out_overflow; \
4158 size_bp -= ret; \
4159 bp += ret; \
4160 } while (0)
4161
4162#define CHECK_APPEND_1ARG(a, v1) \
4163 do { \
4164 ret = snprintf(bp, size_bp, (a), (v1)); \
4165 if (ret < 0 || ret >= size_bp) \
4166 goto out_overflow; \
4167 size_bp -= ret; \
4168 bp += ret; \
4169 } while (0)
4170
4171#define CHECK_APPEND_2ARG(a, v1, v2) \
4172 do { \
4173 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
4174 if (ret < 0 || ret >= size_bp) \
4175 goto out_overflow; \
4176 size_bp -= ret; \
4177 bp += ret; \
4178 } while (0)
4179
4180 if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4181 CHECK_APPEND_1ARG("convert=%s,",
4182 btrfs_bg_type_to_raid_name(bargs->target));
4183
4184 if (flags & BTRFS_BALANCE_ARGS_SOFT)
4185 CHECK_APPEND_NOARG("soft,");
4186
4187 if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4188 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4189 sizeof(tmp_buf));
4190 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4191 }
4192
4193 if (flags & BTRFS_BALANCE_ARGS_USAGE)
4194 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4195
4196 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4197 CHECK_APPEND_2ARG("usage=%u..%u,",
4198 bargs->usage_min, bargs->usage_max);
4199
4200 if (flags & BTRFS_BALANCE_ARGS_DEVID)
4201 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4202
4203 if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4204 CHECK_APPEND_2ARG("drange=%llu..%llu,",
4205 bargs->pstart, bargs->pend);
4206
4207 if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4208 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4209 bargs->vstart, bargs->vend);
4210
4211 if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4212 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4213
4214 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4215 CHECK_APPEND_2ARG("limit=%u..%u,",
4216 bargs->limit_min, bargs->limit_max);
4217
4218 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4219 CHECK_APPEND_2ARG("stripes=%u..%u,",
4220 bargs->stripes_min, bargs->stripes_max);
4221
4222#undef CHECK_APPEND_2ARG
4223#undef CHECK_APPEND_1ARG
4224#undef CHECK_APPEND_NOARG
4225
4226out_overflow:
4227
4228 if (size_bp < size_buf)
4229 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4230 else
4231 buf[0] = '\0';
4232}
4233
4234static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4235{
4236 u32 size_buf = 1024;
4237 char tmp_buf[192] = {'\0'};
4238 char *buf;
4239 char *bp;
4240 u32 size_bp = size_buf;
4241 int ret;
4242 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4243
4244 buf = kzalloc(size_buf, GFP_KERNEL);
4245 if (!buf)
4246 return;
4247
4248 bp = buf;
4249
4250#define CHECK_APPEND_1ARG(a, v1) \
4251 do { \
4252 ret = snprintf(bp, size_bp, (a), (v1)); \
4253 if (ret < 0 || ret >= size_bp) \
4254 goto out_overflow; \
4255 size_bp -= ret; \
4256 bp += ret; \
4257 } while (0)
4258
4259 if (bctl->flags & BTRFS_BALANCE_FORCE)
4260 CHECK_APPEND_1ARG("%s", "-f ");
4261
4262 if (bctl->flags & BTRFS_BALANCE_DATA) {
4263 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4264 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4265 }
4266
4267 if (bctl->flags & BTRFS_BALANCE_METADATA) {
4268 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4269 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4270 }
4271
4272 if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4273 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4274 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4275 }
4276
4277#undef CHECK_APPEND_1ARG
4278
4279out_overflow:
4280
4281 if (size_bp < size_buf)
4282 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4283 btrfs_info(fs_info, "balance: %s %s",
4284 (bctl->flags & BTRFS_BALANCE_RESUME) ?
4285 "resume" : "start", buf);
4286
4287 kfree(buf);
4288}
4289
4290/*
4291 * Should be called with balance mutexe held
4292 */
4293int btrfs_balance(struct btrfs_fs_info *fs_info,
4294 struct btrfs_balance_control *bctl,
4295 struct btrfs_ioctl_balance_args *bargs)
4296{
4297 u64 meta_target, data_target;
4298 u64 allowed;
4299 int mixed = 0;
4300 int ret;
4301 u64 num_devices;
4302 unsigned seq;
4303 bool reducing_redundancy;
4304 bool paused = false;
4305 int i;
4306
4307 if (btrfs_fs_closing(fs_info) ||
4308 atomic_read(&fs_info->balance_pause_req) ||
4309 btrfs_should_cancel_balance(fs_info)) {
4310 ret = -EINVAL;
4311 goto out;
4312 }
4313
4314 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4315 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4316 mixed = 1;
4317
4318 /*
4319 * In case of mixed groups both data and meta should be picked,
4320 * and identical options should be given for both of them.
4321 */
4322 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4323 if (mixed && (bctl->flags & allowed)) {
4324 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4325 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4326 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4327 btrfs_err(fs_info,
4328 "balance: mixed groups data and metadata options must be the same");
4329 ret = -EINVAL;
4330 goto out;
4331 }
4332 }
4333
4334 /*
4335 * rw_devices will not change at the moment, device add/delete/replace
4336 * are exclusive
4337 */
4338 num_devices = fs_info->fs_devices->rw_devices;
4339
4340 /*
4341 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4342 * special bit for it, to make it easier to distinguish. Thus we need
4343 * to set it manually, or balance would refuse the profile.
4344 */
4345 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4346 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4347 if (num_devices >= btrfs_raid_array[i].devs_min)
4348 allowed |= btrfs_raid_array[i].bg_flag;
4349
4350 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4351 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4352 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
4353 ret = -EINVAL;
4354 goto out;
4355 }
4356
4357 /*
4358 * Allow to reduce metadata or system integrity only if force set for
4359 * profiles with redundancy (copies, parity)
4360 */
4361 allowed = 0;
4362 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4363 if (btrfs_raid_array[i].ncopies >= 2 ||
4364 btrfs_raid_array[i].tolerated_failures >= 1)
4365 allowed |= btrfs_raid_array[i].bg_flag;
4366 }
4367 do {
4368 seq = read_seqbegin(&fs_info->profiles_lock);
4369
4370 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4371 (fs_info->avail_system_alloc_bits & allowed) &&
4372 !(bctl->sys.target & allowed)) ||
4373 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4374 (fs_info->avail_metadata_alloc_bits & allowed) &&
4375 !(bctl->meta.target & allowed)))
4376 reducing_redundancy = true;
4377 else
4378 reducing_redundancy = false;
4379
4380 /* if we're not converting, the target field is uninitialized */
4381 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4382 bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4383 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4384 bctl->data.target : fs_info->avail_data_alloc_bits;
4385 } while (read_seqretry(&fs_info->profiles_lock, seq));
4386
4387 if (reducing_redundancy) {
4388 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4389 btrfs_info(fs_info,
4390 "balance: force reducing metadata redundancy");
4391 } else {
4392 btrfs_err(fs_info,
4393 "balance: reduces metadata redundancy, use --force if you want this");
4394 ret = -EINVAL;
4395 goto out;
4396 }
4397 }
4398
4399 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4400 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4401 btrfs_warn(fs_info,
4402 "balance: metadata profile %s has lower redundancy than data profile %s",
4403 btrfs_bg_type_to_raid_name(meta_target),
4404 btrfs_bg_type_to_raid_name(data_target));
4405 }
4406
4407 ret = insert_balance_item(fs_info, bctl);
4408 if (ret && ret != -EEXIST)
4409 goto out;
4410
4411 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4412 BUG_ON(ret == -EEXIST);
4413 BUG_ON(fs_info->balance_ctl);
4414 spin_lock(&fs_info->balance_lock);
4415 fs_info->balance_ctl = bctl;
4416 spin_unlock(&fs_info->balance_lock);
4417 } else {
4418 BUG_ON(ret != -EEXIST);
4419 spin_lock(&fs_info->balance_lock);
4420 update_balance_args(bctl);
4421 spin_unlock(&fs_info->balance_lock);
4422 }
4423
4424 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4425 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4426 describe_balance_start_or_resume(fs_info);
4427 mutex_unlock(&fs_info->balance_mutex);
4428
4429 ret = __btrfs_balance(fs_info);
4430
4431 mutex_lock(&fs_info->balance_mutex);
4432 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) {
4433 btrfs_info(fs_info, "balance: paused");
4434 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
4435 paused = true;
4436 }
4437 /*
4438 * Balance can be canceled by:
4439 *
4440 * - Regular cancel request
4441 * Then ret == -ECANCELED and balance_cancel_req > 0
4442 *
4443 * - Fatal signal to "btrfs" process
	 *   Either the signal was caught by wait_reserve_ticket() and callers
	 *   got -EINTR, or it was caught by btrfs_should_cancel_balance() and
	 *   they got -ECANCELED.
4447 * Either way, in this case balance_cancel_req = 0, and
4448 * ret == -EINTR or ret == -ECANCELED.
4449 *
4450 * So here we only check the return value to catch canceled balance.
4451 */
4452 else if (ret == -ECANCELED || ret == -EINTR)
4453 btrfs_info(fs_info, "balance: canceled");
4454 else
4455 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4456
4457 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4458
4459 if (bargs) {
4460 memset(bargs, 0, sizeof(*bargs));
4461 btrfs_update_ioctl_balance_args(fs_info, bargs);
4462 }
4463
4464 /* We didn't pause, we can clean everything up. */
4465 if (!paused) {
4466 reset_balance_state(fs_info);
4467 btrfs_exclop_finish(fs_info);
4468 }
4469
4470 wake_up(&fs_info->balance_wait_q);
4471
4472 return ret;
4473out:
4474 if (bctl->flags & BTRFS_BALANCE_RESUME)
4475 reset_balance_state(fs_info);
4476 else
4477 kfree(bctl);
4478 btrfs_exclop_finish(fs_info);
4479
4480 return ret;
4481}
4482
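/* Kthread body used to resume a previously paused or interrupted balance. */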
4483static int balance_kthread(void *data)
4484{
4485 struct btrfs_fs_info *fs_info = data;
4486 int ret = 0;
4487
4488 sb_start_write(fs_info->sb);
4489 mutex_lock(&fs_info->balance_mutex);
4490 if (fs_info->balance_ctl)
4491 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4492 mutex_unlock(&fs_info->balance_mutex);
4493 sb_end_write(fs_info->sb);
4494
4495 return ret;
4496}
4497
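/*
 * Resume a paused balance asynchronously via a kthread, typically after a
 * ro->rw remount. Respects the skip_balance mount option.
 */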
4498int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4499{
4500 struct task_struct *tsk;
4501
4502 mutex_lock(&fs_info->balance_mutex);
4503 if (!fs_info->balance_ctl) {
4504 mutex_unlock(&fs_info->balance_mutex);
4505 return 0;
4506 }
4507 mutex_unlock(&fs_info->balance_mutex);
4508
4509 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4510 btrfs_info(fs_info, "balance: resume skipped");
4511 return 0;
4512 }
4513
4514 spin_lock(&fs_info->super_lock);
4515 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
4516 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
4517 spin_unlock(&fs_info->super_lock);
4518 /*
	 * A ro->rw remount sequence should continue with the paused balance
	 * regardless of who paused it (the system or the user, as of now), so
	 * set the resume flag.
4522 */
4523 spin_lock(&fs_info->balance_lock);
4524 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4525 spin_unlock(&fs_info->balance_lock);
4526
4527 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4528 return PTR_ERR_OR_ZERO(tsk);
4529}
4530
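/*
 * Recover a paused balance at mount time: read the balance item from the
 * tree root and recreate fs_info::balance_ctl with the resume flag set.
 */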
4531int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4532{
4533 struct btrfs_balance_control *bctl;
4534 struct btrfs_balance_item *item;
4535 struct btrfs_disk_balance_args disk_bargs;
4536 struct btrfs_path *path;
4537 struct extent_buffer *leaf;
4538 struct btrfs_key key;
4539 int ret;
4540
4541 path = btrfs_alloc_path();
4542 if (!path)
4543 return -ENOMEM;
4544
4545 key.objectid = BTRFS_BALANCE_OBJECTID;
4546 key.type = BTRFS_TEMPORARY_ITEM_KEY;
4547 key.offset = 0;
4548
4549 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4550 if (ret < 0)
4551 goto out;
4552 if (ret > 0) { /* ret = -ENOENT; */
4553 ret = 0;
4554 goto out;
4555 }
4556
4557 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4558 if (!bctl) {
4559 ret = -ENOMEM;
4560 goto out;
4561 }
4562
4563 leaf = path->nodes[0];
4564 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4565
4566 bctl->flags = btrfs_balance_flags(leaf, item);
4567 bctl->flags |= BTRFS_BALANCE_RESUME;
4568
4569 btrfs_balance_data(leaf, item, &disk_bargs);
4570 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4571 btrfs_balance_meta(leaf, item, &disk_bargs);
4572 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4573 btrfs_balance_sys(leaf, item, &disk_bargs);
4574 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4575
4576 /*
	 * This should never happen, as the paused balance state is recovered
	 * during mount without any chance of other exclusive ops colliding.
	 *
	 * This gives the exclusive op status to balance and keeps it in a
	 * paused state until user intervention (cancel or umount). If the
	 * ownership cannot be assigned, show a message but do not fail. The
	 * balance is in a paused state and must have fs_info::balance_ctl
	 * properly set up.
4585 */
4586 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED))
4587 btrfs_warn(fs_info,
4588 "balance: cannot set exclusive op status, resume manually");
4589
4590 btrfs_release_path(path);
4591
4592 mutex_lock(&fs_info->balance_mutex);
4593 BUG_ON(fs_info->balance_ctl);
4594 spin_lock(&fs_info->balance_lock);
4595 fs_info->balance_ctl = bctl;
4596 spin_unlock(&fs_info->balance_lock);
4597 mutex_unlock(&fs_info->balance_mutex);
4598out:
4599 btrfs_free_path(path);
4600 return ret;
4601}
4602
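/*
 * Pause a running balance: raise the pause request and wait until the
 * relocation loop notices it and stops. The balance item stays on disk so
 * the operation can be resumed later.
 */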
4603int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4604{
4605 int ret = 0;
4606
4607 mutex_lock(&fs_info->balance_mutex);
4608 if (!fs_info->balance_ctl) {
4609 mutex_unlock(&fs_info->balance_mutex);
4610 return -ENOTCONN;
4611 }
4612
4613 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4614 atomic_inc(&fs_info->balance_pause_req);
4615 mutex_unlock(&fs_info->balance_mutex);
4616
4617 wait_event(fs_info->balance_wait_q,
4618 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4619
4620 mutex_lock(&fs_info->balance_mutex);
4621 /* we are good with balance_ctl ripped off from under us */
4622 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4623 atomic_dec(&fs_info->balance_pause_req);
4624 } else {
4625 ret = -ENOTCONN;
4626 }
4627
4628 mutex_unlock(&fs_info->balance_mutex);
4629 return ret;
4630}
4631
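/*
 * Cancel a running or paused balance. This deletes the balance item, so it
 * is not allowed on a read-only filesystem.
 */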
4632int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4633{
4634 mutex_lock(&fs_info->balance_mutex);
4635 if (!fs_info->balance_ctl) {
4636 mutex_unlock(&fs_info->balance_mutex);
4637 return -ENOTCONN;
4638 }
4639
4640 /*
4641 * A paused balance with the item stored on disk can be resumed at
4642 * mount time if the mount is read-write. Otherwise it's still paused
4643 * and we must not allow cancelling as it deletes the item.
4644 */
4645 if (sb_rdonly(fs_info->sb)) {
4646 mutex_unlock(&fs_info->balance_mutex);
4647 return -EROFS;
4648 }
4649
4650 atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * If balance is running, just wait and return; the balance item is
	 * deleted in btrfs_balance() in this case.
	 */
4655 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4656 mutex_unlock(&fs_info->balance_mutex);
4657 wait_event(fs_info->balance_wait_q,
4658 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4659 mutex_lock(&fs_info->balance_mutex);
4660 } else {
4661 mutex_unlock(&fs_info->balance_mutex);
4662 /*
		 * Lock released to allow other waiters to continue; we'll
		 * reexamine the status once we retake it.
4665 */
4666 mutex_lock(&fs_info->balance_mutex);
4667
4668 if (fs_info->balance_ctl) {
4669 reset_balance_state(fs_info);
4670 btrfs_exclop_finish(fs_info);
4671 btrfs_info(fs_info, "balance: canceled");
4672 }
4673 }
4674
4675 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4676 atomic_dec(&fs_info->balance_cancel_req);
4677 mutex_unlock(&fs_info->balance_mutex);
4678 return 0;
4679}
4680
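/*
 * Scan all root items and make sure every subvolume UUID (and received
 * UUID) has a matching entry in the UUID tree.
 */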
4681int btrfs_uuid_scan_kthread(void *data)
4682{
4683 struct btrfs_fs_info *fs_info = data;
4684 struct btrfs_root *root = fs_info->tree_root;
4685 struct btrfs_key key;
4686 struct btrfs_path *path = NULL;
4687 int ret = 0;
4688 struct extent_buffer *eb;
4689 int slot;
4690 struct btrfs_root_item root_item;
4691 u32 item_size;
4692 struct btrfs_trans_handle *trans = NULL;
4693 bool closing = false;
4694
4695 path = btrfs_alloc_path();
4696 if (!path) {
4697 ret = -ENOMEM;
4698 goto out;
4699 }
4700
4701 key.objectid = 0;
4702 key.type = BTRFS_ROOT_ITEM_KEY;
4703 key.offset = 0;
4704
4705 while (1) {
4706 if (btrfs_fs_closing(fs_info)) {
4707 closing = true;
4708 break;
4709 }
4710 ret = btrfs_search_forward(root, &key, path,
4711 BTRFS_OLDEST_GENERATION);
4712 if (ret) {
4713 if (ret > 0)
4714 ret = 0;
4715 break;
4716 }
4717
4718 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4719 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4720 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4721 key.objectid > BTRFS_LAST_FREE_OBJECTID)
4722 goto skip;
4723
4724 eb = path->nodes[0];
4725 slot = path->slots[0];
4726 item_size = btrfs_item_size(eb, slot);
4727 if (item_size < sizeof(root_item))
4728 goto skip;
4729
4730 read_extent_buffer(eb, &root_item,
4731 btrfs_item_ptr_offset(eb, slot),
4732 (int)sizeof(root_item));
4733 if (btrfs_root_refs(&root_item) == 0)
4734 goto skip;
4735
4736 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4737 !btrfs_is_empty_uuid(root_item.received_uuid)) {
4738 if (trans)
4739 goto update_tree;
4740
4741 btrfs_release_path(path);
4742 /*
4743 * 1 - subvol uuid item
4744 * 1 - received_subvol uuid item
4745 */
4746 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4747 if (IS_ERR(trans)) {
4748 ret = PTR_ERR(trans);
4749 break;
4750 }
4751 continue;
4752 } else {
4753 goto skip;
4754 }
4755update_tree:
4756 btrfs_release_path(path);
4757 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4758 ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4759 BTRFS_UUID_KEY_SUBVOL,
4760 key.objectid);
4761 if (ret < 0) {
4762 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4763 ret);
4764 break;
4765 }
4766 }
4767
4768 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4769 ret = btrfs_uuid_tree_add(trans,
4770 root_item.received_uuid,
4771 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4772 key.objectid);
4773 if (ret < 0) {
4774 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4775 ret);
4776 break;
4777 }
4778 }
4779
4780skip:
4781 btrfs_release_path(path);
4782 if (trans) {
4783 ret = btrfs_end_transaction(trans);
4784 trans = NULL;
4785 if (ret)
4786 break;
4787 }
4788
4789 if (key.offset < (u64)-1) {
4790 key.offset++;
4791 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4792 key.offset = 0;
4793 key.type = BTRFS_ROOT_ITEM_KEY;
4794 } else if (key.objectid < (u64)-1) {
4795 key.offset = 0;
4796 key.type = BTRFS_ROOT_ITEM_KEY;
4797 key.objectid++;
4798 } else {
4799 break;
4800 }
4801 cond_resched();
4802 }
4803
4804out:
4805 btrfs_free_path(path);
4806 if (trans && !IS_ERR(trans))
4807 btrfs_end_transaction(trans);
4808 if (ret)
4809 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4810 else if (!closing)
4811 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4812 up(&fs_info->uuid_tree_rescan_sem);
4813 return 0;
4814}
4815
4816int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4817{
4818 struct btrfs_trans_handle *trans;
4819 struct btrfs_root *tree_root = fs_info->tree_root;
4820 struct btrfs_root *uuid_root;
4821 struct task_struct *task;
4822 int ret;
4823
4824 /*
4825 * 1 - root node
4826 * 1 - root item
4827 */
4828 trans = btrfs_start_transaction(tree_root, 2);
4829 if (IS_ERR(trans))
4830 return PTR_ERR(trans);
4831
4832 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4833 if (IS_ERR(uuid_root)) {
4834 ret = PTR_ERR(uuid_root);
4835 btrfs_abort_transaction(trans, ret);
4836 btrfs_end_transaction(trans);
4837 return ret;
4838 }
4839
4840 fs_info->uuid_root = uuid_root;
4841
4842 ret = btrfs_commit_transaction(trans);
4843 if (ret)
4844 return ret;
4845
4846 down(&fs_info->uuid_tree_rescan_sem);
4847 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4848 if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4850 btrfs_warn(fs_info, "failed to start uuid_scan task");
4851 up(&fs_info->uuid_tree_rescan_sem);
4852 return PTR_ERR(task);
4853 }
4854
4855 return 0;
4856}
4857
4858/*
4859 * shrinking a device means finding all of the device extents past
4860 * the new size, and then following the back refs to the chunks.
4861 * The chunk relocation code actually frees the device extent
4862 */
4863int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4864{
4865 struct btrfs_fs_info *fs_info = device->fs_info;
4866 struct btrfs_root *root = fs_info->dev_root;
4867 struct btrfs_trans_handle *trans;
4868 struct btrfs_dev_extent *dev_extent = NULL;
4869 struct btrfs_path *path;
4870 u64 length;
4871 u64 chunk_offset;
4872 int ret;
4873 int slot;
4874 int failed = 0;
4875 bool retried = false;
4876 struct extent_buffer *l;
4877 struct btrfs_key key;
4878 struct btrfs_super_block *super_copy = fs_info->super_copy;
4879 u64 old_total = btrfs_super_total_bytes(super_copy);
4880 u64 old_size = btrfs_device_get_total_bytes(device);
4881 u64 diff;
4882 u64 start;
4883 u64 free_diff = 0;
4884
4885 new_size = round_down(new_size, fs_info->sectorsize);
4886 start = new_size;
4887 diff = round_down(old_size - new_size, fs_info->sectorsize);
4888
4889 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4890 return -EINVAL;
4891
4892 path = btrfs_alloc_path();
4893 if (!path)
4894 return -ENOMEM;
4895
4896 path->reada = READA_BACK;
4897
4898 trans = btrfs_start_transaction(root, 0);
4899 if (IS_ERR(trans)) {
4900 btrfs_free_path(path);
4901 return PTR_ERR(trans);
4902 }
4903
4904 mutex_lock(&fs_info->chunk_mutex);
4905
4906 btrfs_device_set_total_bytes(device, new_size);
4907 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4908 device->fs_devices->total_rw_bytes -= diff;
4909
4910 /*
4911 * The new free_chunk_space is new_size - used, so we have to
4912 * subtract the delta of the old free_chunk_space which included
4913 * old_size - used. If used > new_size then just subtract this
4914 * entire device's free space.
4915 */
4916 if (device->bytes_used < new_size)
4917 free_diff = (old_size - device->bytes_used) -
4918 (new_size - device->bytes_used);
4919 else
4920 free_diff = old_size - device->bytes_used;
4921 atomic64_sub(free_diff, &fs_info->free_chunk_space);
4922 }
4923
4924 /*
4925 * Once the device's size has been set to the new size, ensure all
4926 * in-memory chunks are synced to disk so that the loop below sees them
4927 * and relocates them accordingly.
4928 */
4929 if (contains_pending_extent(device, &start, diff)) {
4930 mutex_unlock(&fs_info->chunk_mutex);
4931 ret = btrfs_commit_transaction(trans);
4932 if (ret)
4933 goto done;
4934 } else {
4935 mutex_unlock(&fs_info->chunk_mutex);
4936 btrfs_end_transaction(trans);
4937 }
4938
4939again:
4940 key.objectid = device->devid;
4941 key.offset = (u64)-1;
4942 key.type = BTRFS_DEV_EXTENT_KEY;
4943
4944 do {
4945 mutex_lock(&fs_info->reclaim_bgs_lock);
4946 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4947 if (ret < 0) {
4948 mutex_unlock(&fs_info->reclaim_bgs_lock);
4949 goto done;
4950 }
4951
4952 ret = btrfs_previous_item(root, path, 0, key.type);
4953 if (ret) {
4954 mutex_unlock(&fs_info->reclaim_bgs_lock);
4955 if (ret < 0)
4956 goto done;
4957 ret = 0;
4958 btrfs_release_path(path);
4959 break;
4960 }
4961
4962 l = path->nodes[0];
4963 slot = path->slots[0];
4964 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4965
4966 if (key.objectid != device->devid) {
4967 mutex_unlock(&fs_info->reclaim_bgs_lock);
4968 btrfs_release_path(path);
4969 break;
4970 }
4971
4972 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4973 length = btrfs_dev_extent_length(l, dev_extent);
4974
4975 if (key.offset + length <= new_size) {
4976 mutex_unlock(&fs_info->reclaim_bgs_lock);
4977 btrfs_release_path(path);
4978 break;
4979 }
4980
4981 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4982 btrfs_release_path(path);
4983
4984 /*
4985 * We may be relocating the only data chunk we have,
		 * which could potentially end up losing the data
		 * raid profile, so let's allocate an empty one in
4988 * advance.
4989 */
4990 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4991 if (ret < 0) {
4992 mutex_unlock(&fs_info->reclaim_bgs_lock);
4993 goto done;
4994 }
4995
4996 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4997 mutex_unlock(&fs_info->reclaim_bgs_lock);
4998 if (ret == -ENOSPC) {
4999 failed++;
5000 } else if (ret) {
5001 if (ret == -ETXTBSY) {
5002 btrfs_warn(fs_info,
5003 "could not shrink block group %llu due to active swapfile",
5004 chunk_offset);
5005 }
5006 goto done;
5007 }
5008 } while (key.offset-- > 0);
5009
5010 if (failed && !retried) {
5011 failed = 0;
5012 retried = true;
5013 goto again;
5014 } else if (failed && retried) {
5015 ret = -ENOSPC;
5016 goto done;
5017 }
5018
5019 /* Shrinking succeeded, else we would be at "done". */
5020 trans = btrfs_start_transaction(root, 0);
5021 if (IS_ERR(trans)) {
5022 ret = PTR_ERR(trans);
5023 goto done;
5024 }
5025
5026 mutex_lock(&fs_info->chunk_mutex);
5027 /* Clear all state bits beyond the shrunk device size */
5028 clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
5029 CHUNK_STATE_MASK);
5030
5031 btrfs_device_set_disk_total_bytes(device, new_size);
5032 if (list_empty(&device->post_commit_list))
5033 list_add_tail(&device->post_commit_list,
5034 &trans->transaction->dev_update_list);
5035
5036 WARN_ON(diff > old_total);
5037 btrfs_set_super_total_bytes(super_copy,
5038 round_down(old_total - diff, fs_info->sectorsize));
5039 mutex_unlock(&fs_info->chunk_mutex);
5040
5041 btrfs_reserve_chunk_metadata(trans, false);
5042 /* Now btrfs_update_device() will change the on-disk size. */
5043 ret = btrfs_update_device(trans, device);
5044 btrfs_trans_release_chunk_metadata(trans);
5045 if (ret < 0) {
5046 btrfs_abort_transaction(trans, ret);
5047 btrfs_end_transaction(trans);
5048 } else {
5049 ret = btrfs_commit_transaction(trans);
5050 }
5051done:
5052 btrfs_free_path(path);
5053 if (ret) {
5054 mutex_lock(&fs_info->chunk_mutex);
5055 btrfs_device_set_total_bytes(device, old_size);
5056 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5057 device->fs_devices->total_rw_bytes += diff;
5058 atomic64_add(free_diff, &fs_info->free_chunk_space);
5059 }
5060 mutex_unlock(&fs_info->chunk_mutex);
5061 }
5062 return ret;
5063}
5064
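/*
 * Append the given key and chunk item to the sys_chunk_array in the
 * in-memory copy of the super block. Requires the chunk mutex.
 */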
5065static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
5066 struct btrfs_key *key,
5067 struct btrfs_chunk *chunk, int item_size)
5068{
5069 struct btrfs_super_block *super_copy = fs_info->super_copy;
5070 struct btrfs_disk_key disk_key;
5071 u32 array_size;
5072 u8 *ptr;
5073
5074 lockdep_assert_held(&fs_info->chunk_mutex);
5075
5076 array_size = btrfs_super_sys_array_size(super_copy);
5077 if (array_size + item_size + sizeof(disk_key)
5078 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
5079 return -EFBIG;
5080
5081 ptr = super_copy->sys_chunk_array + array_size;
5082 btrfs_cpu_key_to_disk(&disk_key, key);
5083 memcpy(ptr, &disk_key, sizeof(disk_key));
5084 ptr += sizeof(disk_key);
5085 memcpy(ptr, chunk, item_size);
5086 item_size += sizeof(disk_key);
5087 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
5088
5089 return 0;
5090}
5091
5092/*
5093 * sort the devices in descending order by max_avail, total_avail
5094 */
5095static int btrfs_cmp_device_info(const void *a, const void *b)
5096{
5097 const struct btrfs_device_info *di_a = a;
5098 const struct btrfs_device_info *di_b = b;
5099
5100 if (di_a->max_avail > di_b->max_avail)
5101 return -1;
5102 if (di_a->max_avail < di_b->max_avail)
5103 return 1;
5104 if (di_a->total_avail > di_b->total_avail)
5105 return -1;
5106 if (di_a->total_avail < di_b->total_avail)
5107 return 1;
5108 return 0;
5109}
5110
5111static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
5112{
5113 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5114 return;
5115
5116 btrfs_set_fs_incompat(info, RAID56);
5117}
5118
5119static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
5120{
5121 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
5122 return;
5123
5124 btrfs_set_fs_incompat(info, RAID1C34);
5125}
5126
5127/*
5128 * Structure used internally for btrfs_create_chunk() function.
5129 * Wraps needed parameters.
5130 */
5131struct alloc_chunk_ctl {
5132 u64 start;
5133 u64 type;
5134 /* Total number of stripes to allocate */
5135 int num_stripes;
5136 /* sub_stripes info for map */
5137 int sub_stripes;
5138 /* Stripes per device */
5139 int dev_stripes;
5140 /* Maximum number of devices to use */
5141 int devs_max;
5142 /* Minimum number of devices to use */
5143 int devs_min;
5144 /* ndevs has to be a multiple of this */
5145 int devs_increment;
5146 /* Number of copies */
5147 int ncopies;
5148 /* Number of stripes worth of bytes to store parity information */
5149 int nparity;
5150 u64 max_stripe_size;
5151 u64 max_chunk_size;
5152 u64 dev_extent_min;
5153 u64 stripe_size;
5154 u64 chunk_size;
5155 int ndevs;
5156};
5157
5158static void init_alloc_chunk_ctl_policy_regular(
5159 struct btrfs_fs_devices *fs_devices,
5160 struct alloc_chunk_ctl *ctl)
5161{
5162 struct btrfs_space_info *space_info;
5163
5164 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type);
5165 ASSERT(space_info);
5166
5167 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
5168 ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G);
5169
5170 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
5171 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
5172
5173 /* We don't want a chunk larger than 10% of writable space */
5174 ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
5175 ctl->max_chunk_size);
5176 ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes);
5177}
5178
5179static void init_alloc_chunk_ctl_policy_zoned(
5180 struct btrfs_fs_devices *fs_devices,
5181 struct alloc_chunk_ctl *ctl)
5182{
5183 u64 zone_size = fs_devices->fs_info->zone_size;
5184 u64 limit;
5185 int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5186 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5187 u64 min_chunk_size = min_data_stripes * zone_size;
5188 u64 type = ctl->type;
5189
5190 ctl->max_stripe_size = zone_size;
5191 if (type & BTRFS_BLOCK_GROUP_DATA) {
5192 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5193 zone_size);
5194 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5195 ctl->max_chunk_size = ctl->max_stripe_size;
5196 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5197 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5198 ctl->devs_max = min_t(int, ctl->devs_max,
5199 BTRFS_MAX_DEVS_SYS_CHUNK);
5200 } else {
5201 BUG();
5202 }
5203
5204 /* We don't want a chunk larger than 10% of writable space */
5205 limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10),
5206 zone_size),
5207 min_chunk_size);
5208 ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5209 ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5210}
5211
5212static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5213 struct alloc_chunk_ctl *ctl)
5214{
5215 int index = btrfs_bg_flags_to_raid_index(ctl->type);
5216
5217 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5218 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5219 ctl->devs_max = btrfs_raid_array[index].devs_max;
5220 if (!ctl->devs_max)
5221 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5222 ctl->devs_min = btrfs_raid_array[index].devs_min;
5223 ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5224 ctl->ncopies = btrfs_raid_array[index].ncopies;
5225 ctl->nparity = btrfs_raid_array[index].nparity;
5226 ctl->ndevs = 0;
5227
5228 switch (fs_devices->chunk_alloc_policy) {
5229 case BTRFS_CHUNK_ALLOC_REGULAR:
5230 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5231 break;
5232 case BTRFS_CHUNK_ALLOC_ZONED:
5233 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5234 break;
5235 default:
5236 BUG();
5237 }
5238}
5239
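/*
 * Collect per-device free space information for the chunk allocation and
 * sort the result by the size of the largest usable hole.
 */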
5240static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5241 struct alloc_chunk_ctl *ctl,
5242 struct btrfs_device_info *devices_info)
5243{
5244 struct btrfs_fs_info *info = fs_devices->fs_info;
5245 struct btrfs_device *device;
5246 u64 total_avail;
5247 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5248 int ret;
5249 int ndevs = 0;
5250 u64 max_avail;
5251 u64 dev_offset;
5252
	/*
	 * In the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
5257 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5258 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5259 WARN(1, KERN_ERR
5260 "BTRFS: read-only device in alloc_list\n");
5261 continue;
5262 }
5263
5264 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5265 &device->dev_state) ||
5266 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5267 continue;
5268
5269 if (device->total_bytes > device->bytes_used)
5270 total_avail = device->total_bytes - device->bytes_used;
5271 else
5272 total_avail = 0;
5273
5274 /* If there is no space on this device, skip it. */
5275 if (total_avail < ctl->dev_extent_min)
5276 continue;
5277
5278 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5279 &max_avail);
5280 if (ret && ret != -ENOSPC)
5281 return ret;
5282
5283 if (ret == 0)
5284 max_avail = dev_extent_want;
5285
5286 if (max_avail < ctl->dev_extent_min) {
5287 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5288 btrfs_debug(info,
5289 "%s: devid %llu has no free space, have=%llu want=%llu",
5290 __func__, device->devid, max_avail,
5291 ctl->dev_extent_min);
5292 continue;
5293 }
5294
5295 if (ndevs == fs_devices->rw_devices) {
5296 WARN(1, "%s: found more than %llu devices\n",
5297 __func__, fs_devices->rw_devices);
5298 break;
5299 }
5300 devices_info[ndevs].dev_offset = dev_offset;
5301 devices_info[ndevs].max_avail = max_avail;
5302 devices_info[ndevs].total_avail = total_avail;
5303 devices_info[ndevs].dev = device;
5304 ++ndevs;
5305 }
5306 ctl->ndevs = ndevs;
5307
	/*
	 * Now sort the devices by hole size / available space.
	 */
5311 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5312 btrfs_cmp_device_info, NULL);
5313
5314 return 0;
5315}
5316
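/*
 * Decide the stripe and chunk sizes for a regular (non-zoned) allocation
 * from the gathered, sorted device info.
 */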
5317static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5318 struct btrfs_device_info *devices_info)
5319{
5320 /* Number of stripes that count for block group size */
5321 int data_stripes;
5322
5323 /*
5324 * The primary goal is to maximize the number of stripes, so use as
5325 * many devices as possible, even if the stripes are not maximum sized.
5326 *
	 * The DUP profile stores more than one stripe per device; the
	 * max_avail is the total size, so we have to adjust.
5329 */
5330 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5331 ctl->dev_stripes);
5332 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5333
5334 /* This will have to be fixed for RAID1 and RAID10 over more drives */
5335 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5336
5337 /*
5338 * Use the number of data stripes to figure out how big this chunk is
5339 * really going to be in terms of logical address space, and compare
5340 * that answer with the max chunk size. If it's higher, we try to
5341 * reduce stripe_size.
5342 */
5343 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5344 /*
5345 * Reduce stripe_size, round it up to a 16MB boundary again and
5346 * then use it, unless it ends up being even bigger than the
5347 * previous value we had already.
5348 */
5349 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5350 data_stripes), SZ_16M),
5351 ctl->stripe_size);
5352 }
5353
5354 /* Stripe size should not go beyond 1G. */
5355 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
5356
5357 /* Align to BTRFS_STRIPE_LEN */
5358 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5359 ctl->chunk_size = ctl->stripe_size * data_stripes;
5360
5361 return 0;
5362}
5363
5364static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5365 struct btrfs_device_info *devices_info)
5366{
5367 u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5368 /* Number of stripes that count for block group size */
5369 int data_stripes;
5370
5371 /*
5372 * It should hold because:
5373 * dev_extent_min == dev_extent_want == zone_size * dev_stripes
5374 */
5375 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5376
5377 ctl->stripe_size = zone_size;
5378 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5379 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5380
	/* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
5382 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5383 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5384 ctl->stripe_size) + ctl->nparity,
5385 ctl->dev_stripes);
5386 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5387 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5388 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5389 }
5390
5391 ctl->chunk_size = ctl->stripe_size * data_stripes;
5392
5393 return 0;
5394}
5395
5396static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5397 struct alloc_chunk_ctl *ctl,
5398 struct btrfs_device_info *devices_info)
5399{
5400 struct btrfs_fs_info *info = fs_devices->fs_info;
5401
	/*
	 * Round down to the number of usable devices. devs_increment can be
	 * any number, so we can't use round_down(), which requires a power of
	 * 2, while rounddown() is safe.
	 */
5407 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5408
5409 if (ctl->ndevs < ctl->devs_min) {
5410 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5411 btrfs_debug(info,
5412 "%s: not enough devices with free space: have=%d minimum required=%d",
5413 __func__, ctl->ndevs, ctl->devs_min);
5414 }
5415 return -ENOSPC;
5416 }
5417
5418 ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5419
5420 switch (fs_devices->chunk_alloc_policy) {
5421 case BTRFS_CHUNK_ALLOC_REGULAR:
5422 return decide_stripe_size_regular(ctl, devices_info);
5423 case BTRFS_CHUNK_ALLOC_ZONED:
5424 return decide_stripe_size_zoned(ctl, devices_info);
5425 default:
5426 BUG();
5427 }
5428}
5429
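/*
 * Set the given bits in each device's alloc_state tree for the physical
 * ranges covered by the map's stripes.
 */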
5430static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits)
5431{
5432 for (int i = 0; i < map->num_stripes; i++) {
5433 struct btrfs_io_stripe *stripe = &map->stripes[i];
5434 struct btrfs_device *device = stripe->dev;
5435
5436 set_extent_bit(&device->alloc_state, stripe->physical,
5437 stripe->physical + map->stripe_size - 1,
5438 bits | EXTENT_NOWAIT, NULL);
5439 }
5440}
5441
5442static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits)
5443{
5444 for (int i = 0; i < map->num_stripes; i++) {
5445 struct btrfs_io_stripe *stripe = &map->stripes[i];
5446 struct btrfs_device *device = stripe->dev;
5447
5448 __clear_extent_bit(&device->alloc_state, stripe->physical,
5449 stripe->physical + map->stripe_size - 1,
5450 bits | EXTENT_NOWAIT,
5451 NULL, NULL);
5452 }
5453}
5454
5455void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
5456{
5457 write_lock(&fs_info->mapping_tree_lock);
5458 rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
5459 RB_CLEAR_NODE(&map->rb_node);
5460 chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
5461 write_unlock(&fs_info->mapping_tree_lock);
5462
5463 /* Once for the tree reference. */
5464 btrfs_free_chunk_map(map);
5465}
5466
5467EXPORT_FOR_TESTS
5468int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
5469{
5470 struct rb_node **p;
5471 struct rb_node *parent = NULL;
5472 bool leftmost = true;
5473
5474 write_lock(&fs_info->mapping_tree_lock);
5475 p = &fs_info->mapping_tree.rb_root.rb_node;
5476 while (*p) {
5477 struct btrfs_chunk_map *entry;
5478
5479 parent = *p;
5480 entry = rb_entry(parent, struct btrfs_chunk_map, rb_node);
5481
5482 if (map->start < entry->start) {
5483 p = &(*p)->rb_left;
5484 } else if (map->start > entry->start) {
5485 p = &(*p)->rb_right;
5486 leftmost = false;
5487 } else {
5488 write_unlock(&fs_info->mapping_tree_lock);
5489 return -EEXIST;
5490 }
5491 }
5492 rb_link_node(&map->rb_node, parent, p);
5493 rb_insert_color_cached(&map->rb_node, &fs_info->mapping_tree, leftmost);
5494 chunk_map_device_set_bits(map, CHUNK_ALLOCATED);
5495 chunk_map_device_clear_bits(map, CHUNK_TRIMMED);
5496 write_unlock(&fs_info->mapping_tree_lock);
5497
5498 return 0;
5499}
5500
5501EXPORT_FOR_TESTS
5502struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp)
5503{
5504 struct btrfs_chunk_map *map;
5505
5506 map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp);
5507 if (!map)
5508 return NULL;
5509
5510 refcount_set(&map->refs, 1);
5511 RB_CLEAR_NODE(&map->rb_node);
5512
5513 return map;
5514}
5515
5516struct btrfs_chunk_map *btrfs_clone_chunk_map(struct btrfs_chunk_map *map, gfp_t gfp)
5517{
5518 const int size = btrfs_chunk_map_size(map->num_stripes);
5519 struct btrfs_chunk_map *clone;
5520
5521 clone = kmemdup(map, size, gfp);
5522 if (!clone)
5523 return NULL;
5524
5525 refcount_set(&clone->refs, 1);
5526 RB_CLEAR_NODE(&clone->rb_node);
5527
5528 return clone;
5529}
5530
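/*
 * Build the in-memory chunk map from the chosen devices, insert it into the
 * mapping tree and create the corresponding block group.
 */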
5531static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5532 struct alloc_chunk_ctl *ctl,
5533 struct btrfs_device_info *devices_info)
5534{
5535 struct btrfs_fs_info *info = trans->fs_info;
5536 struct btrfs_chunk_map *map;
5537 struct btrfs_block_group *block_group;
5538 u64 start = ctl->start;
5539 u64 type = ctl->type;
5540 int ret;
5541 int i;
5542 int j;
5543
5544 map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS);
5545 if (!map)
5546 return ERR_PTR(-ENOMEM);
5547
5548 map->start = start;
5549 map->chunk_len = ctl->chunk_size;
5550 map->stripe_size = ctl->stripe_size;
5551 map->type = type;
5552 map->io_align = BTRFS_STRIPE_LEN;
5553 map->io_width = BTRFS_STRIPE_LEN;
5554 map->sub_stripes = ctl->sub_stripes;
5555 map->num_stripes = ctl->num_stripes;
5556
5557 for (i = 0; i < ctl->ndevs; ++i) {
5558 for (j = 0; j < ctl->dev_stripes; ++j) {
5559 int s = i * ctl->dev_stripes + j;
5560 map->stripes[s].dev = devices_info[i].dev;
5561 map->stripes[s].physical = devices_info[i].dev_offset +
5562 j * ctl->stripe_size;
5563 }
5564 }
5565
5566 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5567
5568 ret = btrfs_add_chunk_map(info, map);
5569 if (ret) {
5570 btrfs_free_chunk_map(map);
5571 return ERR_PTR(ret);
5572 }
5573
5574 block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size);
5575 if (IS_ERR(block_group)) {
5576 btrfs_remove_chunk_map(info, map);
5577 return block_group;
5578 }
5579
5580 for (int i = 0; i < map->num_stripes; i++) {
5581 struct btrfs_device *dev = map->stripes[i].dev;
5582
5583 btrfs_device_set_bytes_used(dev,
5584 dev->bytes_used + ctl->stripe_size);
5585 if (list_empty(&dev->post_commit_list))
5586 list_add_tail(&dev->post_commit_list,
5587 &trans->transaction->dev_update_list);
5588 }
5589
5590 atomic64_sub(ctl->stripe_size * map->num_stripes,
5591 &info->free_chunk_space);
5592
5593 check_raid56_incompat_flag(info, type);
5594 check_raid1c34_incompat_flag(info, type);
5595
5596 return block_group;
5597}
5598
5599struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
5600 u64 type)
5601{
5602 struct btrfs_fs_info *info = trans->fs_info;
5603 struct btrfs_fs_devices *fs_devices = info->fs_devices;
5604 struct btrfs_device_info *devices_info = NULL;
5605 struct alloc_chunk_ctl ctl;
5606 struct btrfs_block_group *block_group;
5607 int ret;
5608
5609 lockdep_assert_held(&info->chunk_mutex);
5610
5611 if (!alloc_profile_is_valid(type, 0)) {
5612 ASSERT(0);
5613 return ERR_PTR(-EINVAL);
5614 }
5615
5616 if (list_empty(&fs_devices->alloc_list)) {
5617 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5618 btrfs_debug(info, "%s: no writable device", __func__);
5619 return ERR_PTR(-ENOSPC);
5620 }
5621
5622 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5623 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5624 ASSERT(0);
5625 return ERR_PTR(-EINVAL);
5626 }
5627
5628 ctl.start = find_next_chunk(info);
5629 ctl.type = type;
5630 init_alloc_chunk_ctl(fs_devices, &ctl);
5631
5632 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5633 GFP_NOFS);
5634 if (!devices_info)
5635 return ERR_PTR(-ENOMEM);
5636
5637 ret = gather_device_info(fs_devices, &ctl, devices_info);
5638 if (ret < 0) {
5639 block_group = ERR_PTR(ret);
5640 goto out;
5641 }
5642
5643 ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5644 if (ret < 0) {
5645 block_group = ERR_PTR(ret);
5646 goto out;
5647 }
5648
5649 block_group = create_chunk(trans, &ctl, devices_info);
5650
5651out:
5652 kfree(devices_info);
5653 return block_group;
5654}
5655
5656/*
 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
 * system chunks.
5660 *
5661 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5662 * phases.
5663 */
5664int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5665 struct btrfs_block_group *bg)
5666{
5667 struct btrfs_fs_info *fs_info = trans->fs_info;
5668 struct btrfs_root *chunk_root = fs_info->chunk_root;
5669 struct btrfs_key key;
5670 struct btrfs_chunk *chunk;
5671 struct btrfs_stripe *stripe;
5672 struct btrfs_chunk_map *map;
5673 size_t item_size;
5674 int i;
5675 int ret;
5676
5677 /*
5678 * We take the chunk_mutex for 2 reasons:
5679 *
5680 * 1) Updates and insertions in the chunk btree must be done while holding
5681 * the chunk_mutex, as well as updating the system chunk array in the
5682 * superblock. See the comment on top of btrfs_chunk_alloc() for the
5683 * details;
5684 *
5685 * 2) To prevent races with the final phase of a device replace operation
5686 * that replaces the device object associated with the map's stripes,
5687 * because the device object's id can change at any time during that
5688 * final phase of the device replace operation
5689 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5690 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5691 * which would cause a failure when updating the device item, which does
5692 * not exist, or when persisting a stripe of the chunk item with such an ID.
5693 * Here we can't use the device_list_mutex because our caller already
5694 * has locked the chunk_mutex, and the final phase of device replace
5695 * acquires both mutexes - first the device_list_mutex and then the
5696 * chunk_mutex. Using any of those two mutexes protects us from a
5697 * concurrent device replace.
5698 */
5699 lockdep_assert_held(&fs_info->chunk_mutex);
5700
5701 map = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5702 if (IS_ERR(map)) {
5703 ret = PTR_ERR(map);
5704 btrfs_abort_transaction(trans, ret);
5705 return ret;
5706 }
5707
5708 item_size = btrfs_chunk_item_size(map->num_stripes);
5709
5710 chunk = kzalloc(item_size, GFP_NOFS);
5711 if (!chunk) {
5712 ret = -ENOMEM;
5713 btrfs_abort_transaction(trans, ret);
5714 goto out;
5715 }
5716
5717 for (i = 0; i < map->num_stripes; i++) {
5718 struct btrfs_device *device = map->stripes[i].dev;
5719
5720 ret = btrfs_update_device(trans, device);
5721 if (ret)
5722 goto out;
5723 }
5724
5725 stripe = &chunk->stripe;
5726 for (i = 0; i < map->num_stripes; i++) {
5727 struct btrfs_device *device = map->stripes[i].dev;
5728 const u64 dev_offset = map->stripes[i].physical;
5729
5730 btrfs_set_stack_stripe_devid(stripe, device->devid);
5731 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5732 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5733 stripe++;
5734 }
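/*
 * The chunk item ends in a single struct btrfs_stripe and the remaining
 * stripes follow it directly on disk, so &chunk->stripe above is the
 * start of the stripe array and item_size was sized with
 * btrfs_chunk_item_size(map->num_stripes).
 */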
5735
5736 btrfs_set_stack_chunk_length(chunk, bg->length);
5737 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
5738 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
5739 btrfs_set_stack_chunk_type(chunk, map->type);
5740 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5741 btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN);
5742 btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN);
5743 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5744 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5745
5746 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5747 key.type = BTRFS_CHUNK_ITEM_KEY;
5748 key.offset = bg->start;
5749
5750 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5751 if (ret)
5752 goto out;
5753
5754 set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);
5755
5756 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5757 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5758 if (ret)
5759 goto out;
5760 }
5761
5762out:
5763 kfree(chunk);
5764 btrfs_free_chunk_map(map);
5765 return ret;
5766}
5767
5768static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5769{
5770 struct btrfs_fs_info *fs_info = trans->fs_info;
5771 u64 alloc_profile;
5772 struct btrfs_block_group *meta_bg;
5773 struct btrfs_block_group *sys_bg;
5774
5775 /*
5776 * When adding a new device for sprouting, the seed device is read-only
5777 * so we must first allocate a metadata and a system chunk. But before
5778 * adding the block group items to the extent, device and chunk btrees,
5779 * we must first:
5780 *
5781 * 1) Create both chunks without doing any changes to the btrees, as
5782 * otherwise we would get -ENOSPC since the block groups from the
5783 * seed device are read-only;
5784 *
5785 * 2) Add the device item for the new sprout device - finishing the setup
5786 * of a new block group requires updating the device item in the chunk
5787 * btree, so it must exist when we attempt to do it. The previous step
5788 * ensures this does not fail with -ENOSPC.
5789 *
5790 * After that we can add the block group items to their btrees:
5791 * update existing device item in the chunk btree, add a new block group
5792 * item to the extent btree, add a new chunk item to the chunk btree and
5793 * finally add the new device extent items to the devices btree.
5794 */
5795
5796 alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5797 meta_bg = btrfs_create_chunk(trans, alloc_profile);
5798 if (IS_ERR(meta_bg))
5799 return PTR_ERR(meta_bg);
5800
5801 alloc_profile = btrfs_system_alloc_profile(fs_info);
5802 sys_bg = btrfs_create_chunk(trans, alloc_profile);
5803 if (IS_ERR(sys_bg))
5804 return PTR_ERR(sys_bg);
5805
5806 return 0;
5807}
5808
5809static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map)
5810{
5811 const int index = btrfs_bg_flags_to_raid_index(map->type);
5812
5813 return btrfs_raid_array[index].tolerated_failures;
5814}
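
/*
 * Example values from btrfs_raid_array: RAID0 and SINGLE tolerate no
 * failures, RAID1, RAID5 and RAID10 tolerate one, RAID6 tolerates two.
 * This is what the miss_ndevs check in btrfs_chunk_writeable() below
 * compares against.
 */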
5815
5816bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5817{
5818 struct btrfs_chunk_map *map;
5819 int miss_ndevs = 0;
5820 int i;
5821 bool ret = true;
5822
5823 map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5824 if (IS_ERR(map))
5825 return false;
5826
5827 for (i = 0; i < map->num_stripes; i++) {
5828 if (test_bit(BTRFS_DEV_STATE_MISSING,
5829 &map->stripes[i].dev->dev_state)) {
5830 miss_ndevs++;
5831 continue;
5832 }
5833 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5834 &map->stripes[i].dev->dev_state)) {
5835 ret = false;
5836 goto end;
5837 }
5838 }
5839
5840 /*
5841 * If the number of missing devices is larger than max errors, we
5842 * cannot write the data into that chunk successfully.
5843 */
5844 if (miss_ndevs > btrfs_chunk_max_errors(map))
5845 ret = false;
5846end:
5847 btrfs_free_chunk_map(map);
5848 return ret;
5849}
5850
5851void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info)
5852{
5853 write_lock(&fs_info->mapping_tree_lock);
5854 while (!RB_EMPTY_ROOT(&fs_info->mapping_tree.rb_root)) {
5855 struct btrfs_chunk_map *map;
5856 struct rb_node *node;
5857
5858 node = rb_first_cached(&fs_info->mapping_tree);
5859 map = rb_entry(node, struct btrfs_chunk_map, rb_node);
5860 rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
5861 RB_CLEAR_NODE(&map->rb_node);
5862 chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
5863 /* Once for the tree ref. */
5864 btrfs_free_chunk_map(map);
5865 cond_resched_rwlock_write(&fs_info->mapping_tree_lock);
5866 }
5867 write_unlock(&fs_info->mapping_tree_lock);
5868}
5869
5870int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5871{
5872 struct btrfs_chunk_map *map;
5873 enum btrfs_raid_types index;
5874 int ret = 1;
5875
5876 map = btrfs_get_chunk_map(fs_info, logical, len);
5877 if (IS_ERR(map))
5878 /*
5879 * We could return errors for these cases, but that could get
5880 * ugly, and we'd probably end up doing the same thing anyway,
5881 * which is just exit. So return 1 so the callers don't try
5882 * to use other copies.
5883 */
5884 return 1;
5885
5886 index = btrfs_bg_flags_to_raid_index(map->type);
5887
5888 /* Non-RAID56, use their ncopies from btrfs_raid_array. */
5889 if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5890 ret = btrfs_raid_array[index].ncopies;
5891 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5892 ret = 2;
5893 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5894 /*
5895 * There could be two corrupted data stripes, so we need
5896 * to retry in a loop in order to rebuild the correct data.
5897 *
5898 * Fail a stripe at a time on every retry except the
5899 * stripe under reconstruction.
5900 */
5901 ret = map->num_stripes;
5902 btrfs_free_chunk_map(map);
5903 return ret;
5904}
5905
5906unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5907 u64 logical)
5908{
5909 struct btrfs_chunk_map *map;
5910 unsigned long len = fs_info->sectorsize;
5911
5912 if (!btrfs_fs_incompat(fs_info, RAID56))
5913 return len;
5914
5915 map = btrfs_get_chunk_map(fs_info, logical, len);
5916
5917 if (!WARN_ON(IS_ERR(map))) {
5918 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5919 len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
5920 btrfs_free_chunk_map(map);
5921 }
5922 return len;
5923}
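
/*
 * Worked example (BTRFS_STRIPE_LEN is 64KiB): a RAID5 chunk over 4
 * devices has nr_data_stripes == 3, so the full stripe length returned
 * above is 3 * 64KiB == 192KiB.
 */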
5924
5925int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5926{
5927 struct btrfs_chunk_map *map;
5928 int ret = 0;
5929
5930 if (!btrfs_fs_incompat(fs_info, RAID56))
5931 return 0;
5932
5933 map = btrfs_get_chunk_map(fs_info, logical, len);
5934
5935 if (!WARN_ON(IS_ERR(map))) {
5936 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5937 ret = 1;
5938 btrfs_free_chunk_map(map);
5939 }
5940 return ret;
5941}
5942
5943static int find_live_mirror(struct btrfs_fs_info *fs_info,
5944 struct btrfs_chunk_map *map, int first,
5945 int dev_replace_is_ongoing)
5946{
5947 int i;
5948 int num_stripes;
5949 int preferred_mirror;
5950 int tolerance;
5951 struct btrfs_device *srcdev;
5952
5953 ASSERT((map->type &
5954 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5955
5956 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5957 num_stripes = map->sub_stripes;
5958 else
5959 num_stripes = map->num_stripes;
5960
5961 switch (fs_info->fs_devices->read_policy) {
5962 default:
5963 /* Shouldn't happen, just warn and use pid instead of failing */
5964 btrfs_warn_rl(fs_info,
5965 "unknown read_policy type %u, reset to pid",
5966 fs_info->fs_devices->read_policy);
5967 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5968 fallthrough;
5969 case BTRFS_READ_POLICY_PID:
5970 preferred_mirror = first + (current->pid % num_stripes);
5971 break;
5972 }
5973
5974 if (dev_replace_is_ongoing &&
5975 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5976 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5977 srcdev = fs_info->dev_replace.srcdev;
5978 else
5979 srcdev = NULL;
5980
5981 /*
5982 * Try to avoid the drive that is the source drive for a
5983 * dev-replace procedure; only choose it if no other non-missing
5984 * mirror is available.
5985 */
5986 for (tolerance = 0; tolerance < 2; tolerance++) {
5987 if (map->stripes[preferred_mirror].dev->bdev &&
5988 (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5989 return preferred_mirror;
5990 for (i = first; i < first + num_stripes; i++) {
5991 if (map->stripes[i].dev->bdev &&
5992 (tolerance || map->stripes[i].dev != srcdev))
5993 return i;
5994 }
5995 }
5996
5997 /* We couldn't find one that doesn't fail. Just return something
5998 * and the I/O error handling code will clean up eventually.
5999 */
6000 return preferred_mirror;
6001}
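
/*
 * Worked example for the PID policy above: on RAID1 (num_stripes == 2,
 * first == 0) a task with an odd pid prefers mirror 1 and one with an
 * even pid prefers mirror 0, spreading reads across both copies.
 */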
6002
6003static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
6004 u64 logical,
6005 u16 total_stripes)
6006{
6007 struct btrfs_io_context *bioc;
6008
6009 bioc = kzalloc(
6010 /* The size of btrfs_io_context */
6011 sizeof(struct btrfs_io_context) +
6012 /* Plus the variable array for the stripes */
6013 sizeof(struct btrfs_io_stripe) * (total_stripes),
6014 GFP_NOFS);
6015
6016 if (!bioc)
6017 return NULL;
6018
6019 refcount_set(&bioc->refs, 1);
6020
6021 bioc->fs_info = fs_info;
6022 bioc->replace_stripe_src = -1;
6023 bioc->full_stripe_logical = (u64)-1;
6024 bioc->logical = logical;
6025
6026 return bioc;
6027}
6028
6029void btrfs_get_bioc(struct btrfs_io_context *bioc)
6030{
6031 WARN_ON(!refcount_read(&bioc->refs));
6032 refcount_inc(&bioc->refs);
6033}
6034
6035void btrfs_put_bioc(struct btrfs_io_context *bioc)
6036{
6037 if (!bioc)
6038 return;
6039 if (refcount_dec_and_test(&bioc->refs))
6040 kfree(bioc);
6041}
6042
6043/*
6044 * Note that discard won't be sent to the target device of a device
6045 * replace.
6046 */
6047struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
6048 u64 logical, u64 *length_ret,
6049 u32 *num_stripes)
6050{
6051 struct btrfs_chunk_map *map;
6052 struct btrfs_discard_stripe *stripes;
6053 u64 length = *length_ret;
6054 u64 offset;
6055 u32 stripe_nr;
6056 u32 stripe_nr_end;
6057 u32 stripe_cnt;
6058 u64 stripe_end_offset;
6059 u64 stripe_offset;
6060 u32 stripe_index;
6061 u32 factor = 0;
6062 u32 sub_stripes = 0;
6063 u32 stripes_per_dev = 0;
6064 u32 remaining_stripes = 0;
6065 u32 last_stripe = 0;
6066 int ret;
6067 int i;
6068
6069 map = btrfs_get_chunk_map(fs_info, logical, length);
6070 if (IS_ERR(map))
6071 return ERR_CAST(map);
6072
6073 /* We don't discard RAID56 yet. */
6074 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6075 ret = -EOPNOTSUPP;
6076 goto out_free_map;
6077 }
6078
6079 offset = logical - map->start;
6080 length = min_t(u64, map->start + map->chunk_len - logical, length);
6081 *length_ret = length;
6082
6083 /*
6084 * stripe_nr counts the total number of stripes we have to stride
6085 * to get to this block
6086 */
6087 stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
6088
6089 /* stripe_offset is the offset of this block in its stripe */
6090 stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr);
6091
6092 stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >>
6093 BTRFS_STRIPE_LEN_SHIFT;
6094 stripe_cnt = stripe_nr_end - stripe_nr;
6095 stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) -
6096 (offset + length);
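/*
 * Example with 64KiB stripes: offset == 200KiB and length == 100KiB
 * give stripe_nr == 3, stripe_offset == 8KiB, stripe_nr_end == 5,
 * stripe_cnt == 2 and stripe_end_offset == 320KiB - 300KiB == 20KiB.
 */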
6097 /*
6098 * After this, stripe_nr is the number of stripes on this
6099 * device we have to walk to find the data, and stripe_index is
6100 * the index of our device in the stripe array.
6101 */
6102 *num_stripes = 1;
6103 stripe_index = 0;
6104 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6105 BTRFS_BLOCK_GROUP_RAID10)) {
6106 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6107 sub_stripes = 1;
6108 else
6109 sub_stripes = map->sub_stripes;
6110
6111 factor = map->num_stripes / sub_stripes;
6112 *num_stripes = min_t(u64, map->num_stripes,
6113 sub_stripes * stripe_cnt);
6114 stripe_index = stripe_nr % factor;
6115 stripe_nr /= factor;
6116 stripe_index *= sub_stripes;
6117
6118 remaining_stripes = stripe_cnt % factor;
6119 stripes_per_dev = stripe_cnt / factor;
6120 last_stripe = ((stripe_nr_end - 1) % factor) * sub_stripes;
6121 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6122 BTRFS_BLOCK_GROUP_DUP)) {
6123 *num_stripes = map->num_stripes;
6124 } else {
6125 stripe_index = stripe_nr % map->num_stripes;
6126 stripe_nr /= map->num_stripes;
6127 }
6128
6129 stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS);
6130 if (!stripes) {
6131 ret = -ENOMEM;
6132 goto out_free_map;
6133 }
6134
6135 for (i = 0; i < *num_stripes; i++) {
6136 stripes[i].physical =
6137 map->stripes[stripe_index].physical +
6138 stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
6139 stripes[i].dev = map->stripes[stripe_index].dev;
6140
6141 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6142 BTRFS_BLOCK_GROUP_RAID10)) {
6143 stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev);
6144
6145 if (i / sub_stripes < remaining_stripes)
6146 stripes[i].length += BTRFS_STRIPE_LEN;
6147
6148 /*
6149 * Special for the first stripe and
6150 * the last stripe:
6151 *
6152 * |-------|...|-------|
6153 * |----------|
6154 * off end_off
6155 */
6156 if (i < sub_stripes)
6157 stripes[i].length -= stripe_offset;
6158
6159 if (stripe_index >= last_stripe &&
6160 stripe_index <= (last_stripe +
6161 sub_stripes - 1))
6162 stripes[i].length -= stripe_end_offset;
6163
6164 if (i == sub_stripes - 1)
6165 stripe_offset = 0;
6166 } else {
6167 stripes[i].length = length;
6168 }
6169
6170 stripe_index++;
6171 if (stripe_index == map->num_stripes) {
6172 stripe_index = 0;
6173 stripe_nr++;
6174 }
6175 }
6176
6177 btrfs_free_chunk_map(map);
6178 return stripes;
6179out_free_map:
6180 btrfs_free_chunk_map(map);
6181 return ERR_PTR(ret);
6182}
6183
6184static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6185{
6186 struct btrfs_block_group *cache;
6187 bool ret;
6188
6189 /* A non-zoned filesystem does not use the "to_copy" flag. */
6190 if (!btrfs_is_zoned(fs_info))
6191 return false;
6192
6193 cache = btrfs_lookup_block_group(fs_info, logical);
6194
6195 ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
6196
6197 btrfs_put_block_group(cache);
6198 return ret;
6199}
6200
6201static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6202 struct btrfs_io_context *bioc,
6203 struct btrfs_dev_replace *dev_replace,
6204 u64 logical,
6205 int *num_stripes_ret, int *max_errors_ret)
6206{
6207 u64 srcdev_devid = dev_replace->srcdev->devid;
6208 /*
6209 * At this stage, num_stripes is still the real number of stripes,
6210 * excluding the duplicated stripes.
6211 */
6212 int num_stripes = *num_stripes_ret;
6213 int nr_extra_stripes = 0;
6214 int max_errors = *max_errors_ret;
6215 int i;
6216
6217 /*
6218 * A block group which has "to_copy" set will eventually be copied by
6219 * the dev-replace process. We can avoid cloning IO here.
6220 */
6221 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6222 return;
6223
6224 /*
6225 * Duplicate the write operations while the dev-replace procedure is
6226 * running. Since the copying of the old disk to the new disk takes
6227 * place at run time while the filesystem is mounted writable, the
6228 * regular write operations to the old disk have to be duplicated to go
6229 * to the new disk as well.
6230 *
6231 * Note that device->missing is handled by the caller, and that the
6232 * write to the old disk is already set up in the stripes array.
6233 */
6234 for (i = 0; i < num_stripes; i++) {
6235 struct btrfs_io_stripe *old = &bioc->stripes[i];
6236 struct btrfs_io_stripe *new = &bioc->stripes[num_stripes + nr_extra_stripes];
6237
6238 if (old->dev->devid != srcdev_devid)
6239 continue;
6240
6241 new->physical = old->physical;
6242 new->dev = dev_replace->tgtdev;
6243 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
6244 bioc->replace_stripe_src = i;
6245 nr_extra_stripes++;
6246 }
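/*
 * Example: for DUP with both stripes on the source device, both writes
 * are mirrored to the target device and nr_extra_stripes == 2; for
 * RAID1 at most one stripe sits on the source device, so at most one
 * extra stripe is added.
 */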
6247
6248 /* We can only have at most 2 extra nr_stripes (for DUP). */
6249 ASSERT(nr_extra_stripes <= 2);
6250 /*
6251 * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for
6252 * replace.
6253 * If we have 2 extra stripes, only choose the one with the smaller physical offset.
6254 */
6255 if (op == BTRFS_MAP_GET_READ_MIRRORS && nr_extra_stripes == 2) {
6256 struct btrfs_io_stripe *first = &bioc->stripes[num_stripes];
6257 struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1];
6258
6259 /* Only DUP can have two extra stripes. */
6260 ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP);
6261
6262 /*
6263 * Swap the two extra stripes and reduce @nr_extra_stripes.
6264 * The extra stripe would still be there, but won't be accessed.
6265 */
6266 if (first->physical > second->physical) {
6267 swap(second->physical, first->physical);
6268 swap(second->dev, first->dev);
6269 nr_extra_stripes--;
6270 }
6271 }
6272
6273 *num_stripes_ret = num_stripes + nr_extra_stripes;
6274 *max_errors_ret = max_errors + nr_extra_stripes;
6275 bioc->replace_nr_stripes = nr_extra_stripes;
6276}
6277
6278static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
6279 struct btrfs_io_geometry *io_geom)
6280{
6281 /*
6282 * stripe_nr is the stripe where this block falls; stripe_offset is
6283 * the offset of this block in its stripe.
6284 */
6285 io_geom->stripe_offset = offset & BTRFS_STRIPE_LEN_MASK;
6286 io_geom->stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
6287 ASSERT(io_geom->stripe_offset < U32_MAX);
6288
6289 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6290 unsigned long full_stripe_len =
6291 btrfs_stripe_nr_to_offset(nr_data_stripes(map));
6292
6293 /*
6294 * For the full stripe start, we use the previously calculated
6295 * @stripe_nr. Align it down to nr_data_stripes, then multiply by
6296 * STRIPE_LEN.
6297 *
6298 * This way we avoid u64 division completely. And we have
6299 * to use rounddown(), not round_down(), as nr_data_stripes is
6300 * not guaranteed to be a power of 2.
6301 */
6302 io_geom->raid56_full_stripe_start = btrfs_stripe_nr_to_offset(
6303 rounddown(io_geom->stripe_nr, nr_data_stripes(map)));
6304
6305 ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset);
6306 ASSERT(io_geom->raid56_full_stripe_start <= offset);
6307 /*
6308 * For writes to RAID56, allow to write a full stripe set, but
6309 * no straddling of stripe sets.
6310 */
6311 if (io_geom->op == BTRFS_MAP_WRITE)
6312 return full_stripe_len - (offset - io_geom->raid56_full_stripe_start);
6313 }
6314
6315 /*
6316 * For other RAID types and for RAID56 reads, allow a single stripe (on
6317 * a single disk).
6318 */
6319 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK)
6320 return BTRFS_STRIPE_LEN - io_geom->stripe_offset;
6321 return U64_MAX;
6322}
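
/*
 * Worked example for the RAID56 write cap above: RAID5 over 3 devices
 * has 2 data stripes, so full_stripe_len == 128KiB. A write starting
 * 96KiB into the chunk lies in full stripe 0 and the returned length is
 * capped at 128KiB - 96KiB == 32KiB, so it never straddles stripe sets.
 */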
6323
6324static int set_io_stripe(struct btrfs_fs_info *fs_info, u64 logical,
6325 u64 *length, struct btrfs_io_stripe *dst,
6326 struct btrfs_chunk_map *map,
6327 struct btrfs_io_geometry *io_geom)
6328{
6329 dst->dev = map->stripes[io_geom->stripe_index].dev;
6330
6331 if (io_geom->op == BTRFS_MAP_READ &&
6332 btrfs_need_stripe_tree_update(fs_info, map->type))
6333 return btrfs_get_raid_extent_offset(fs_info, logical, length,
6334 map->type,
6335 io_geom->stripe_index, dst);
6336
6337 dst->physical = map->stripes[io_geom->stripe_index].physical +
6338 io_geom->stripe_offset +
6339 btrfs_stripe_nr_to_offset(io_geom->stripe_nr);
6340 return 0;
6341}
6342
6343static bool is_single_device_io(struct btrfs_fs_info *fs_info,
6344 const struct btrfs_io_stripe *smap,
6345 const struct btrfs_chunk_map *map,
6346 int num_alloc_stripes,
6347 enum btrfs_map_op op, int mirror_num)
6348{
6349 if (!smap)
6350 return false;
6351
6352 if (num_alloc_stripes != 1)
6353 return false;
6354
6355 if (btrfs_need_stripe_tree_update(fs_info, map->type) && op != BTRFS_MAP_READ)
6356 return false;
6357
6358 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)
6359 return false;
6360
6361 return true;
6362}
6363
6364static void map_blocks_raid0(const struct btrfs_chunk_map *map,
6365 struct btrfs_io_geometry *io_geom)
6366{
6367 io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
6368 io_geom->stripe_nr /= map->num_stripes;
6369 if (io_geom->op == BTRFS_MAP_READ)
6370 io_geom->mirror_num = 1;
6371}
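
/*
 * E.g. with three RAID0 stripes, stripe_nr == 7 yields stripe_index ==
 * 1 and stripe_nr == 2: the third stripe on the second device.
 */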
6372
6373static void map_blocks_raid1(struct btrfs_fs_info *fs_info,
6374 struct btrfs_chunk_map *map,
6375 struct btrfs_io_geometry *io_geom,
6376 bool dev_replace_is_ongoing)
6377{
6378 if (io_geom->op != BTRFS_MAP_READ) {
6379 io_geom->num_stripes = map->num_stripes;
6380 return;
6381 }
6382
6383 if (io_geom->mirror_num) {
6384 io_geom->stripe_index = io_geom->mirror_num - 1;
6385 return;
6386 }
6387
6388 io_geom->stripe_index = find_live_mirror(fs_info, map, 0,
6389 dev_replace_is_ongoing);
6390 io_geom->mirror_num = io_geom->stripe_index + 1;
6391}
6392
6393static void map_blocks_dup(const struct btrfs_chunk_map *map,
6394 struct btrfs_io_geometry *io_geom)
6395{
6396 if (io_geom->op != BTRFS_MAP_READ) {
6397 io_geom->num_stripes = map->num_stripes;
6398 return;
6399 }
6400
6401 if (io_geom->mirror_num) {
6402 io_geom->stripe_index = io_geom->mirror_num - 1;
6403 return;
6404 }
6405
6406 io_geom->mirror_num = 1;
6407}
6408
6409static void map_blocks_raid10(struct btrfs_fs_info *fs_info,
6410 struct btrfs_chunk_map *map,
6411 struct btrfs_io_geometry *io_geom,
6412 bool dev_replace_is_ongoing)
6413{
6414 u32 factor = map->num_stripes / map->sub_stripes;
6415 int old_stripe_index;
6416
6417 io_geom->stripe_index = (io_geom->stripe_nr % factor) * map->sub_stripes;
6418 io_geom->stripe_nr /= factor;
6419
6420 if (io_geom->op != BTRFS_MAP_READ) {
6421 io_geom->num_stripes = map->sub_stripes;
6422 return;
6423 }
6424
6425 if (io_geom->mirror_num) {
6426 io_geom->stripe_index += io_geom->mirror_num - 1;
6427 return;
6428 }
6429
6430 old_stripe_index = io_geom->stripe_index;
6431 io_geom->stripe_index = find_live_mirror(fs_info, map,
6432 io_geom->stripe_index,
6433 dev_replace_is_ongoing);
6434 io_geom->mirror_num = io_geom->stripe_index - old_stripe_index + 1;
6435}
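
/*
 * E.g. RAID10 on 4 devices (sub_stripes == 2) has factor == 2: logical
 * stripe_nr == 5 yields stripe_index == (5 % 2) * 2 == 2, the first
 * device of the second mirror pair, and stripe_nr == 2 on that device.
 */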
6436
6437static void map_blocks_raid56_write(struct btrfs_chunk_map *map,
6438 struct btrfs_io_geometry *io_geom,
6439 u64 logical, u64 *length)
6440{
6441 int data_stripes = nr_data_stripes(map);
6442
6443 /*
6444 * Needs full stripe mapping.
6445 *
6446 * Push stripe_nr back to the start of the full stripe. For those cases
6447 * needing a full stripe, @stripe_nr is the full stripe number.
6448 *
6449 * Originally we would go raid56_full_stripe_start / full_stripe_len, but
6450 * that can be expensive. Here we just divide @stripe_nr by
6451 * @data_stripes.
6452 */
6453 io_geom->stripe_nr /= data_stripes;
6454
6455 /* RAID[56] write or recovery. Return all stripes */
6456 io_geom->num_stripes = map->num_stripes;
6457 io_geom->max_errors = btrfs_chunk_max_errors(map);
6458
6459 /* Return the length to the full stripe end. */
6460 *length = min(logical + *length,
6461 io_geom->raid56_full_stripe_start + map->start +
6462 btrfs_stripe_nr_to_offset(data_stripes)) -
6463 logical;
6464 io_geom->stripe_index = 0;
6465 io_geom->stripe_offset = 0;
6466}
6467
6468static void map_blocks_raid56_read(struct btrfs_chunk_map *map,
6469 struct btrfs_io_geometry *io_geom)
6470{
6471 int data_stripes = nr_data_stripes(map);
6472
6473 ASSERT(io_geom->mirror_num <= 1);
6474 /* Just grab the data stripe directly. */
6475 io_geom->stripe_index = io_geom->stripe_nr % data_stripes;
6476 io_geom->stripe_nr /= data_stripes;
6477
6478 /* We distribute the parity blocks across stripes. */
6479 io_geom->stripe_index =
6480 (io_geom->stripe_nr + io_geom->stripe_index) % map->num_stripes;
6481
6482 if (io_geom->op == BTRFS_MAP_READ && io_geom->mirror_num < 1)
6483 io_geom->mirror_num = 1;
6484}
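
/*
 * Parity rotation example for the read path above: on RAID5 over 3
 * devices (data_stripes == 2), logical stripe_nr == 4 gives a data
 * stripe index of 0 and full stripe number 2, so the rotated
 * stripe_index is (2 + 0) % 3 == 2.
 */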
6485
6486static void map_blocks_single(const struct btrfs_chunk_map *map,
6487 struct btrfs_io_geometry *io_geom)
6488{
6489 io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
6490 io_geom->stripe_nr /= map->num_stripes;
6491 io_geom->mirror_num = io_geom->stripe_index + 1;
6492}
6493
6494/*
6495 * Map one logical range to one or more physical ranges.
6496 *
6497 * @length: (Mandatory) mapped length of this run.
6498 * One logical range can be split into different segments
6499 * due to factors like zones and RAID0/5/6/10 stripe
6500 * boundaries.
6501 *
6502 * @bioc_ret: (Mandatory) returned btrfs_io_context structure.
6503 * which has one or more physical ranges (btrfs_io_stripe)
6504 * recorded inside.
6505 * Caller should call btrfs_put_bioc() to free it after use.
6506 *
6507 * @smap: (Optional) single physical range optimization.
6508 * If the map request can be fulfilled by one single
6509 * physical range, and this parameter is not NULL,
6510 * then @bioc_ret would be NULL, and @smap would be
6511 * updated.
6512 *
6513 * @mirror_num_ret: (Mandatory) returned mirror number if the original
6514 * value is 0.
6515 *
6516 * Mirror number 0 means to choose any live mirrors.
6517 *
6518 * For non-RAID56 profiles, non-zero mirror_num means
6519 * the Nth mirror. (e.g. mirror_num 1 means the first
6520 * copy).
6521 *
6522 * For RAID56 profile, mirror 1 means rebuild from P and
6523 * the remaining data stripes.
6524 *
6525 * For RAID6 profile, mirror > 2 means marking another
6526 * data/P stripe as bad and rebuilding from the remaining
6527 * stripes.
6528 */
6529int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6530 u64 logical, u64 *length,
6531 struct btrfs_io_context **bioc_ret,
6532 struct btrfs_io_stripe *smap, int *mirror_num_ret)
6533{
6534 struct btrfs_chunk_map *map;
6535 struct btrfs_io_geometry io_geom = { 0 };
6536 u64 map_offset;
6537 int i;
6538 int ret = 0;
6539 int num_copies;
6540 struct btrfs_io_context *bioc = NULL;
6541 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6542 int dev_replace_is_ongoing = 0;
6543 u16 num_alloc_stripes;
6544 u64 max_len;
6545
6546 ASSERT(bioc_ret);
6547
6548 io_geom.mirror_num = (mirror_num_ret ? *mirror_num_ret : 0);
6549 io_geom.num_stripes = 1;
6550 io_geom.stripe_index = 0;
6551 io_geom.op = op;
6552
6553 num_copies = btrfs_num_copies(fs_info, logical, fs_info->sectorsize);
6554 if (io_geom.mirror_num > num_copies)
6555 return -EINVAL;
6556
6557 map = btrfs_get_chunk_map(fs_info, logical, *length);
6558 if (IS_ERR(map))
6559 return PTR_ERR(map);
6560
6561 map_offset = logical - map->start;
6562 io_geom.raid56_full_stripe_start = (u64)-1;
6563 max_len = btrfs_max_io_len(map, map_offset, &io_geom);
6564 *length = min_t(u64, map->chunk_len - map_offset, max_len);
6565
6566 down_read(&dev_replace->rwsem);
6567 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6568 /*
6569 * Hold the semaphore for read during the whole operation; write is
6570 * requested at commit time but must wait.
6571 */
6572 if (!dev_replace_is_ongoing)
6573 up_read(&dev_replace->rwsem);
6574
6575 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6576 case BTRFS_BLOCK_GROUP_RAID0:
6577 map_blocks_raid0(map, &io_geom);
6578 break;
6579 case BTRFS_BLOCK_GROUP_RAID1:
6580 case BTRFS_BLOCK_GROUP_RAID1C3:
6581 case BTRFS_BLOCK_GROUP_RAID1C4:
6582 map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing);
6583 break;
6584 case BTRFS_BLOCK_GROUP_DUP:
6585 map_blocks_dup(map, &io_geom);
6586 break;
6587 case BTRFS_BLOCK_GROUP_RAID10:
6588 map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing);
6589 break;
6590 case BTRFS_BLOCK_GROUP_RAID5:
6591 case BTRFS_BLOCK_GROUP_RAID6:
6592 if (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)
6593 map_blocks_raid56_write(map, &io_geom, logical, length);
6594 else
6595 map_blocks_raid56_read(map, &io_geom);
6596 break;
6597 default:
6598 /*
6599 * After this, stripe_nr is the number of stripes on this
6600 * device we have to walk to find the data, and stripe_index is
6601 * the index of our device in the stripe array.
6602 */
6603 map_blocks_single(map, &io_geom);
6604 break;
6605 }
6606 if (io_geom.stripe_index >= map->num_stripes) {
6607 btrfs_crit(fs_info,
6608 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6609 io_geom.stripe_index, map->num_stripes);
6610 ret = -EINVAL;
6611 goto out;
6612 }
6613
6614 num_alloc_stripes = io_geom.num_stripes;
6615 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6616 op != BTRFS_MAP_READ)
6617 /*
6618 * For the replace case, we need to add extra stripes for the
6619 * duplicated stripes.
6620 *
6621 * For both WRITE and GET_READ_MIRRORS, we may have at most
6622 * 2 more stripes (DUP types, otherwise 1).
6623 */
6624 num_alloc_stripes += 2;
6625
6626 /*
6627 * If this I/O maps to a single device, try to return the device and
6628 * physical block information on the stack instead of allocating an
6629 * I/O context structure.
6630 */
6631 if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op,
6632 io_geom.mirror_num)) {
6633 ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
6634 if (mirror_num_ret)
6635 *mirror_num_ret = io_geom.mirror_num;
6636 *bioc_ret = NULL;
6637 goto out;
6638 }
6639
6640 bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
6641 if (!bioc) {
6642 ret = -ENOMEM;
6643 goto out;
6644 }
6645 bioc->map_type = map->type;
6646
6647 /*
6648 * For a RAID56 full map, we need to make sure stripes[] follows the
6649 * rule that data stripes are all ordered, followed by P and Q
6650 * (if we have them).
6651 *
6652 * It's still mostly the same as other profiles, just with extra rotation.
6653 */
6654 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
6655 (op != BTRFS_MAP_READ || io_geom.mirror_num > 1)) {
6656 /*
6657 * For RAID56 @stripe_nr is already the number of full stripes
6658 * before us, which is also the rotation value (it needs a modulo
6659 * with num_stripes).
6660 *
6661 * In this case, we just add @i to @stripe_nr, then do the
6662 * modulo, to save one modulo call.
6663 */
6664 bioc->full_stripe_logical = map->start +
6665 btrfs_stripe_nr_to_offset(io_geom.stripe_nr *
6666 nr_data_stripes(map));
6667 for (int i = 0; i < io_geom.num_stripes; i++) {
6668 struct btrfs_io_stripe *dst = &bioc->stripes[i];
6669 u32 stripe_index;
6670
6671 stripe_index = (i + io_geom.stripe_nr) % io_geom.num_stripes;
6672 dst->dev = map->stripes[stripe_index].dev;
6673 dst->physical =
6674 map->stripes[stripe_index].physical +
6675 io_geom.stripe_offset +
6676 btrfs_stripe_nr_to_offset(io_geom.stripe_nr);
6677 }
6678 } else {
6679 /*
6680 * For all other non-RAID56 profiles, just copy the target
6681 * stripe into the bioc.
6682 */
6683 for (i = 0; i < io_geom.num_stripes; i++) {
6684 ret = set_io_stripe(fs_info, logical, length,
6685 &bioc->stripes[i], map, &io_geom);
6686 if (ret < 0)
6687 break;
6688 io_geom.stripe_index++;
6689 }
6690 }
6691
6692 if (ret) {
6693 *bioc_ret = NULL;
6694 btrfs_put_bioc(bioc);
6695 goto out;
6696 }
6697
6698 if (op != BTRFS_MAP_READ)
6699 io_geom.max_errors = btrfs_chunk_max_errors(map);
6700
6701 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6702 op != BTRFS_MAP_READ) {
6703 handle_ops_on_dev_replace(op, bioc, dev_replace, logical,
6704 &io_geom.num_stripes, &io_geom.max_errors);
6705 }
6706
6707 *bioc_ret = bioc;
6708 bioc->num_stripes = io_geom.num_stripes;
6709 bioc->max_errors = io_geom.max_errors;
6710 bioc->mirror_num = io_geom.mirror_num;
6711
6712out:
6713 if (dev_replace_is_ongoing) {
6714 lockdep_assert_held(&dev_replace->rwsem);
6715 /* Unlock and let waiting writers proceed */
6716 up_read(&dev_replace->rwsem);
6717 }
6718 btrfs_free_chunk_map(map);
6719 return ret;
6720}
6721
6722static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
6723 const struct btrfs_fs_devices *fs_devices)
6724{
6725 if (args->fsid == NULL)
6726 return true;
6727 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
6728 return true;
6729 return false;
6730}
6731
6732static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
6733 const struct btrfs_device *device)
6734{
6735 if (args->missing) {
6736 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
6737 !device->bdev)
6738 return true;
6739 return false;
6740 }
6741
6742 if (device->devid != args->devid)
6743 return false;
6744 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
6745 return false;
6746 return true;
6747}
6748
6749/*
6750 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6751 * return NULL.
6752 *
6753 * If devid and uuid are both specified, the match must be exact, otherwise
6754 * only devid is used.
6755 */
6756struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
6757 const struct btrfs_dev_lookup_args *args)
6758{
6759 struct btrfs_device *device;
6760 struct btrfs_fs_devices *seed_devs;
6761
6762 if (dev_args_match_fs_devices(args, fs_devices)) {
6763 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6764 if (dev_args_match_device(args, device))
6765 return device;
6766 }
6767 }
6768
6769 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6770 if (!dev_args_match_fs_devices(args, seed_devs))
6771 continue;
6772 list_for_each_entry(device, &seed_devs->devices, dev_list) {
6773 if (dev_args_match_device(args, device))
6774 return device;
6775 }
6776 }
6777
6778 return NULL;
6779}
6780
6781static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6782 u64 devid, u8 *dev_uuid)
6783{
6784 struct btrfs_device *device;
6785 unsigned int nofs_flag;
6786
6787 /*
6788 * We call this under the chunk_mutex, so we want to use NOFS for this
6789 * allocation; however, we don't want to change btrfs_alloc_device() to
6790 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6791 * places.
6792 */
6793
6794 nofs_flag = memalloc_nofs_save();
6795 device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
6796 memalloc_nofs_restore(nofs_flag);
6797 if (IS_ERR(device))
6798 return device;
6799
6800 list_add(&device->dev_list, &fs_devices->devices);
6801 device->fs_devices = fs_devices;
6802 fs_devices->num_devices++;
6803
6804 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6805 fs_devices->missing_devices++;
6806
6807 return device;
6808}
6809
6810/*
6811 * Allocate new device struct, set up devid and UUID.
6812 *
6813 * @fs_info: used only for generating a new devid, can be NULL if
6814 * devid is provided (i.e. @devid != NULL).
6815 * @devid: a pointer to devid for this device. If NULL a new devid
6816 * is generated.
6817 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6818 * is generated.
6819 * @path: a pointer to device path if available, NULL otherwise.
6820 *
6821 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6822 * on error. Returned struct is not linked onto any lists and must be
6823 * destroyed with btrfs_free_device.
6824 */
6825struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6826 const u64 *devid, const u8 *uuid,
6827 const char *path)
6828{
6829 struct btrfs_device *dev;
6830 u64 tmp;
6831
6832 if (WARN_ON(!devid && !fs_info))
6833 return ERR_PTR(-EINVAL);
6834
6835 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6836 if (!dev)
6837 return ERR_PTR(-ENOMEM);
6838
6839 INIT_LIST_HEAD(&dev->dev_list);
6840 INIT_LIST_HEAD(&dev->dev_alloc_list);
6841 INIT_LIST_HEAD(&dev->post_commit_list);
6842
6843 atomic_set(&dev->dev_stats_ccnt, 0);
6844 btrfs_device_data_ordered_init(dev);
6845 extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
6846
6847 if (devid)
6848 tmp = *devid;
6849 else {
6850 int ret;
6851
6852 ret = find_next_devid(fs_info, &tmp);
6853 if (ret) {
6854 btrfs_free_device(dev);
6855 return ERR_PTR(ret);
6856 }
6857 }
6858 dev->devid = tmp;
6859
6860 if (uuid)
6861 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6862 else
6863 generate_random_uuid(dev->uuid);
6864
6865 if (path) {
6866 struct rcu_string *name;
6867
6868 name = rcu_string_strdup(path, GFP_KERNEL);
6869 if (!name) {
6870 btrfs_free_device(dev);
6871 return ERR_PTR(-ENOMEM);
6872 }
6873 rcu_assign_pointer(dev->name, name);
6874 }
6875
6876 return dev;
6877}
6878
6879static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6880 u64 devid, u8 *uuid, bool error)
6881{
6882 if (error)
6883 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6884 devid, uuid);
6885 else
6886 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6887 devid, uuid);
6888}
6889
6890u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map)
6891{
6892 const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
6893
6894 return div_u64(map->chunk_len, data_stripes);
6895}
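
/*
 * E.g. a 1GiB RAID0 chunk over 4 devices has 4 data stripes and thus a
 * 256MiB stripe on each device; RAID1 has a single data stripe, so its
 * stripe length equals the chunk length.
 */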
6896
6897#if BITS_PER_LONG == 32
6898/*
6899 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
6900 * can't be accessed on 32bit systems.
6901 *
6902 * This function does a mount time check to reject the fs if it already has
6903 * a metadata chunk beyond that limit.
6904 */
6905static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6906 u64 logical, u64 length, u64 type)
6907{
6908 if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6909 return 0;
6910
6911 if (logical + length < MAX_LFS_FILESIZE)
6912 return 0;
6913
6914 btrfs_err_32bit_limit(fs_info);
6915 return -EOVERFLOW;
6916}
6917
6918/*
6919 * This is to give early warning for any metadata chunk reaching
6920 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
6921 * Although we can still access the metadata, it's not going to be possible
6922 * once the limit is reached.
6923 */
6924static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6925 u64 logical, u64 length, u64 type)
6926{
6927 if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6928 return;
6929
6930 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
6931 return;
6932
6933 btrfs_warn_32bit_limit(fs_info);
6934}
6935#endif
6936
6937static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
6938 u64 devid, u8 *uuid)
6939{
6940 struct btrfs_device *dev;
6941
6942 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6943 btrfs_report_missing_device(fs_info, devid, uuid, true);
6944 return ERR_PTR(-ENOENT);
6945 }
6946
6947 dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
6948 if (IS_ERR(dev)) {
6949 btrfs_err(fs_info, "failed to init missing device %llu: %ld",
6950 devid, PTR_ERR(dev));
6951 return dev;
6952 }
6953 btrfs_report_missing_device(fs_info, devid, uuid, false);
6954
6955 return dev;
6956}
6957
6958static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6959 struct btrfs_chunk *chunk)
6960{
6961 BTRFS_DEV_LOOKUP_ARGS(args);
6962 struct btrfs_fs_info *fs_info = leaf->fs_info;
6963 struct btrfs_chunk_map *map;
6964 u64 logical;
6965 u64 length;
6966 u64 devid;
6967 u64 type;
6968 u8 uuid[BTRFS_UUID_SIZE];
6969 int index;
6970 int num_stripes;
6971 int ret;
6972 int i;
6973
6974 logical = key->offset;
6975 length = btrfs_chunk_length(leaf, chunk);
6976 type = btrfs_chunk_type(leaf, chunk);
6977 index = btrfs_bg_flags_to_raid_index(type);
6978 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6979
6980#if BITS_PER_LONG == 32
6981 ret = check_32bit_meta_chunk(fs_info, logical, length, type);
6982 if (ret < 0)
6983 return ret;
6984 warn_32bit_meta_chunk(fs_info, logical, length, type);
6985#endif
6986
6987 /*
6988 * Only need to verify chunk item if we're reading from sys chunk array,
6989 * as chunk item in tree block is already verified by tree-checker.
6990 */
6991 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6992 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6993 if (ret)
6994 return ret;
6995 }
6996
6997 map = btrfs_find_chunk_map(fs_info, logical, 1);
6998
6999 /* already mapped? */
7000 if (map && map->start <= logical && map->start + map->chunk_len > logical) {
7001 btrfs_free_chunk_map(map);
7002 return 0;
7003 } else if (map) {
7004 btrfs_free_chunk_map(map);
7005 }
7006
7007 map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS);
7008 if (!map)
7009 return -ENOMEM;
7010
7011 map->start = logical;
7012 map->chunk_len = length;
7013 map->num_stripes = num_stripes;
7014 map->io_width = btrfs_chunk_io_width(leaf, chunk);
7015 map->io_align = btrfs_chunk_io_align(leaf, chunk);
7016 map->type = type;
7017 /*
7018 * We can't use the sub_stripes value, as for profiles other than
7019 * RAID10, they may have 0 as sub_stripes for filesystems created by
7020 * older mkfs (<v5.4).
7021 * In that case, it can cause divide-by-zero errors later.
7022 * Since currently sub_stripes is fixed for each profile, let's
7023 * use the trusted value instead.
7024 */
7025 map->sub_stripes = btrfs_raid_array[index].sub_stripes;
7026 map->verified_stripes = 0;
7027 map->stripe_size = btrfs_calc_stripe_length(map);
7028 for (i = 0; i < num_stripes; i++) {
7029 map->stripes[i].physical =
7030 btrfs_stripe_offset_nr(leaf, chunk, i);
7031 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
7032 args.devid = devid;
7033 read_extent_buffer(leaf, uuid, (unsigned long)
7034 btrfs_stripe_dev_uuid_nr(chunk, i),
7035 BTRFS_UUID_SIZE);
7036 args.uuid = uuid;
7037 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
7038 if (!map->stripes[i].dev) {
7039 map->stripes[i].dev = handle_missing_device(fs_info,
7040 devid, uuid);
7041 if (IS_ERR(map->stripes[i].dev)) {
7042 ret = PTR_ERR(map->stripes[i].dev);
7043 btrfs_free_chunk_map(map);
7044 return ret;
7045 }
7046 }
7047
7048 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7049 &(map->stripes[i].dev->dev_state));
7050 }
7051
7052 ret = btrfs_add_chunk_map(fs_info, map);
7053 if (ret < 0) {
7054 btrfs_err(fs_info,
7055 "failed to add chunk map, start=%llu len=%llu: %d",
7056 map->start, map->chunk_len, ret);
7057 }
7058
7059 return ret;
7060}
7061
7062static void fill_device_from_item(struct extent_buffer *leaf,
7063 struct btrfs_dev_item *dev_item,
7064 struct btrfs_device *device)
7065{
7066 unsigned long ptr;
7067
7068 device->devid = btrfs_device_id(leaf, dev_item);
7069 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7070 device->total_bytes = device->disk_total_bytes;
7071 device->commit_total_bytes = device->disk_total_bytes;
7072 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7073 device->commit_bytes_used = device->bytes_used;
7074 device->type = btrfs_device_type(leaf, dev_item);
7075 device->io_align = btrfs_device_io_align(leaf, dev_item);
7076 device->io_width = btrfs_device_io_width(leaf, dev_item);
7077 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7078 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7079 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7080
7081 ptr = btrfs_device_uuid(dev_item);
7082 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7083}
7084
7085static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7086 u8 *fsid)
7087{
7088 struct btrfs_fs_devices *fs_devices;
7089 int ret;
7090
7091 lockdep_assert_held(&uuid_mutex);
7092 ASSERT(fsid);
7093
7094 /* This will match only for multi-device seed fs */
7095 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7096 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7097 return fs_devices;
7098
7099
7100 fs_devices = find_fsid(fsid, NULL);
7101 if (!fs_devices) {
7102 if (!btrfs_test_opt(fs_info, DEGRADED))
7103 return ERR_PTR(-ENOENT);
7104
7105 fs_devices = alloc_fs_devices(fsid);
7106 if (IS_ERR(fs_devices))
7107 return fs_devices;
7108
7109 fs_devices->seeding = true;
7110 fs_devices->opened = 1;
7111 return fs_devices;
7112 }
7113
7114 /*
7115 * Upon first call for a seed fs fsid, just create a private copy of the
7116 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list.
7117 */
7118 fs_devices = clone_fs_devices(fs_devices);
7119 if (IS_ERR(fs_devices))
7120 return fs_devices;
7121
7122 ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder);
7123 if (ret) {
7124 free_fs_devices(fs_devices);
7125 return ERR_PTR(ret);
7126 }
7127
7128 if (!fs_devices->seeding) {
7129 close_fs_devices(fs_devices);
7130 free_fs_devices(fs_devices);
7131 return ERR_PTR(-EINVAL);
7132 }
7133
7134 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7135
7136 return fs_devices;
7137}
7138
7139static int read_one_dev(struct extent_buffer *leaf,
7140 struct btrfs_dev_item *dev_item)
7141{
7142 BTRFS_DEV_LOOKUP_ARGS(args);
7143 struct btrfs_fs_info *fs_info = leaf->fs_info;
7144 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7145 struct btrfs_device *device;
7146 u64 devid;
7147 int ret;
7148 u8 fs_uuid[BTRFS_FSID_SIZE];
7149 u8 dev_uuid[BTRFS_UUID_SIZE];
7150
7151 devid = btrfs_device_id(leaf, dev_item);
7152 args.devid = devid;
7153 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7154 BTRFS_UUID_SIZE);
7155 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7156 BTRFS_FSID_SIZE);
7157 args.uuid = dev_uuid;
7158 args.fsid = fs_uuid;
7159
7160 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7161 fs_devices = open_seed_devices(fs_info, fs_uuid);
7162 if (IS_ERR(fs_devices))
7163 return PTR_ERR(fs_devices);
7164 }
7165
7166 device = btrfs_find_device(fs_info->fs_devices, &args);
7167 if (!device) {
7168 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7169 btrfs_report_missing_device(fs_info, devid,
7170 dev_uuid, true);
7171 return -ENOENT;
7172 }
7173
7174 device = add_missing_dev(fs_devices, devid, dev_uuid);
7175 if (IS_ERR(device)) {
7176 btrfs_err(fs_info,
7177 "failed to add missing dev %llu: %ld",
7178 devid, PTR_ERR(device));
7179 return PTR_ERR(device);
7180 }
7181 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7182 } else {
7183 if (!device->bdev) {
7184 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7185 btrfs_report_missing_device(fs_info,
7186 devid, dev_uuid, true);
7187 return -ENOENT;
7188 }
7189 btrfs_report_missing_device(fs_info, devid,
7190 dev_uuid, false);
7191 }
7192
7193 if (!device->bdev &&
7194 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7195 /*
7196 * This happens when a device that was properly set up
7197 * in the device info lists suddenly goes bad.
7198 * device->bdev is NULL, and so we have to set
7199 * the BTRFS_DEV_STATE_MISSING bit here.
7200 */
7201 device->fs_devices->missing_devices++;
7202 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7203 }
7204
7205 /* Move the device to its own fs_devices */
7206 if (device->fs_devices != fs_devices) {
7207 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7208 &device->dev_state));
7209
7210 list_move(&device->dev_list, &fs_devices->devices);
7211 device->fs_devices->num_devices--;
7212 fs_devices->num_devices++;
7213
7214 device->fs_devices->missing_devices--;
7215 fs_devices->missing_devices++;
7216
7217 device->fs_devices = fs_devices;
7218 }
7219 }
7220
7221 if (device->fs_devices != fs_info->fs_devices) {
7222 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7223 if (device->generation !=
7224 btrfs_device_generation(leaf, dev_item))
7225 return -EINVAL;
7226 }
7227
7228 fill_device_from_item(leaf, dev_item, device);
7229 if (device->bdev) {
7230 u64 max_total_bytes = bdev_nr_bytes(device->bdev);
7231
7232 if (device->total_bytes > max_total_bytes) {
7233 btrfs_err(fs_info,
7234 "device total_bytes should be at most %llu but found %llu",
7235 max_total_bytes, device->total_bytes);
7236 return -EINVAL;
7237 }
7238 }
7239 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7240 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7241 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7242 device->fs_devices->total_rw_bytes += device->total_bytes;
7243 atomic64_add(device->total_bytes - device->bytes_used,
7244 &fs_info->free_chunk_space);
7245 }
7246 ret = 0;
7247 return ret;
7248}
7249
7250int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7251{
7252 struct btrfs_super_block *super_copy = fs_info->super_copy;
7253 struct extent_buffer *sb;
7254 struct btrfs_disk_key *disk_key;
7255 struct btrfs_chunk *chunk;
7256 u8 *array_ptr;
7257 unsigned long sb_array_offset;
7258 int ret = 0;
7259 u32 num_stripes;
7260 u32 array_size;
7261 u32 len = 0;
7262 u32 cur_offset;
7263 u64 type;
7264 struct btrfs_key key;
7265
7266 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7267
7268 /*
7269 * We allocated a dummy extent, just to use extent buffer accessors.
7270 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but
7271 * that's fine, we will not go beyond system chunk array anyway.
7272 */
7273 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET);
7274 if (!sb)
7275 return -ENOMEM;
7276 set_extent_buffer_uptodate(sb);
7277
7278 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7279 array_size = btrfs_super_sys_array_size(super_copy);
7280
7281 array_ptr = super_copy->sys_chunk_array;
7282 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7283 cur_offset = 0;
7284
7285 while (cur_offset < array_size) {
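/*
 * The sys_chunk_array is a packed sequence of (struct btrfs_disk_key,
 * struct btrfs_chunk + stripes) pairs, hence the incremental bounds
 * checks against array_size before each field is read below.
 */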
7286 disk_key = (struct btrfs_disk_key *)array_ptr;
7287 len = sizeof(*disk_key);
7288 if (cur_offset + len > array_size)
7289 goto out_short_read;
7290
7291 btrfs_disk_key_to_cpu(&key, disk_key);
7292
7293 array_ptr += len;
7294 sb_array_offset += len;
7295 cur_offset += len;
7296
7297 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7298 btrfs_err(fs_info,
7299 "unexpected item type %u in sys_array at offset %u",
7300 (u32)key.type, cur_offset);
7301 ret = -EIO;
7302 break;
7303 }
7304
7305 chunk = (struct btrfs_chunk *)sb_array_offset;
7306 /*
7307 * At least one btrfs_chunk with one stripe must be present;
7308 * the exact stripe count check comes afterwards.
7309 */
7310 len = btrfs_chunk_item_size(1);
7311 if (cur_offset + len > array_size)
7312 goto out_short_read;
7313
7314 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7315 if (!num_stripes) {
7316 btrfs_err(fs_info,
7317 "invalid number of stripes %u in sys_array at offset %u",
7318 num_stripes, cur_offset);
7319 ret = -EIO;
7320 break;
7321 }
7322
7323 type = btrfs_chunk_type(sb, chunk);
7324 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7325 btrfs_err(fs_info,
7326 "invalid chunk type %llu in sys_array at offset %u",
7327 type, cur_offset);
7328 ret = -EIO;
7329 break;
7330 }
7331
7332 len = btrfs_chunk_item_size(num_stripes);
7333 if (cur_offset + len > array_size)
7334 goto out_short_read;
7335
7336 ret = read_one_chunk(&key, sb, chunk);
7337 if (ret)
7338 break;
7339
7340 array_ptr += len;
7341 sb_array_offset += len;
7342 cur_offset += len;
7343 }
7344 clear_extent_buffer_uptodate(sb);
7345 free_extent_buffer_stale(sb);
7346 return ret;
7347
7348out_short_read:
7349 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7350 len, cur_offset);
7351 clear_extent_buffer_uptodate(sb);
7352 free_extent_buffer_stale(sb);
7353 return -EIO;
7354}
7355
7356/*
7357 * Check if all chunks in the fs are OK for read-write degraded mount
7358 *
7359 * If the @failing_dev is specified, it's accounted as missing.
7360 *
7361 * Return true if all chunks meet the minimal RW mount requirements.
7362 * Return false if any chunk doesn't meet the minimal RW mount requirements.
7363 */
7364bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7365 struct btrfs_device *failing_dev)
7366{
7367 struct btrfs_chunk_map *map;
7368 u64 next_start;
7369 bool ret = true;
7370
7371 map = btrfs_find_chunk_map(fs_info, 0, U64_MAX);
7372 /* No chunk at all? Return false anyway */
7373 if (!map) {
7374 ret = false;
7375 goto out;
7376 }
7377 while (map) {
7378 int missing = 0;
7379 int max_tolerated;
7380 int i;
7381
7382 max_tolerated =
7383 btrfs_get_num_tolerated_disk_barrier_failures(
7384 map->type);
7385 for (i = 0; i < map->num_stripes; i++) {
7386 struct btrfs_device *dev = map->stripes[i].dev;
7387
7388 if (!dev || !dev->bdev ||
7389 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7390 dev->last_flush_error)
7391 missing++;
7392 else if (failing_dev && failing_dev == dev)
7393 missing++;
7394 }
7395 if (missing > max_tolerated) {
7396 if (!failing_dev)
7397 btrfs_warn(fs_info,
7398 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7399 map->start, missing, max_tolerated);
7400 btrfs_free_chunk_map(map);
7401 ret = false;
7402 goto out;
7403 }
7404 next_start = map->start + map->chunk_len;
7405 btrfs_free_chunk_map(map);
7406
7407 map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start);
7408 }
7409out:
7410 return ret;
7411}
7412
7413static void readahead_tree_node_children(struct extent_buffer *node)
7414{
7415 int i;
7416 const int nr_items = btrfs_header_nritems(node);
7417
7418 for (i = 0; i < nr_items; i++)
7419 btrfs_readahead_node_child(node, i);
7420}
7421
7422int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7423{
7424 struct btrfs_root *root = fs_info->chunk_root;
7425 struct btrfs_path *path;
7426 struct extent_buffer *leaf;
7427 struct btrfs_key key;
7428 struct btrfs_key found_key;
7429 int ret;
7430 int slot;
7431 int iter_ret = 0;
7432 u64 total_dev = 0;
7433 u64 last_ra_node = 0;
7434
7435 path = btrfs_alloc_path();
7436 if (!path)
7437 return -ENOMEM;
7438
7439 /*
7440 * uuid_mutex is needed only if we are mounting a sprout FS;
7441 * otherwise we don't need it.
7442 */
7443 mutex_lock(&uuid_mutex);
7444
7445 /*
7446 * It is possible for mount and umount to race in such a way that
7447 * we execute this code path, but open_fs_devices failed to clear
7448 * total_rw_bytes. We certainly want it cleared before reading the
7449 * device items, so clear it here.
7450 */
7451 fs_info->fs_devices->total_rw_bytes = 0;
7452
7453 /*
7454 * Lockdep complains about possible circular locking dependency between
7455 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7456 * used for freeze protection of a fs (struct super_block.s_writers),
7457 * which we take when starting a transaction, and extent buffers of the
7458 * chunk tree if we call read_one_dev() while holding a lock on an
7459 * extent buffer of the chunk tree. Since we are mounting the filesystem
7460 * and at this point there can't be any concurrent task modifying the
7461 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7462 */
7463 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7464 path->skip_locking = 1;
7465
7466 /*
7467 * Read all device items, and then all the chunk items. All
7468 * device items are found before any chunk item (their object id
7469 * is smaller than the lowest possible object id for a chunk
7470 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7471 */
7472 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7473 key.offset = 0;
7474 key.type = 0;
7475 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
7476 struct extent_buffer *node = path->nodes[1];
7477
7478 leaf = path->nodes[0];
7479 slot = path->slots[0];
7480
7481 if (node) {
7482 if (last_ra_node != node->start) {
7483 readahead_tree_node_children(node);
7484 last_ra_node = node->start;
7485 }
7486 }
7487 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7488			struct btrfs_dev_item *dev_item;

7489 dev_item = btrfs_item_ptr(leaf, slot,
7490 struct btrfs_dev_item);
7491 ret = read_one_dev(leaf, dev_item);
7492 if (ret)
7493 goto error;
7494 total_dev++;
7495 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7496 struct btrfs_chunk *chunk;
7497
7498 /*
7499 * We are only called at mount time, so no need to take
7500 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7501 * we always lock first fs_info->chunk_mutex before
7502 * acquiring any locks on the chunk tree. This is a
7503 * requirement for chunk allocation, see the comment on
7504 * top of btrfs_chunk_alloc() for details.
7505 */
7506 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7507 ret = read_one_chunk(&found_key, leaf, chunk);
7508 if (ret)
7509 goto error;
7510 }
7511 }
7512 /* Catch error found during iteration */
7513 if (iter_ret < 0) {
7514 ret = iter_ret;
7515 goto error;
7516 }
7517
7518 /*
7519	 * After loading the chunk tree we have all device information, so do
7520	 * another round of validation checks.
7521 */
7522 if (total_dev != fs_info->fs_devices->total_devices) {
7523 btrfs_warn(fs_info,
7524"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7525 btrfs_super_num_devices(fs_info->super_copy),
7526 total_dev);
7527 fs_info->fs_devices->total_devices = total_dev;
7528 btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7529 }
7530 if (btrfs_super_total_bytes(fs_info->super_copy) <
7531 fs_info->fs_devices->total_rw_bytes) {
7532 btrfs_err(fs_info,
7533 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7534 btrfs_super_total_bytes(fs_info->super_copy),
7535 fs_info->fs_devices->total_rw_bytes);
7536 ret = -EINVAL;
7537 goto error;
7538 }
7539 ret = 0;
7540error:
7541 mutex_unlock(&uuid_mutex);
7542
7543 btrfs_free_path(path);
7544 return ret;
7545}
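
/*
 * For reference, the key layout walked by btrfs_read_chunk_tree() above:
 *
 *	(BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, devid)
 *	(BTRFS_FIRST_CHUNK_TREE_OBJECTID, BTRFS_CHUNK_ITEM_KEY, chunk start)
 *
 * BTRFS_DEV_ITEMS_OBJECTID (1) sorts before BTRFS_FIRST_CHUNK_TREE_OBJECTID
 * (256), which is why all device items are visited before any chunk item.
 */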
7546
7547int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7548{
7549 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7550 struct btrfs_device *device;
7551 int ret = 0;
7552
7553 fs_devices->fs_info = fs_info;
7554
7555 mutex_lock(&fs_devices->device_list_mutex);
7556 list_for_each_entry(device, &fs_devices->devices, dev_list)
7557 device->fs_info = fs_info;
7558
7559 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7560 list_for_each_entry(device, &seed_devs->devices, dev_list) {
7561 device->fs_info = fs_info;
7562 ret = btrfs_get_dev_zone_info(device, false);
7563 if (ret)
7564 break;
7565 }
7566
7567 seed_devs->fs_info = fs_info;
7568 }
7569 mutex_unlock(&fs_devices->device_list_mutex);
7570
7571 return ret;
7572}
7573
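/*
 * The two helpers below access the @index-th u64 counter of an on-disk
 * btrfs_dev_stats_item directly in the extent buffer; @ptr is the item's
 * offset inside @eb, as returned by btrfs_item_ptr().
 */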
7574static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7575 const struct btrfs_dev_stats_item *ptr,
7576 int index)
7577{
7578 u64 val;
7579
7580 read_extent_buffer(eb, &val,
7581 offsetof(struct btrfs_dev_stats_item, values) +
7582 ((unsigned long)ptr) + (index * sizeof(u64)),
7583 sizeof(val));
7584 return val;
7585}
7586
7587static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7588 struct btrfs_dev_stats_item *ptr,
7589 int index, u64 val)
7590{
7591 write_extent_buffer(eb, &val,
7592 offsetof(struct btrfs_dev_stats_item, values) +
7593 ((unsigned long)ptr) + (index * sizeof(u64)),
7594 sizeof(val));
7595}
7596
7597static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7598 struct btrfs_path *path)
7599{
7600 struct btrfs_dev_stats_item *ptr;
7601 struct extent_buffer *eb;
7602 struct btrfs_key key;
7603 int item_size;
7604 int i, ret, slot;
7605
7606 if (!device->fs_info->dev_root)
7607 return 0;
7608
7609 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7610 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7611 key.offset = device->devid;
7612 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7613 if (ret) {
7614 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7615 btrfs_dev_stat_set(device, i, 0);
7616 device->dev_stats_valid = 1;
7617 btrfs_release_path(path);
7618 return ret < 0 ? ret : 0;
7619 }
7620 slot = path->slots[0];
7621 eb = path->nodes[0];
7622 item_size = btrfs_item_size(eb, slot);
7623
7624 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7625
7626 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7627 if (item_size >= (1 + i) * sizeof(__le64))
7628 btrfs_dev_stat_set(device, i,
7629 btrfs_dev_stats_value(eb, ptr, i));
7630 else
7631 btrfs_dev_stat_set(device, i, 0);
7632 }
7633
7634 device->dev_stats_valid = 1;
7635 btrfs_dev_stat_print_on_load(device);
7636 btrfs_release_path(path);
7637
7638 return 0;
7639}
7640
7641int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7642{
7643 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7644 struct btrfs_device *device;
7645 struct btrfs_path *path = NULL;
7646 int ret = 0;
7647
7648 path = btrfs_alloc_path();
7649 if (!path)
7650 return -ENOMEM;
7651
7652 mutex_lock(&fs_devices->device_list_mutex);
7653 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7654 ret = btrfs_device_init_dev_stats(device, path);
7655 if (ret)
7656 goto out;
7657 }
7658 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7659 list_for_each_entry(device, &seed_devs->devices, dev_list) {
7660 ret = btrfs_device_init_dev_stats(device, path);
7661 if (ret)
7662 goto out;
7663 }
7664 }
7665out:
7666 mutex_unlock(&fs_devices->device_list_mutex);
7667
7668 btrfs_free_path(path);
7669 return ret;
7670}
7671
7672static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7673 struct btrfs_device *device)
7674{
7675 struct btrfs_fs_info *fs_info = trans->fs_info;
7676 struct btrfs_root *dev_root = fs_info->dev_root;
7677 struct btrfs_path *path;
7678 struct btrfs_key key;
7679 struct extent_buffer *eb;
7680 struct btrfs_dev_stats_item *ptr;
7681 int ret;
7682 int i;
7683
7684 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7685 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7686 key.offset = device->devid;
7687
7688 path = btrfs_alloc_path();
7689 if (!path)
7690 return -ENOMEM;
7691 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7692 if (ret < 0) {
7693 btrfs_warn_in_rcu(fs_info,
7694 "error %d while searching for dev_stats item for device %s",
7695 ret, btrfs_dev_name(device));
7696 goto out;
7697 }
7698
7699 if (ret == 0 &&
7700 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7701 /* need to delete old one and insert a new one */
7702 ret = btrfs_del_item(trans, dev_root, path);
7703 if (ret != 0) {
7704 btrfs_warn_in_rcu(fs_info,
7705 "delete too small dev_stats item for device %s failed %d",
7706 btrfs_dev_name(device), ret);
7707 goto out;
7708 }
7709 ret = 1;
7710 }
7711
7712 if (ret == 1) {
7713 /* need to insert a new item */
7714 btrfs_release_path(path);
7715 ret = btrfs_insert_empty_item(trans, dev_root, path,
7716 &key, sizeof(*ptr));
7717 if (ret < 0) {
7718 btrfs_warn_in_rcu(fs_info,
7719 "insert dev_stats item for device %s failed %d",
7720 btrfs_dev_name(device), ret);
7721 goto out;
7722 }
7723 }
7724
7725 eb = path->nodes[0];
7726 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7727 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7728 btrfs_set_dev_stats_value(eb, ptr, i,
7729 btrfs_dev_stat_read(device, i));
7730 btrfs_mark_buffer_dirty(trans, eb);
7731
7732out:
7733 btrfs_free_path(path);
7734 return ret;
7735}
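
/*
 * Note on the btrfs_search_slot() convention used above: 0 means the key
 * was found, 1 means it was not found (the path then points at the slot
 * where it would be inserted) and < 0 is an error. That is why ret == 1
 * falls through to inserting a fresh dev_stats item.
 */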
7736
7737/*
7738 * Called from commit_transaction; writes all changed device stats to disk.
7739 */
7740int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7741{
7742 struct btrfs_fs_info *fs_info = trans->fs_info;
7743 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7744 struct btrfs_device *device;
7745 int stats_cnt;
7746 int ret = 0;
7747
7748 mutex_lock(&fs_devices->device_list_mutex);
7749 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7750 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7751 if (!device->dev_stats_valid || stats_cnt == 0)
7752 continue;
7753
7755 /*
7756 * There is a LOAD-LOAD control dependency between the value of
7757 * dev_stats_ccnt and updating the on-disk values which requires
7758 * reading the in-memory counters. Such control dependencies
7759 * require explicit read memory barriers.
7760 *
7761		 * This memory barrier pairs with smp_mb__before_atomic() in
7762		 * btrfs_dev_stat_inc()/btrfs_dev_stat_set() and with the full
7763		 * barrier implied by atomic_xchg() in
7764		 * btrfs_dev_stat_read_and_reset().
7765 */
7766 smp_rmb();
7767
7768 ret = update_dev_stat_item(trans, device);
7769 if (!ret)
7770 atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7771 }
7772 mutex_unlock(&fs_devices->device_list_mutex);
7773
7774 return ret;
7775}
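
/*
 * Sketch of the ordering this relies on (simplified; see
 * btrfs_dev_stat_inc()):
 *
 *	writer				reader (btrfs_run_dev_stats())
 *	-----------------------------	------------------------------
 *	atomic_inc(dev_stat_values)	stats_cnt = atomic_read(&ccnt)
 *	smp_mb__before_atomic()		smp_rmb()
 *	atomic_inc(&ccnt)		update_dev_stat_item()
 *
 * The smp_rmb() orders the ccnt load before the stat value loads, pairing
 * with the writer updating the values before bumping ccnt, so the values
 * written out are at least as new as the counter that triggered the write.
 */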
7776
7777void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7778{
7779 btrfs_dev_stat_inc(dev, index);
7780
7781 if (!dev->dev_stats_valid)
7782 return;
7783 btrfs_err_rl_in_rcu(dev->fs_info,
7784 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7785 btrfs_dev_name(dev),
7786 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7787 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7788 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7789 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7790 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7791}
7792
7793static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7794{
7795 int i;
7796
7797 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7798 if (btrfs_dev_stat_read(dev, i) != 0)
7799 break;
7800 if (i == BTRFS_DEV_STAT_VALUES_MAX)
7801 return; /* all values == 0, suppress message */
7802
7803 btrfs_info_in_rcu(dev->fs_info,
7804 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7805 btrfs_dev_name(dev),
7806 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7807 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7808 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7809 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7810 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7811}
7812
7813int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7814 struct btrfs_ioctl_get_dev_stats *stats)
7815{
7816 BTRFS_DEV_LOOKUP_ARGS(args);
7817 struct btrfs_device *dev;
7818 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7819 int i;
7820
7821 mutex_lock(&fs_devices->device_list_mutex);
7822 args.devid = stats->devid;
7823 dev = btrfs_find_device(fs_info->fs_devices, &args);
7824 mutex_unlock(&fs_devices->device_list_mutex);
7825
7826 if (!dev) {
7827 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7828 return -ENODEV;
7829 } else if (!dev->dev_stats_valid) {
7830 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7831 return -ENODEV;
7832 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7833 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7834 if (stats->nr_items > i)
7835 stats->values[i] =
7836 btrfs_dev_stat_read_and_reset(dev, i);
7837 else
7838 btrfs_dev_stat_set(dev, i, 0);
7839 }
7840 btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7841 current->comm, task_pid_nr(current));
7842 } else {
7843 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7844 if (stats->nr_items > i)
7845 stats->values[i] = btrfs_dev_stat_read(dev, i);
7846 }
7847 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7848 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7849 return 0;
7850}
7851
7852/*
7853 * Update the size and bytes used for each device where it changed. This is
7854 * delayed since we would otherwise get errors while writing out the
7855 * superblocks.
7856 *
7857 * Must be invoked during transaction commit.
7858 */
7859void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7860{
7861 struct btrfs_device *curr, *next;
7862
7863 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7864
7865 if (list_empty(&trans->dev_update_list))
7866 return;
7867
7868 /*
7869 * We don't need the device_list_mutex here. This list is owned by the
7870 * transaction and the transaction must complete before the device is
7871 * released.
7872 */
7873 mutex_lock(&trans->fs_info->chunk_mutex);
7874 list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7875 post_commit_list) {
7876 list_del_init(&curr->post_commit_list);
7877 curr->commit_total_bytes = curr->disk_total_bytes;
7878 curr->commit_bytes_used = curr->bytes_used;
7879 }
7880 mutex_unlock(&trans->fs_info->chunk_mutex);
7881}
7882
7883/*
7884 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7885 */
7886int btrfs_bg_type_to_factor(u64 flags)
7887{
7888 const int index = btrfs_bg_flags_to_raid_index(flags);
7889
7890 return btrfs_raid_array[index].ncopies;
7891}
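
/*
 * Examples, per btrfs_raid_array[].ncopies: RAID1, RAID10 and DUP map to
 * a factor of 2, RAID1C3 to 3, RAID1C4 to 4, while SINGLE and RAID0 map
 * to 1.
 */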
7892
7895static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7896 u64 chunk_offset, u64 devid,
7897 u64 physical_offset, u64 physical_len)
7898{
7899 struct btrfs_dev_lookup_args args = { .devid = devid };
7900 struct btrfs_chunk_map *map;
7901 struct btrfs_device *dev;
7902 u64 stripe_len;
7903 bool found = false;
7904 int ret = 0;
7905 int i;
7906
7907 map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
7908 if (!map) {
7909 btrfs_err(fs_info,
7910"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7911 physical_offset, devid);
7912 ret = -EUCLEAN;
7913 goto out;
7914 }
7915
7916 stripe_len = btrfs_calc_stripe_length(map);
7917 if (physical_len != stripe_len) {
7918 btrfs_err(fs_info,
7919"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7920 physical_offset, devid, map->start, physical_len,
7921 stripe_len);
7922 ret = -EUCLEAN;
7923 goto out;
7924 }
7925
7926 /*
7927 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
7928	 * space. Although the kernel can handle it without problems, it is
7929	 * better to warn the user.
7930 */
7931 if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
7932 btrfs_warn(fs_info,
7933 "devid %llu physical %llu len %llu inside the reserved space",
7934 devid, physical_offset, physical_len);
7935
7936 for (i = 0; i < map->num_stripes; i++) {
7937 if (map->stripes[i].dev->devid == devid &&
7938 map->stripes[i].physical == physical_offset) {
7939 found = true;
7940 if (map->verified_stripes >= map->num_stripes) {
7941 btrfs_err(fs_info,
7942 "too many dev extents for chunk %llu found",
7943 map->start);
7944 ret = -EUCLEAN;
7945 goto out;
7946 }
7947 map->verified_stripes++;
7948 break;
7949 }
7950 }
7951 if (!found) {
7952 btrfs_err(fs_info,
7953 "dev extent physical offset %llu devid %llu has no corresponding chunk",
7954 physical_offset, devid);
7955 ret = -EUCLEAN;
7956 }
7957
7958 /* Make sure no dev extent is beyond device boundary */
7959 dev = btrfs_find_device(fs_info->fs_devices, &args);
7960 if (!dev) {
7961 btrfs_err(fs_info, "failed to find devid %llu", devid);
7962 ret = -EUCLEAN;
7963 goto out;
7964 }
7965
7966 if (physical_offset + physical_len > dev->disk_total_bytes) {
7967 btrfs_err(fs_info,
7968"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7969 devid, physical_offset, physical_len,
7970 dev->disk_total_bytes);
7971 ret = -EUCLEAN;
7972 goto out;
7973 }
7974
7975 if (dev->zone_info) {
7976 u64 zone_size = dev->zone_info->zone_size;
7977
7978 if (!IS_ALIGNED(physical_offset, zone_size) ||
7979 !IS_ALIGNED(physical_len, zone_size)) {
7980 btrfs_err(fs_info,
7981"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
7982 devid, physical_offset, physical_len);
7983 ret = -EUCLEAN;
7984 goto out;
7985 }
7986 }
7987
7988out:
7989 btrfs_free_chunk_map(map);
7990 return ret;
7991}
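
/*
 * Example of the stripe length check above (illustrative): a 1GiB RAID1
 * chunk has two stripes and each dev extent must be 1GiB long, while a
 * 1GiB RAID0 chunk striped over two devices must be backed by two 512MiB
 * dev extents.
 */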
7992
7993static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7994{
7995 struct rb_node *node;
7996 int ret = 0;
7997
7998 read_lock(&fs_info->mapping_tree_lock);
7999 for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
8000 struct btrfs_chunk_map *map;
8001
8002 map = rb_entry(node, struct btrfs_chunk_map, rb_node);
8003 if (map->num_stripes != map->verified_stripes) {
8004 btrfs_err(fs_info,
8005 "chunk %llu has missing dev extent, have %d expect %d",
8006 map->start, map->verified_stripes, map->num_stripes);
8007 ret = -EUCLEAN;
8008 goto out;
8009 }
8010 }
8011out:
8012 read_unlock(&fs_info->mapping_tree_lock);
8013 return ret;
8014}
8015
8016/*
8017 * Ensure that all dev extents are mapped to correct chunk, otherwise
8018 * later chunk allocation/free would cause unexpected behavior.
8019 *
8020 * NOTE: This will iterate through the whole device tree, which should be
8021 * about the same size as the chunk tree. This slightly increases mount time.
8022 */
8023int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8024{
8025 struct btrfs_path *path;
8026 struct btrfs_root *root = fs_info->dev_root;
8027 struct btrfs_key key;
8028 u64 prev_devid = 0;
8029 u64 prev_dev_ext_end = 0;
8030 int ret = 0;
8031
8032 /*
8033 * We don't have a dev_root because we mounted with ignorebadroots and
8034 * failed to load the root, so we want to skip the verification in this
8035 * case for sure.
8036 *
8037 * However if the dev root is fine, but the tree itself is corrupted
8038 * we'd still fail to mount. This verification is only to make sure
8039 * writes can happen safely, so instead just bypass this check
8040 * completely in the case of IGNOREBADROOTS.
8041 */
8042 if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8043 return 0;
8044
8045 key.objectid = 1;
8046 key.type = BTRFS_DEV_EXTENT_KEY;
8047 key.offset = 0;
8048
8049 path = btrfs_alloc_path();
8050 if (!path)
8051 return -ENOMEM;
8052
8053 path->reada = READA_FORWARD;
8054 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8055 if (ret < 0)
8056 goto out;
8057
8058 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8059 ret = btrfs_next_leaf(root, path);
8060 if (ret < 0)
8061 goto out;
8062 /* No dev extents at all? Not good */
8063 if (ret > 0) {
8064 ret = -EUCLEAN;
8065 goto out;
8066 }
8067 }
8068 while (1) {
8069 struct extent_buffer *leaf = path->nodes[0];
8070 struct btrfs_dev_extent *dext;
8071 int slot = path->slots[0];
8072 u64 chunk_offset;
8073 u64 physical_offset;
8074 u64 physical_len;
8075 u64 devid;
8076
8077 btrfs_item_key_to_cpu(leaf, &key, slot);
8078 if (key.type != BTRFS_DEV_EXTENT_KEY)
8079 break;
8080 devid = key.objectid;
8081 physical_offset = key.offset;
8082
8083 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8084 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8085 physical_len = btrfs_dev_extent_length(leaf, dext);
8086
8087 /* Check if this dev extent overlaps with the previous one */
8088 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8089 btrfs_err(fs_info,
8090"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8091 devid, physical_offset, prev_dev_ext_end);
8092 ret = -EUCLEAN;
8093 goto out;
8094 }
8095
8096 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8097 physical_offset, physical_len);
8098 if (ret < 0)
8099 goto out;
8100 prev_devid = devid;
8101 prev_dev_ext_end = physical_offset + physical_len;
8102
8103 ret = btrfs_next_item(root, path);
8104 if (ret < 0)
8105 goto out;
8106 if (ret > 0) {
8107 ret = 0;
8108 break;
8109 }
8110 }
8111
8112 /* Ensure all chunks have corresponding dev extents */
8113 ret = verify_chunk_dev_extent_mapping(fs_info);
8114out:
8115 btrfs_free_path(path);
8116 return ret;
8117}
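
/*
 * Example of the overlap check above (illustrative): two dev extents on
 * devid 1 at physical 1MiB (len 1GiB) and physical 512MiB overlap, since
 * the second one starts below prev_dev_ext_end = 1MiB + 1GiB, so the
 * device tree is corrupted and -EUCLEAN is returned.
 */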
8118
8119/*
8120 * Check whether the given block group or device is pinned by any inode being
8121 * used as a swapfile.
8122 */
8123bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8124{
8125 struct btrfs_swapfile_pin *sp;
8126 struct rb_node *node;
8127
8128 spin_lock(&fs_info->swapfile_pins_lock);
8129 node = fs_info->swapfile_pins.rb_node;
8130 while (node) {
8131 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8132 if (ptr < sp->ptr)
8133 node = node->rb_left;
8134 else if (ptr > sp->ptr)
8135 node = node->rb_right;
8136 else
8137 break;
8138 }
8139 spin_unlock(&fs_info->swapfile_pins_lock);
8140 return node != NULL;
8141}
8142
8143static int relocating_repair_kthread(void *data)
8144{
8145 struct btrfs_block_group *cache = data;
8146 struct btrfs_fs_info *fs_info = cache->fs_info;
8147 u64 target;
8148 int ret = 0;
8149
8150 target = cache->start;
8151 btrfs_put_block_group(cache);
8152
8153 sb_start_write(fs_info->sb);
8154 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8155 btrfs_info(fs_info,
8156 "zoned: skip relocating block group %llu to repair: EBUSY",
8157 target);
8158 sb_end_write(fs_info->sb);
8159 return -EBUSY;
8160 }
8161
8162 mutex_lock(&fs_info->reclaim_bgs_lock);
8163
8164 /* Ensure block group still exists */
8165 cache = btrfs_lookup_block_group(fs_info, target);
8166 if (!cache)
8167 goto out;
8168
8169 if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
8170 goto out;
8171
8172 ret = btrfs_may_alloc_data_chunk(fs_info, target);
8173 if (ret < 0)
8174 goto out;
8175
8176 btrfs_info(fs_info,
8177 "zoned: relocating block group %llu to repair IO failure",
8178 target);
8179 ret = btrfs_relocate_chunk(fs_info, target);
8180
8181out:
8182 if (cache)
8183 btrfs_put_block_group(cache);
8184 mutex_unlock(&fs_info->reclaim_bgs_lock);
8185 btrfs_exclop_finish(fs_info);
8186 sb_end_write(fs_info->sb);
8187
8188 return ret;
8189}
8190
8191bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8192{
8193 struct btrfs_block_group *cache;
8194
8195 if (!btrfs_is_zoned(fs_info))
8196 return false;
8197
8198 /* Do not attempt to repair in degraded state */
8199 if (btrfs_test_opt(fs_info, DEGRADED))
8200 return true;
8201
8202 cache = btrfs_lookup_block_group(fs_info, logical);
8203 if (!cache)
8204 return true;
8205
8206 if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
8207 btrfs_put_block_group(cache);
8208 return true;
8209 }
8210
8211 kthread_run(relocating_repair_kthread, cache,
8212 "btrfs-relocating-repair");
8213
8214 return true;
8215}
8216
8217static void map_raid56_repair_block(struct btrfs_io_context *bioc,
8218 struct btrfs_io_stripe *smap,
8219 u64 logical)
8220{
8221 int data_stripes = nr_bioc_data_stripes(bioc);
8222 int i;
8223
8224 for (i = 0; i < data_stripes; i++) {
8225 u64 stripe_start = bioc->full_stripe_logical +
8226 btrfs_stripe_nr_to_offset(i);
8227
8228 if (logical >= stripe_start &&
8229 logical < stripe_start + BTRFS_STRIPE_LEN)
8230 break;
8231 }
8232 ASSERT(i < data_stripes);
8233 smap->dev = bioc->stripes[i].dev;
8234 smap->physical = bioc->stripes[i].physical +
8235 ((logical - bioc->full_stripe_logical) &
8236 BTRFS_STRIPE_LEN_MASK);
8237}
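
/*
 * Example (illustrative): with BTRFS_STRIPE_LEN == 64KiB and a full
 * stripe starting at logical L, a repair at logical L + 70KiB falls into
 * data stripe 1 and maps to bioc->stripes[1].physical + 6KiB.
 */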
8238
8239/*
8240 * Map a repair write into a single device.
8241 *
8242 * A repair write is triggered by read-time repair or scrub, and it only
8243 * updates the contents of a single device; it does not update any other
8244 * mirrors nor go through the RMW path.
8245 *
8246 * Callers should ensure:
8247 *
8248 * - Call btrfs_bio_counter_inc_blocked() first
8249 * - The range does not cross stripe boundary
8250 * - Has a valid @mirror_num passed in.
8251 */
8252int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
8253 struct btrfs_io_stripe *smap, u64 logical,
8254 u32 length, int mirror_num)
8255{
8256 struct btrfs_io_context *bioc = NULL;
8257 u64 map_length = length;
8258 int mirror_ret = mirror_num;
8259 int ret;
8260
8261 ASSERT(mirror_num > 0);
8262
8263 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
8264 &bioc, smap, &mirror_ret);
8265 if (ret < 0)
8266 return ret;
8267
8268 /* The map range should not cross stripe boundary. */
8269 ASSERT(map_length >= length);
8270
8271 /* Already mapped to single stripe. */
8272 if (!bioc)
8273 goto out;
8274
8275 /* Map the RAID56 multi-stripe writes to a single one. */
8276 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
8277 map_raid56_repair_block(bioc, smap, logical);
8278 goto out;
8279 }
8280
8281 ASSERT(mirror_num <= bioc->num_stripes);
8282 smap->dev = bioc->stripes[mirror_num - 1].dev;
8283 smap->physical = bioc->stripes[mirror_num - 1].physical;
8284out:
8285 btrfs_put_bioc(bioc);
8286 ASSERT(smap->dev);
8287 return 0;
8288}
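
/*
 * Typical caller pattern (an illustrative sketch only, error handling
 * omitted), following the requirements documented above:
 *
 *	struct btrfs_io_stripe smap = { 0 };
 *	int ret;
 *
 *	btrfs_bio_counter_inc_blocked(fs_info);
 *	ret = btrfs_map_repair_block(fs_info, &smap, logical, length,
 *				     mirror_num);
 *	if (!ret)
 *		// submit a write bio to smap.dev at smap.physical
 *	btrfs_bio_counter_dec(fs_info);
 */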
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/bio.h>
20#include <linux/slab.h>
21#include <linux/buffer_head.h>
22#include <linux/blkdev.h>
23#include <linux/random.h>
24#include <linux/iocontext.h>
25#include <linux/capability.h>
26#include <linux/ratelimit.h>
27#include <linux/kthread.h>
28#include <linux/raid/pq.h>
29#include <linux/semaphore.h>
30#include <asm/div64.h>
31#include "ctree.h"
32#include "extent_map.h"
33#include "disk-io.h"
34#include "transaction.h"
35#include "print-tree.h"
36#include "volumes.h"
37#include "raid56.h"
38#include "async-thread.h"
39#include "check-integrity.h"
40#include "rcu-string.h"
41#include "math.h"
42#include "dev-replace.h"
43
44static int init_first_rw_device(struct btrfs_trans_handle *trans,
45 struct btrfs_root *root,
46 struct btrfs_device *device);
47static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
48static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
49static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
50static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
51
52static DEFINE_MUTEX(uuid_mutex);
53static LIST_HEAD(fs_uuids);
54
55static void lock_chunks(struct btrfs_root *root)
56{
57 mutex_lock(&root->fs_info->chunk_mutex);
58}
59
60static void unlock_chunks(struct btrfs_root *root)
61{
62 mutex_unlock(&root->fs_info->chunk_mutex);
63}
64
65static struct btrfs_fs_devices *__alloc_fs_devices(void)
66{
67 struct btrfs_fs_devices *fs_devs;
68
69 fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
70 if (!fs_devs)
71 return ERR_PTR(-ENOMEM);
72
73 mutex_init(&fs_devs->device_list_mutex);
74
75 INIT_LIST_HEAD(&fs_devs->devices);
76 INIT_LIST_HEAD(&fs_devs->alloc_list);
77 INIT_LIST_HEAD(&fs_devs->list);
78
79 return fs_devs;
80}
81
82/**
83 * alloc_fs_devices - allocate struct btrfs_fs_devices
84 * @fsid: a pointer to UUID for this FS. If NULL a new UUID is
85 * generated.
86 *
87 * Return: a pointer to a new &struct btrfs_fs_devices on success;
88 * ERR_PTR() on error. Returned struct is not linked onto any lists and
89 * can be destroyed with kfree() right away.
90 */
91static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
92{
93 struct btrfs_fs_devices *fs_devs;
94
95 fs_devs = __alloc_fs_devices();
96 if (IS_ERR(fs_devs))
97 return fs_devs;
98
99 if (fsid)
100 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
101 else
102 generate_random_uuid(fs_devs->fsid);
103
104 return fs_devs;
105}
106
107static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
108{
109 struct btrfs_device *device;
110 WARN_ON(fs_devices->opened);
111 while (!list_empty(&fs_devices->devices)) {
112 device = list_entry(fs_devices->devices.next,
113 struct btrfs_device, dev_list);
114 list_del(&device->dev_list);
115 rcu_string_free(device->name);
116 kfree(device);
117 }
118 kfree(fs_devices);
119}
120
121static void btrfs_kobject_uevent(struct block_device *bdev,
122 enum kobject_action action)
123{
124 int ret;
125
126 ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
127 if (ret)
128 pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
129 action,
130 kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
131 &disk_to_dev(bdev->bd_disk)->kobj);
132}
133
134void btrfs_cleanup_fs_uuids(void)
135{
136 struct btrfs_fs_devices *fs_devices;
137
138 while (!list_empty(&fs_uuids)) {
139 fs_devices = list_entry(fs_uuids.next,
140 struct btrfs_fs_devices, list);
141 list_del(&fs_devices->list);
142 free_fs_devices(fs_devices);
143 }
144}
145
146static struct btrfs_device *__alloc_device(void)
147{
148 struct btrfs_device *dev;
149
150 dev = kzalloc(sizeof(*dev), GFP_NOFS);
151 if (!dev)
152 return ERR_PTR(-ENOMEM);
153
154 INIT_LIST_HEAD(&dev->dev_list);
155 INIT_LIST_HEAD(&dev->dev_alloc_list);
156
157 spin_lock_init(&dev->io_lock);
158
159 spin_lock_init(&dev->reada_lock);
160 atomic_set(&dev->reada_in_flight, 0);
161 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
162 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);
163
164 return dev;
165}
166
167static noinline struct btrfs_device *__find_device(struct list_head *head,
168 u64 devid, u8 *uuid)
169{
170 struct btrfs_device *dev;
171
172 list_for_each_entry(dev, head, dev_list) {
173 if (dev->devid == devid &&
174 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
175 return dev;
176 }
177 }
178 return NULL;
179}
180
181static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
182{
183 struct btrfs_fs_devices *fs_devices;
184
185 list_for_each_entry(fs_devices, &fs_uuids, list) {
186 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
187 return fs_devices;
188 }
189 return NULL;
190}
191
192static int
193btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
194 int flush, struct block_device **bdev,
195 struct buffer_head **bh)
196{
197 int ret;
198
199 *bdev = blkdev_get_by_path(device_path, flags, holder);
200
201 if (IS_ERR(*bdev)) {
202 ret = PTR_ERR(*bdev);
203 printk(KERN_INFO "BTRFS: open %s failed\n", device_path);
204 goto error;
205 }
206
207 if (flush)
208 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
209 ret = set_blocksize(*bdev, 4096);
210 if (ret) {
211 blkdev_put(*bdev, flags);
212 goto error;
213 }
214 invalidate_bdev(*bdev);
215 *bh = btrfs_read_dev_super(*bdev);
216 if (!*bh) {
217 ret = -EINVAL;
218 blkdev_put(*bdev, flags);
219 goto error;
220 }
221
222 return 0;
223
224error:
225 *bdev = NULL;
226 *bh = NULL;
227 return ret;
228}
229
230static void requeue_list(struct btrfs_pending_bios *pending_bios,
231 struct bio *head, struct bio *tail)
232{
233
234 struct bio *old_head;
235
236 old_head = pending_bios->head;
237 pending_bios->head = head;
238 if (pending_bios->tail)
239 tail->bi_next = old_head;
240 else
241 pending_bios->tail = tail;
242}
243
244/*
245 * we try to collect pending bios for a device so we don't get a large
246 * number of procs sending bios down to the same device. This greatly
247 * improves the schedulers ability to collect and merge the bios.
248 *
249 * But, it also turns into a long list of bios to process and that is sure
250 * to eventually make the worker thread block. The solution here is to
251 * make some progress and then put this work struct back at the end of
252 * the list if the block device is congested. This way, multiple devices
253 * can make progress from a single worker thread.
254 */
255static noinline void run_scheduled_bios(struct btrfs_device *device)
256{
257 struct bio *pending;
258 struct backing_dev_info *bdi;
259 struct btrfs_fs_info *fs_info;
260 struct btrfs_pending_bios *pending_bios;
261 struct bio *tail;
262 struct bio *cur;
263 int again = 0;
264 unsigned long num_run;
265 unsigned long batch_run = 0;
266 unsigned long limit;
267 unsigned long last_waited = 0;
268 int force_reg = 0;
269 int sync_pending = 0;
270 struct blk_plug plug;
271
272 /*
273 * this function runs all the bios we've collected for
274 * a particular device. We don't want to wander off to
275 * another device without first sending all of these down.
276 * So, setup a plug here and finish it off before we return
277 */
278 blk_start_plug(&plug);
279
280 bdi = blk_get_backing_dev_info(device->bdev);
281 fs_info = device->dev_root->fs_info;
282 limit = btrfs_async_submit_limit(fs_info);
283 limit = limit * 2 / 3;
284
285loop:
286 spin_lock(&device->io_lock);
287
288loop_lock:
289 num_run = 0;
290
291 /* take all the bios off the list at once and process them
292 * later on (without the lock held). But, remember the
293 * tail and other pointers so the bios can be properly reinserted
294 * into the list if we hit congestion
295 */
296 if (!force_reg && device->pending_sync_bios.head) {
297 pending_bios = &device->pending_sync_bios;
298 force_reg = 1;
299 } else {
300 pending_bios = &device->pending_bios;
301 force_reg = 0;
302 }
303
304 pending = pending_bios->head;
305 tail = pending_bios->tail;
306 WARN_ON(pending && !tail);
307
308 /*
309 * if pending was null this time around, no bios need processing
310 * at all and we can stop. Otherwise it'll loop back up again
311 * and do an additional check so no bios are missed.
312 *
313 * device->running_pending is used to synchronize with the
314 * schedule_bio code.
315 */
316 if (device->pending_sync_bios.head == NULL &&
317 device->pending_bios.head == NULL) {
318 again = 0;
319 device->running_pending = 0;
320 } else {
321 again = 1;
322 device->running_pending = 1;
323 }
324
325 pending_bios->head = NULL;
326 pending_bios->tail = NULL;
327
328 spin_unlock(&device->io_lock);
329
330 while (pending) {
331
332 rmb();
333 /* we want to work on both lists, but do more bios on the
334 * sync list than the regular list
335 */
336 if ((num_run > 32 &&
337 pending_bios != &device->pending_sync_bios &&
338 device->pending_sync_bios.head) ||
339 (num_run > 64 && pending_bios == &device->pending_sync_bios &&
340 device->pending_bios.head)) {
341 spin_lock(&device->io_lock);
342 requeue_list(pending_bios, pending, tail);
343 goto loop_lock;
344 }
345
346 cur = pending;
347 pending = pending->bi_next;
348 cur->bi_next = NULL;
349
350 if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
351 waitqueue_active(&fs_info->async_submit_wait))
352 wake_up(&fs_info->async_submit_wait);
353
354 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
355
356 /*
357 * if we're doing the sync list, record that our
358 * plug has some sync requests on it
359 *
360 * If we're doing the regular list and there are
361 * sync requests sitting around, unplug before
362 * we add more
363 */
364 if (pending_bios == &device->pending_sync_bios) {
365 sync_pending = 1;
366 } else if (sync_pending) {
367 blk_finish_plug(&plug);
368 blk_start_plug(&plug);
369 sync_pending = 0;
370 }
371
372 btrfsic_submit_bio(cur->bi_rw, cur);
373 num_run++;
374 batch_run++;
375 if (need_resched())
376 cond_resched();
377
378 /*
379 * we made progress, there is more work to do and the bdi
380 * is now congested. Back off and let other work structs
381 * run instead
382 */
383 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
384 fs_info->fs_devices->open_devices > 1) {
385 struct io_context *ioc;
386
387 ioc = current->io_context;
388
389 /*
390 * the main goal here is that we don't want to
391 * block if we're going to be able to submit
392 * more requests without blocking.
393 *
394 * This code does two great things, it pokes into
395 * the elevator code from a filesystem _and_
396 * it makes assumptions about how batching works.
397 */
398 if (ioc && ioc->nr_batch_requests > 0 &&
399 time_before(jiffies, ioc->last_waited + HZ/50UL) &&
400 (last_waited == 0 ||
401 ioc->last_waited == last_waited)) {
402 /*
403 * we want to go through our batch of
404 * requests and stop. So, we copy out
405 * the ioc->last_waited time and test
406 * against it before looping
407 */
408 last_waited = ioc->last_waited;
409 if (need_resched())
410 cond_resched();
411 continue;
412 }
413 spin_lock(&device->io_lock);
414 requeue_list(pending_bios, pending, tail);
415 device->running_pending = 1;
416
417 spin_unlock(&device->io_lock);
418 btrfs_queue_work(fs_info->submit_workers,
419 &device->work);
420 goto done;
421 }
422 /* unplug every 64 requests just for good measure */
423 if (batch_run % 64 == 0) {
424 blk_finish_plug(&plug);
425 blk_start_plug(&plug);
426 sync_pending = 0;
427 }
428 }
429
430 cond_resched();
431 if (again)
432 goto loop;
433
434 spin_lock(&device->io_lock);
435 if (device->pending_bios.head || device->pending_sync_bios.head)
436 goto loop_lock;
437 spin_unlock(&device->io_lock);
438
439done:
440 blk_finish_plug(&plug);
441}
442
443static void pending_bios_fn(struct btrfs_work *work)
444{
445 struct btrfs_device *device;
446
447 device = container_of(work, struct btrfs_device, work);
448 run_scheduled_bios(device);
449}
450
451/*
452 * Add new device to list of registered devices
453 *
454 * Returns:
455 * 1 - first time device is seen
456 * 0 - device already known
457 * < 0 - error
458 */
459static noinline int device_list_add(const char *path,
460 struct btrfs_super_block *disk_super,
461 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
462{
463 struct btrfs_device *device;
464 struct btrfs_fs_devices *fs_devices;
465 struct rcu_string *name;
466 int ret = 0;
467 u64 found_transid = btrfs_super_generation(disk_super);
468
469 fs_devices = find_fsid(disk_super->fsid);
470 if (!fs_devices) {
471 fs_devices = alloc_fs_devices(disk_super->fsid);
472 if (IS_ERR(fs_devices))
473 return PTR_ERR(fs_devices);
474
475 list_add(&fs_devices->list, &fs_uuids);
476 fs_devices->latest_devid = devid;
477 fs_devices->latest_trans = found_transid;
478
479 device = NULL;
480 } else {
481 device = __find_device(&fs_devices->devices, devid,
482 disk_super->dev_item.uuid);
483 }
484 if (!device) {
485 if (fs_devices->opened)
486 return -EBUSY;
487
488 device = btrfs_alloc_device(NULL, &devid,
489 disk_super->dev_item.uuid);
490 if (IS_ERR(device)) {
491 /* we can safely leave the fs_devices entry around */
492 return PTR_ERR(device);
493 }
494
495 name = rcu_string_strdup(path, GFP_NOFS);
496 if (!name) {
497 kfree(device);
498 return -ENOMEM;
499 }
500 rcu_assign_pointer(device->name, name);
501
502 mutex_lock(&fs_devices->device_list_mutex);
503 list_add_rcu(&device->dev_list, &fs_devices->devices);
504 fs_devices->num_devices++;
505 mutex_unlock(&fs_devices->device_list_mutex);
506
507 ret = 1;
508 device->fs_devices = fs_devices;
509 } else if (!device->name || strcmp(device->name->str, path)) {
510 name = rcu_string_strdup(path, GFP_NOFS);
511 if (!name)
512 return -ENOMEM;
513 rcu_string_free(device->name);
514 rcu_assign_pointer(device->name, name);
515 if (device->missing) {
516 fs_devices->missing_devices--;
517 device->missing = 0;
518 }
519 }
520
521 if (found_transid > fs_devices->latest_trans) {
522 fs_devices->latest_devid = devid;
523 fs_devices->latest_trans = found_transid;
524 }
525 *fs_devices_ret = fs_devices;
526
527 return ret;
528}
529
530static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
531{
532 struct btrfs_fs_devices *fs_devices;
533 struct btrfs_device *device;
534 struct btrfs_device *orig_dev;
535
536 fs_devices = alloc_fs_devices(orig->fsid);
537 if (IS_ERR(fs_devices))
538 return fs_devices;
539
540 fs_devices->latest_devid = orig->latest_devid;
541 fs_devices->latest_trans = orig->latest_trans;
542 fs_devices->total_devices = orig->total_devices;
543
544 /* We have held the volume lock, it is safe to get the devices. */
545 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
546 struct rcu_string *name;
547
548 device = btrfs_alloc_device(NULL, &orig_dev->devid,
549 orig_dev->uuid);
550 if (IS_ERR(device))
551 goto error;
552
553 /*
554 * This is ok to do without rcu read locked because we hold the
555 * uuid mutex so nothing we touch in here is going to disappear.
556 */
557 name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
558 if (!name) {
559 kfree(device);
560 goto error;
561 }
562 rcu_assign_pointer(device->name, name);
563
564 list_add(&device->dev_list, &fs_devices->devices);
565 device->fs_devices = fs_devices;
566 fs_devices->num_devices++;
567 }
568 return fs_devices;
569error:
570 free_fs_devices(fs_devices);
571 return ERR_PTR(-ENOMEM);
572}
573
574void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
575 struct btrfs_fs_devices *fs_devices, int step)
576{
577 struct btrfs_device *device, *next;
578
579 struct block_device *latest_bdev = NULL;
580 u64 latest_devid = 0;
581 u64 latest_transid = 0;
582
583 mutex_lock(&uuid_mutex);
584again:
585 /* This is the initialized path, it is safe to release the devices. */
586 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
587 if (device->in_fs_metadata) {
588 if (!device->is_tgtdev_for_dev_replace &&
589 (!latest_transid ||
590 device->generation > latest_transid)) {
591 latest_devid = device->devid;
592 latest_transid = device->generation;
593 latest_bdev = device->bdev;
594 }
595 continue;
596 }
597
598 if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
599 /*
600 * In the first step, keep the device which has
601 * the correct fsid and the devid that is used
602 * for the dev_replace procedure.
603 * In the second step, the dev_replace state is
604 * read from the device tree and it is known
605 * whether the procedure is really active or
606 * not, which means whether this device is
607 * used or whether it should be removed.
608 */
609 if (step == 0 || device->is_tgtdev_for_dev_replace) {
610 continue;
611 }
612 }
613 if (device->bdev) {
614 blkdev_put(device->bdev, device->mode);
615 device->bdev = NULL;
616 fs_devices->open_devices--;
617 }
618 if (device->writeable) {
619 list_del_init(&device->dev_alloc_list);
620 device->writeable = 0;
621 if (!device->is_tgtdev_for_dev_replace)
622 fs_devices->rw_devices--;
623 }
624 list_del_init(&device->dev_list);
625 fs_devices->num_devices--;
626 rcu_string_free(device->name);
627 kfree(device);
628 }
629
630 if (fs_devices->seed) {
631 fs_devices = fs_devices->seed;
632 goto again;
633 }
634
635 fs_devices->latest_bdev = latest_bdev;
636 fs_devices->latest_devid = latest_devid;
637 fs_devices->latest_trans = latest_transid;
638
639 mutex_unlock(&uuid_mutex);
640}
641
642static void __free_device(struct work_struct *work)
643{
644 struct btrfs_device *device;
645
646 device = container_of(work, struct btrfs_device, rcu_work);
647
648 if (device->bdev)
649 blkdev_put(device->bdev, device->mode);
650
651 rcu_string_free(device->name);
652 kfree(device);
653}
654
655static void free_device(struct rcu_head *head)
656{
657 struct btrfs_device *device;
658
659 device = container_of(head, struct btrfs_device, rcu);
660
661 INIT_WORK(&device->rcu_work, __free_device);
662 schedule_work(&device->rcu_work);
663}
664
665static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
666{
667 struct btrfs_device *device;
668
669 if (--fs_devices->opened > 0)
670 return 0;
671
672 mutex_lock(&fs_devices->device_list_mutex);
673 list_for_each_entry(device, &fs_devices->devices, dev_list) {
674 struct btrfs_device *new_device;
675 struct rcu_string *name;
676
677 if (device->bdev)
678 fs_devices->open_devices--;
679
680 if (device->writeable &&
681 device->devid != BTRFS_DEV_REPLACE_DEVID) {
682 list_del_init(&device->dev_alloc_list);
683 fs_devices->rw_devices--;
684 }
685
686 if (device->can_discard)
687 fs_devices->num_can_discard--;
688 if (device->missing)
689 fs_devices->missing_devices--;
690
691 new_device = btrfs_alloc_device(NULL, &device->devid,
692 device->uuid);
693 BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
694
695 /* Safe because we are under uuid_mutex */
696 if (device->name) {
697 name = rcu_string_strdup(device->name->str, GFP_NOFS);
698 BUG_ON(!name); /* -ENOMEM */
699 rcu_assign_pointer(new_device->name, name);
700 }
701
702 list_replace_rcu(&device->dev_list, &new_device->dev_list);
703 new_device->fs_devices = device->fs_devices;
704
705 call_rcu(&device->rcu, free_device);
706 }
707 mutex_unlock(&fs_devices->device_list_mutex);
708
709 WARN_ON(fs_devices->open_devices);
710 WARN_ON(fs_devices->rw_devices);
711 fs_devices->opened = 0;
712 fs_devices->seeding = 0;
713
714 return 0;
715}
716
717int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
718{
719 struct btrfs_fs_devices *seed_devices = NULL;
720 int ret;
721
722 mutex_lock(&uuid_mutex);
723 ret = __btrfs_close_devices(fs_devices);
724 if (!fs_devices->opened) {
725 seed_devices = fs_devices->seed;
726 fs_devices->seed = NULL;
727 }
728 mutex_unlock(&uuid_mutex);
729
730 while (seed_devices) {
731 fs_devices = seed_devices;
732 seed_devices = fs_devices->seed;
733 __btrfs_close_devices(fs_devices);
734 free_fs_devices(fs_devices);
735 }
736 /*
737 * Wait for rcu kworkers under __btrfs_close_devices
738 * to finish all blkdev_puts so device is really
739 * free when umount is done.
740 */
741 rcu_barrier();
742 return ret;
743}
744
745static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
746 fmode_t flags, void *holder)
747{
748 struct request_queue *q;
749 struct block_device *bdev;
750 struct list_head *head = &fs_devices->devices;
751 struct btrfs_device *device;
752 struct block_device *latest_bdev = NULL;
753 struct buffer_head *bh;
754 struct btrfs_super_block *disk_super;
755 u64 latest_devid = 0;
756 u64 latest_transid = 0;
757 u64 devid;
758 int seeding = 1;
759 int ret = 0;
760
761 flags |= FMODE_EXCL;
762
763 list_for_each_entry(device, head, dev_list) {
764 if (device->bdev)
765 continue;
766 if (!device->name)
767 continue;
768
769 /* Just open everything we can; ignore failures here */
770 if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
771 &bdev, &bh))
772 continue;
773
774 disk_super = (struct btrfs_super_block *)bh->b_data;
775 devid = btrfs_stack_device_id(&disk_super->dev_item);
776 if (devid != device->devid)
777 goto error_brelse;
778
779 if (memcmp(device->uuid, disk_super->dev_item.uuid,
780 BTRFS_UUID_SIZE))
781 goto error_brelse;
782
783 device->generation = btrfs_super_generation(disk_super);
784 if (!latest_transid || device->generation > latest_transid) {
785 latest_devid = devid;
786 latest_transid = device->generation;
787 latest_bdev = bdev;
788 }
789
790 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
791 device->writeable = 0;
792 } else {
793 device->writeable = !bdev_read_only(bdev);
794 seeding = 0;
795 }
796
797 q = bdev_get_queue(bdev);
798 if (blk_queue_discard(q)) {
799 device->can_discard = 1;
800 fs_devices->num_can_discard++;
801 }
802
803 device->bdev = bdev;
804 device->in_fs_metadata = 0;
805 device->mode = flags;
806
807 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
808 fs_devices->rotating = 1;
809
810 fs_devices->open_devices++;
811 if (device->writeable &&
812 device->devid != BTRFS_DEV_REPLACE_DEVID) {
813 fs_devices->rw_devices++;
814 list_add(&device->dev_alloc_list,
815 &fs_devices->alloc_list);
816 }
817 brelse(bh);
818 continue;
819
820error_brelse:
821 brelse(bh);
822 blkdev_put(bdev, flags);
823 continue;
824 }
825 if (fs_devices->open_devices == 0) {
826 ret = -EINVAL;
827 goto out;
828 }
829 fs_devices->seeding = seeding;
830 fs_devices->opened = 1;
831 fs_devices->latest_bdev = latest_bdev;
832 fs_devices->latest_devid = latest_devid;
833 fs_devices->latest_trans = latest_transid;
834 fs_devices->total_rw_bytes = 0;
835out:
836 return ret;
837}
838
839int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
840 fmode_t flags, void *holder)
841{
842 int ret;
843
844 mutex_lock(&uuid_mutex);
845 if (fs_devices->opened) {
846 fs_devices->opened++;
847 ret = 0;
848 } else {
849 ret = __btrfs_open_devices(fs_devices, flags, holder);
850 }
851 mutex_unlock(&uuid_mutex);
852 return ret;
853}
854
855/*
856 * Look for a btrfs signature on a device. This may be called out of the mount path
857 * and we are not allowed to call set_blocksize during the scan. The superblock
858 * is read via pagecache
859 */
860int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
861 struct btrfs_fs_devices **fs_devices_ret)
862{
863 struct btrfs_super_block *disk_super;
864 struct block_device *bdev;
865 struct page *page;
866 void *p;
867 int ret = -EINVAL;
868 u64 devid;
869 u64 transid;
870 u64 total_devices;
871 u64 bytenr;
872 pgoff_t index;
873
874 /*
875 * we would like to check all the supers, but that would make
876 * a btrfs mount succeed after a mkfs from a different FS.
877 * So, we need to add a special mount option to scan for
878 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
879 */
880 bytenr = btrfs_sb_offset(0);
881 flags |= FMODE_EXCL;
882 mutex_lock(&uuid_mutex);
883
884 bdev = blkdev_get_by_path(path, flags, holder);
885
886 if (IS_ERR(bdev)) {
887 ret = PTR_ERR(bdev);
888 goto error;
889 }
890
891 /* make sure our super fits in the device */
892 if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
893 goto error_bdev_put;
894
895 /* make sure our super fits in the page */
896 if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
897 goto error_bdev_put;
898
899 /* make sure our super doesn't straddle pages on disk */
900 index = bytenr >> PAGE_CACHE_SHIFT;
901 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
902 goto error_bdev_put;
903
904 /* pull in the page with our super */
905 page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
906 index, GFP_NOFS);
907
908 if (IS_ERR_OR_NULL(page))
909 goto error_bdev_put;
910
911 p = kmap(page);
912
913 /* align our pointer to the offset of the super block */
914 disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
915
916 if (btrfs_super_bytenr(disk_super) != bytenr ||
917 btrfs_super_magic(disk_super) != BTRFS_MAGIC)
918 goto error_unmap;
919
920 devid = btrfs_stack_device_id(&disk_super->dev_item);
921 transid = btrfs_super_generation(disk_super);
922 total_devices = btrfs_super_num_devices(disk_super);
923
924 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
925 if (ret > 0) {
926 if (disk_super->label[0]) {
927 if (disk_super->label[BTRFS_LABEL_SIZE - 1])
928 disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
929 printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
930 } else {
931 printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
932 }
933
934 printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
935 ret = 0;
936 }
937 if (!ret && fs_devices_ret)
938 (*fs_devices_ret)->total_devices = total_devices;
939
940error_unmap:
941 kunmap(page);
942 page_cache_release(page);
943
944error_bdev_put:
945 blkdev_put(bdev, flags);
946error:
947 mutex_unlock(&uuid_mutex);
948 return ret;
949}
950
951/* helper to account the used device space in the range */
952int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
953 u64 end, u64 *length)
954{
955 struct btrfs_key key;
956 struct btrfs_root *root = device->dev_root;
957 struct btrfs_dev_extent *dev_extent;
958 struct btrfs_path *path;
959 u64 extent_end;
960 int ret;
961 int slot;
962 struct extent_buffer *l;
963
964 *length = 0;
965
966 if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
967 return 0;
968
969 path = btrfs_alloc_path();
970 if (!path)
971 return -ENOMEM;
972 path->reada = 2;
973
974 key.objectid = device->devid;
975 key.offset = start;
976 key.type = BTRFS_DEV_EXTENT_KEY;
977
978 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
979 if (ret < 0)
980 goto out;
981 if (ret > 0) {
982 ret = btrfs_previous_item(root, path, key.objectid, key.type);
983 if (ret < 0)
984 goto out;
985 }
986
987 while (1) {
988 l = path->nodes[0];
989 slot = path->slots[0];
990 if (slot >= btrfs_header_nritems(l)) {
991 ret = btrfs_next_leaf(root, path);
992 if (ret == 0)
993 continue;
994 if (ret < 0)
995 goto out;
996
997 break;
998 }
999 btrfs_item_key_to_cpu(l, &key, slot);
1000
1001 if (key.objectid < device->devid)
1002 goto next;
1003
1004 if (key.objectid > device->devid)
1005 break;
1006
1007 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
1008 goto next;
1009
1010 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1011 extent_end = key.offset + btrfs_dev_extent_length(l,
1012 dev_extent);
1013 if (key.offset <= start && extent_end > end) {
1014 *length = end - start + 1;
1015 break;
1016 } else if (key.offset <= start && extent_end > start)
1017 *length += extent_end - start;
1018 else if (key.offset > start && extent_end <= end)
1019 *length += extent_end - key.offset;
1020 else if (key.offset > start && key.offset <= end) {
1021 *length += end - key.offset + 1;
1022 break;
1023 } else if (key.offset > end)
1024 break;
1025
1026next:
1027 path->slots[0]++;
1028 }
1029 ret = 0;
1030out:
1031 btrfs_free_path(path);
1032 return ret;
1033}
1034
1035static int contains_pending_extent(struct btrfs_trans_handle *trans,
1036 struct btrfs_device *device,
1037 u64 *start, u64 len)
1038{
1039 struct extent_map *em;
1040 int ret = 0;
1041
1042 list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
1043 struct map_lookup *map;
1044 int i;
1045
1046 map = (struct map_lookup *)em->bdev;
1047 for (i = 0; i < map->num_stripes; i++) {
1048 if (map->stripes[i].dev != device)
1049 continue;
1050 if (map->stripes[i].physical >= *start + len ||
1051 map->stripes[i].physical + em->orig_block_len <=
1052 *start)
1053 continue;
1054 *start = map->stripes[i].physical +
1055 em->orig_block_len;
1056 ret = 1;
1057 }
1058 }
1059
1060 return ret;
1061}
1062
1063
1064/*
1065 * find_free_dev_extent - find free space in the specified device
1066 * @device: the device which we search the free space in
1067 * @num_bytes: the size of the free space that we need
1068 * @start: store the start of the free space.
1069 * @len: the size of the free space. that we find, or the size of the max
1070 * free space if we don't find suitable free space
1071 *
1072 * this uses a pretty simple search, the expectation is that it is
1073 * called very infrequently and that a given device has a small number
1074 * of extents
1075 *
1076 * @start is used to store the start of the free space if we find. But if we
1077 * don't find suitable free space, it will be used to store the start position
1078 * of the max free space.
1079 *
1080 * @len is used to store the size of the free space that we find.
1081 * But if we don't find suitable free space, it is used to store the size of
1082 * the max free space.
1083 */
1084int find_free_dev_extent(struct btrfs_trans_handle *trans,
1085 struct btrfs_device *device, u64 num_bytes,
1086 u64 *start, u64 *len)
1087{
1088 struct btrfs_key key;
1089 struct btrfs_root *root = device->dev_root;
1090 struct btrfs_dev_extent *dev_extent;
1091 struct btrfs_path *path;
1092 u64 hole_size;
1093 u64 max_hole_start;
1094 u64 max_hole_size;
1095 u64 extent_end;
1096 u64 search_start;
1097 u64 search_end = device->total_bytes;
1098 int ret;
1099 int slot;
1100 struct extent_buffer *l;
1101
1102 /* FIXME use last free of some kind */
1103
1104 /* we don't want to overwrite the superblock on the drive,
1105 * so we make sure to start at an offset of at least 1MB
1106 */
1107 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1108
1109 path = btrfs_alloc_path();
1110 if (!path)
1111 return -ENOMEM;
1112again:
1113 max_hole_start = search_start;
1114 max_hole_size = 0;
1115 hole_size = 0;
1116
1117 if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1118 ret = -ENOSPC;
1119 goto out;
1120 }
1121
1122 path->reada = 2;
1123 path->search_commit_root = 1;
1124 path->skip_locking = 1;
1125
1126 key.objectid = device->devid;
1127 key.offset = search_start;
1128 key.type = BTRFS_DEV_EXTENT_KEY;
1129
1130 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1131 if (ret < 0)
1132 goto out;
1133 if (ret > 0) {
1134 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1135 if (ret < 0)
1136 goto out;
1137 }
1138
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start,
			 * otherwise we could end up sending back this
			 * offset anyway.
			 */
			if (contains_pending_extent(trans, device,
						    &search_start,
						    hole_size))
				hole_size = 0;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need, it
			 * must be the largest free space that we have found
			 * so far, so max_hole_start must point to the start
			 * of this free space and the length of this free
			 * space is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to
			 * the caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (contains_pending_extent(trans, device, &search_start, hole_size)) {
		btrfs_release_path(path);
		goto again;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

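/*
 * Remove the device extent item that describes the physical range starting
 * at @start, returning its length to the device's free space accounting.
 * The search may land past the item, in which case we step back to the
 * previous dev extent and verify that it actually covers @start.
 */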
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
			    btrfs_dev_extent_chunk_tree_uuid(extent),
			    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

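/*
 * The logical address space for chunks is allocated append-only: the next
 * chunk starts where the last mapping ends.  E.g. if the rightmost mapping
 * covers [X, X + len), this returns X + len (or 0 on an empty tree).
 */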
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

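	/*
	 * Device items all live under the fixed objectid
	 * BTRFS_DEV_ITEMS_OBJECTID in the chunk tree and are keyed by
	 * devid, so a lookup by device id is a single tree search.
	 */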
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	unsigned seq;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	do {
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		all_avail = root->fs_info->avail_data_alloc_bits |
			    root->fs_info->avail_system_alloc_bits |
			    root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);

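	/*
	 * Refuse the removal if it would drop an allocation profile below
	 * its minimum device count: raid10 needs at least 4 devices and
	 * raid1 at least 2, so e.g. a 4-device raid10 filesystem cannot
	 * lose a device.  An ongoing replace target was already excluded
	 * from num_devices above.
	 */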
	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
	    root->fs_info->fs_devices->rw_devices <= 2) {
		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
		goto out;
	}
	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
	    root->fs_info->fs_devices->rw_devices <= 3) {
		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata &&
			    !tmp->is_tgtdev_for_dev_replace &&
			    !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
			goto out;
		}
	} else {
		ret = btrfs_get_bdev_and_sb(device_path,
					    FMODE_WRITE | FMODE_EXCL,
					    root->fs_info->bdev_holder, 0,
					    &bdev, &bh);
		if (ret)
			goto out;
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto error_brelse;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space -= device->total_bytes -
					   device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * The device list mutex makes sure that we don't change the device
	 * list while someone else is writing out all the device supers.
	 * Whoever is writing all supers should lock the device list mutex
	 * before getting the number of devices in the super block
	 * (super_copy).  Conversely, whoever updates the number of devices
	 * in the super block (super_copy) should hold the device list
	 * mutex.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * At this point, the device is zero sized.  We want to remove it
	 * from the devices list and zero out the old super.
	 */
	if (clear_super && disk_super) {
		/*
		 * Make sure this device isn't detected as part of the FS
		 * anymore.
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

	/* Notify udev that device has changed */
	if (bdev)
		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

error_brelse:
	brelse(bh);
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}

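/*
 * Drop the source device once a replace has finished: the target device
 * has taken over its role, so only the bookkeeping and the stale
 * superblock are left to clean up here (called with device_list_mutex
 * held, see the WARN_ON below).
 */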
void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
				 struct btrfs_device *srcdev)
{
	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	if (srcdev->missing) {
		fs_info->fs_devices->missing_devices--;
		fs_info->fs_devices->rw_devices++;
	}
	if (srcdev->can_discard)
		fs_info->fs_devices->num_can_discard--;
	if (srcdev->bdev) {
		fs_info->fs_devices->open_devices--;

		/* zero out the old super */
		btrfs_scratch_superblock(srcdev);
	}

	call_rcu(&srcdev->rcu, free_device);
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	struct btrfs_device *next_device;

	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	if (tgtdev->bdev) {
		btrfs_scratch_superblock(tgtdev);
		fs_info->fs_devices->open_devices--;
	}
	fs_info->fs_devices->num_devices--;
	if (tgtdev->can_discard)
		fs_info->fs_devices->num_can_discard--;

	next_device = list_entry(fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (tgtdev->bdev == fs_info->sb->s_bdev)
		fs_info->sb->s_bdev = next_device->bdev;
	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
	list_del_rcu(&tgtdev->dev_list);

	call_rcu(&tgtdev->rcu, free_device);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
}

static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}

int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device) {
			btrfs_err(root->fs_info, "no missing device found");
			return -ENOENT;
		}

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}

/*
 * Does all the dirty work required for changing the filesystem's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = __alloc_fs_devices();
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

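	/*
	 * Hand every existing device over to the new seed_devices struct;
	 * fs_devices itself is emptied and will be repopulated with the
	 * sprouted (writable) devices under a freshly generated fsid.
	 */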
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->total_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

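/*
 * Add a new device to a mounted filesystem: open the block device
 * exclusively, reject duplicates, allocate a btrfs_device, and within one
 * transaction either insert its dev item or, for a seed filesystem,
 * sprout a new writable filesystem on top of the read-only seed.
 */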
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 0;
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
		ret = btrfs_finish_sprout(trans, root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	} else {
		ret = btrfs_add_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	}

	/*
	 * We've got more storage, clear any full flags on the space
	 * infos.
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be "
				    "fixed using the \"btrfs balance\" "
				    "command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
	}

	return ret;

error_trans:
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
				  struct btrfs_device **device_out)
{
	struct request_queue *q;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *devices;
	struct rcu_string *name;
	u64 devid = BTRFS_DEV_REPLACE_DEVID;
	int ret = 0;

	*device_out = NULL;
	if (fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = btrfs_alloc_device(NULL, &devid, NULL);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	device->writeable = 1;
	device->generation = 0;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 1;
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, 4096);
	device->fs_devices = fs_info->fs_devices;
	list_add(&device->dev_list, &fs_info->fs_devices->devices);
	fs_info->fs_devices->num_devices++;
	fs_info->fs_devices->open_devices++;
	if (device->can_discard)
		fs_info->fs_devices->num_can_discard++;
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	*device_out = device;
	return ret;

error:
	blkdev_put(bdev, FMODE_EXCL);
	return ret;
}

void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
					      struct btrfs_device *tgtdev)
{
	WARN_ON(fs_info->fs_devices->rw_devices == 0);
	tgtdev->io_width = fs_info->dev_root->sectorsize;
	tgtdev->io_align = fs_info->dev_root->sectorsize;
	tgtdev->sector_size = fs_info->dev_root->sectorsize;
	tgtdev->dev_root = fs_info->dev_root;
	tgtdev->in_fs_metadata = 1;
}

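/*
 * Write the in-memory state of @device back to its dev item in the chunk
 * tree.  Note that total_bytes is taken from disk_total_bytes, so callers
 * that resize a device must update disk_total_bytes for the change to
 * reach disk.
 */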
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

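/*
 * Growing a device only adjusts the accounting; no extents move.  The
 * size delta is added to the superblock's total_bytes and to
 * total_rw_bytes, e.g. growing a device from 10GiB to 12GiB adds 2GiB
 * to both.
 */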
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes ||
	    device->is_tgtdev_for_dev_replace)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;

	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
2334
2335static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2336 struct btrfs_root *root,
2337 u64 chunk_tree, u64 chunk_objectid,
2338 u64 chunk_offset)
2339{
2340 int ret;
2341 struct btrfs_path *path;
2342 struct btrfs_key key;
2343
2344 root = root->fs_info->chunk_root;
2345 path = btrfs_alloc_path();
2346 if (!path)
2347 return -ENOMEM;
2348
2349 key.objectid = chunk_objectid;
2350 key.offset = chunk_offset;
2351 key.type = BTRFS_CHUNK_ITEM_KEY;
2352
2353 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2354 if (ret < 0)
2355 goto out;
2356 else if (ret > 0) { /* Logic error or corruption */
2357 btrfs_error(root->fs_info, -ENOENT,
2358 "Failed lookup while freeing chunk.");
2359 ret = -ENOENT;
2360 goto out;
2361 }
2362
2363 ret = btrfs_del_item(trans, root, path);
2364 if (ret < 0)
2365 btrfs_error(root->fs_info, ret,
2366 "Failed to delete chunk item.");
2367out:
2368 btrfs_free_path(path);
2369 return ret;
2370}
2371
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			       u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_std_error(root->fs_info, ret);
		return ret;
	}

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}

static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on the usage filter if it is not already in use.  The idea
	 * is that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * the restriper.  The same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}

/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
		     BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

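/*
 * With usage=N a chunk is balanced only if it is less than N percent
 * full: for a 1GiB chunk and usage=90 the threshold is 1GiB * 90 / 100.
 * usage=0 matches only completely empty chunks, and usage > 100 is
 * clamped to the chunk size.
 */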
static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
2777
2778static int chunk_devid_filter(struct extent_buffer *leaf,
2779 struct btrfs_chunk *chunk,
2780 struct btrfs_balance_args *bargs)
2781{
2782 struct btrfs_stripe *stripe;
2783 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2784 int i;
2785
2786 for (i = 0; i < num_stripes; i++) {
2787 stripe = btrfs_stripe_nr(chunk, i);
2788 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2789 return 0;
2790 }
2791
2792 return 1;
2793}
2794
2795/* [pstart, pend) */
2796static int chunk_drange_filter(struct extent_buffer *leaf,
2797 struct btrfs_chunk *chunk,
2798 u64 chunk_offset,
2799 struct btrfs_balance_args *bargs)
2800{
2801 struct btrfs_stripe *stripe;
2802 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2803 u64 stripe_offset;
2804 u64 stripe_length;
2805 int factor;
2806 int i;
2807
2808 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2809 return 0;
2810
2811 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2812 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2813 factor = num_stripes / 2;
2814 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2815 factor = num_stripes - 1;
2816 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2817 factor = num_stripes - 2;
2818 } else {
2819 factor = num_stripes;
2820 }
2821
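	/*
	 * factor converts the chunk's logical length into the length each
	 * stripe occupies on disk: e.g. a raid1 chunk with 2 stripes has
	 * factor 1 (each stripe is a full copy), while raid5 with N
	 * stripes has factor N - 1 because one stripe's worth is parity.
	 */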
	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		do_div(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
		     BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}

static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	return 1;
}

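/*
 * Balance runs the chunk walk twice: a counting pass that only tallies
 * how many chunks match the filters (bctl->stat.expected) and a second
 * pass that actually relocates them, so the expected total is known
 * before any chunk moves.
 */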
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;

	/* step one, make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * This shouldn't happen, it means the last relocate
		 * failed.
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);
		btrfs_release_path(path);
		if (!ret)
			goto loop;

		if (counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);
			goto loop;
		}

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags:    profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
		    BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return (flags & (flags - 1)) == 0;
}

static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}
3109
3110static void __cancel_balance(struct btrfs_fs_info *fs_info)
3111{
3112 int ret;
3113
3114 unset_balance_control(fs_info);
3115 ret = del_balance_item(fs_info->tree_root);
3116 if (ret)
3117 btrfs_std_error(fs_info, ret);
3118
3119 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3120}
3121
3122/*
3123 * Should be called with both balance and volume mutexes held
3124 */
3125int btrfs_balance(struct btrfs_balance_control *bctl,
3126 struct btrfs_ioctl_balance_args *bargs)
3127{
3128 struct btrfs_fs_info *fs_info = bctl->fs_info;
3129 u64 allowed;
3130 int mixed = 0;
3131 int ret;
3132 u64 num_devices;
3133 unsigned seq;
3134
3135 if (btrfs_fs_closing(fs_info) ||
3136 atomic_read(&fs_info->balance_pause_req) ||
3137 atomic_read(&fs_info->balance_cancel_req)) {
3138 ret = -EINVAL;
3139 goto out;
3140 }
3141
3142 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3143 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3144 mixed = 1;
3145
3146 /*
3147 * In case of mixed groups both data and meta should be picked,
3148 * and identical options should be given for both of them.
3149 */
3150 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3151 if (mixed && (bctl->flags & allowed)) {
3152 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3153 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3154 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3155 btrfs_err(fs_info,
3156 "with mixed groups data and metadata balance options must be the same");
3157 ret = -EINVAL;
3158 goto out;
3159 }
3160 }
3161
3162 num_devices = fs_info->fs_devices->num_devices;
3163 btrfs_dev_replace_lock(&fs_info->dev_replace);
3164 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3165 BUG_ON(num_devices < 1);
3166 num_devices--;
3167 }
3168 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3169 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3170 if (num_devices == 1)
3171 allowed |= BTRFS_BLOCK_GROUP_DUP;
3172 else if (num_devices > 1)
3173 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3174 if (num_devices > 2)
3175 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3176 if (num_devices > 3)
3177 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3178 BTRFS_BLOCK_GROUP_RAID6);
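/*
 * Illustrative result of the mask built above: with 2 usable devices
 * the allowed conversion targets are single, raid0 and raid1; a 3rd
 * device adds raid5, and a 4th adds raid10 and raid6.  DUP is only
 * offered on a single-device filesystem here.
 */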
3179 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3180 (!alloc_profile_is_valid(bctl->data.target, 1) ||
3181 (bctl->data.target & ~allowed))) {
3182 btrfs_err(fs_info,
3183 "unable to start balance with target data profile %llu",
3184 bctl->data.target);
3185 ret = -EINVAL;
3186 goto out;
3187 }
3188 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3189 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3190 (bctl->meta.target & ~allowed))) {
3191 btrfs_err(fs_info,
3192 "unable to start balance with target metadata profile %llu",
3193 bctl->meta.target);
3194 ret = -EINVAL;
3195 goto out;
3196 }
3197 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3198 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3199 (bctl->sys.target & ~allowed))) {
3200 btrfs_err(fs_info,
3201 "unable to start balance with target system profile %llu",
3202 bctl->sys.target);
3203 ret = -EINVAL;
3204 goto out;
3205 }
3206
3207 /* allow dup'ed data chunks only in mixed mode */
3208 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3209 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3210 btrfs_err(fs_info, "dup for data is not allowed");
3211 ret = -EINVAL;
3212 goto out;
3213 }
3214
3215 /* allow reducing meta or sys integrity only if force is set */
3216 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3217 BTRFS_BLOCK_GROUP_RAID10 |
3218 BTRFS_BLOCK_GROUP_RAID5 |
3219 BTRFS_BLOCK_GROUP_RAID6;
3220 do {
3221 seq = read_seqbegin(&fs_info->profiles_lock);
3222
3223 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3224 (fs_info->avail_system_alloc_bits & allowed) &&
3225 !(bctl->sys.target & allowed)) ||
3226 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3227 (fs_info->avail_metadata_alloc_bits & allowed) &&
3228 !(bctl->meta.target & allowed))) {
3229 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3230 btrfs_info(fs_info, "force reducing metadata integrity");
3231 } else {
3232 btrfs_err(fs_info,
3233 "balance will reduce metadata integrity, use force if you want this");
3234 ret = -EINVAL;
3235 goto out;
3236 }
3237 }
3238 } while (read_seqretry(&fs_info->profiles_lock, seq));
3239
3240 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3241 int num_tolerated_disk_barrier_failures;
3242 u64 target = bctl->sys.target;
3243
3244 num_tolerated_disk_barrier_failures =
3245 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3246 if (num_tolerated_disk_barrier_failures > 0 &&
3247 (target &
3248 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3249 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3250 num_tolerated_disk_barrier_failures = 0;
3251 else if (num_tolerated_disk_barrier_failures > 1 &&
3252 (target &
3253 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3254 num_tolerated_disk_barrier_failures = 1;
3255
3256 fs_info->num_tolerated_disk_barrier_failures =
3257 num_tolerated_disk_barrier_failures;
3258 }
3259
3260 ret = insert_balance_item(fs_info->tree_root, bctl);
3261 if (ret && ret != -EEXIST)
3262 goto out;
3263
3264 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3265 BUG_ON(ret == -EEXIST);
3266 set_balance_control(bctl);
3267 } else {
3268 BUG_ON(ret != -EEXIST);
3269 spin_lock(&fs_info->balance_lock);
3270 update_balance_args(bctl);
3271 spin_unlock(&fs_info->balance_lock);
3272 }
3273
3274 atomic_inc(&fs_info->balance_running);
3275 mutex_unlock(&fs_info->balance_mutex);
3276
3277 ret = __btrfs_balance(fs_info);
3278
3279 mutex_lock(&fs_info->balance_mutex);
3280 atomic_dec(&fs_info->balance_running);
3281
3282 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3283 fs_info->num_tolerated_disk_barrier_failures =
3284 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3285 }
3286
3287 if (bargs) {
3288 memset(bargs, 0, sizeof(*bargs));
3289 update_ioctl_balance_args(fs_info, 0, bargs);
3290 }
3291
3292 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3293 balance_need_close(fs_info)) {
3294 __cancel_balance(fs_info);
3295 }
3296
3297 wake_up(&fs_info->balance_wait_q);
3298
3299 return ret;
3300out:
3301 if (bctl->flags & BTRFS_BALANCE_RESUME)
3302 __cancel_balance(fs_info);
3303 else {
3304 kfree(bctl);
3305 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3306 }
3307 return ret;
3308}
3309
3310static int balance_kthread(void *data)
3311{
3312 struct btrfs_fs_info *fs_info = data;
3313 int ret = 0;
3314
3315 mutex_lock(&fs_info->volume_mutex);
3316 mutex_lock(&fs_info->balance_mutex);
3317
3318 if (fs_info->balance_ctl) {
3319 btrfs_info(fs_info, "continuing balance");
3320 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3321 }
3322
3323 mutex_unlock(&fs_info->balance_mutex);
3324 mutex_unlock(&fs_info->volume_mutex);
3325
3326 return ret;
3327}
3328
3329int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3330{
3331 struct task_struct *tsk;
3332
3333 spin_lock(&fs_info->balance_lock);
3334 if (!fs_info->balance_ctl) {
3335 spin_unlock(&fs_info->balance_lock);
3336 return 0;
3337 }
3338 spin_unlock(&fs_info->balance_lock);
3339
3340 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3341 btrfs_info(fs_info, "force skipping balance");
3342 return 0;
3343 }
3344
3345 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3346 return PTR_ERR_OR_ZERO(tsk);
3347}
3348
3349int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3350{
3351 struct btrfs_balance_control *bctl;
3352 struct btrfs_balance_item *item;
3353 struct btrfs_disk_balance_args disk_bargs;
3354 struct btrfs_path *path;
3355 struct extent_buffer *leaf;
3356 struct btrfs_key key;
3357 int ret;
3358
3359 path = btrfs_alloc_path();
3360 if (!path)
3361 return -ENOMEM;
3362
3363 key.objectid = BTRFS_BALANCE_OBJECTID;
3364 key.type = BTRFS_BALANCE_ITEM_KEY;
3365 key.offset = 0;
3366
3367 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3368 if (ret < 0)
3369 goto out;
3370 if (ret > 0) { /* ret = -ENOENT; */
3371 ret = 0;
3372 goto out;
3373 }
3374
3375 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3376 if (!bctl) {
3377 ret = -ENOMEM;
3378 goto out;
3379 }
3380
3381 leaf = path->nodes[0];
3382 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3383
3384 bctl->fs_info = fs_info;
3385 bctl->flags = btrfs_balance_flags(leaf, item);
3386 bctl->flags |= BTRFS_BALANCE_RESUME;
3387
3388 btrfs_balance_data(leaf, item, &disk_bargs);
3389 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3390 btrfs_balance_meta(leaf, item, &disk_bargs);
3391 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3392 btrfs_balance_sys(leaf, item, &disk_bargs);
3393 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3394
3395 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3396
3397 mutex_lock(&fs_info->volume_mutex);
3398 mutex_lock(&fs_info->balance_mutex);
3399
3400 set_balance_control(bctl);
3401
3402 mutex_unlock(&fs_info->balance_mutex);
3403 mutex_unlock(&fs_info->volume_mutex);
3404out:
3405 btrfs_free_path(path);
3406 return ret;
3407}
3408
3409int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3410{
3411 int ret = 0;
3412
3413 mutex_lock(&fs_info->balance_mutex);
3414 if (!fs_info->balance_ctl) {
3415 mutex_unlock(&fs_info->balance_mutex);
3416 return -ENOTCONN;
3417 }
3418
3419 if (atomic_read(&fs_info->balance_running)) {
3420 atomic_inc(&fs_info->balance_pause_req);
3421 mutex_unlock(&fs_info->balance_mutex);
3422
3423 wait_event(fs_info->balance_wait_q,
3424 atomic_read(&fs_info->balance_running) == 0);
3425
3426 mutex_lock(&fs_info->balance_mutex);
3427 /* we are good with balance_ctl ripped off from under us */
3428 BUG_ON(atomic_read(&fs_info->balance_running));
3429 atomic_dec(&fs_info->balance_pause_req);
3430 } else {
3431 ret = -ENOTCONN;
3432 }
3433
3434 mutex_unlock(&fs_info->balance_mutex);
3435 return ret;
3436}
3437
3438int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3439{
3440 if (fs_info->sb->s_flags & MS_RDONLY)
3441 return -EROFS;
3442
3443 mutex_lock(&fs_info->balance_mutex);
3444 if (!fs_info->balance_ctl) {
3445 mutex_unlock(&fs_info->balance_mutex);
3446 return -ENOTCONN;
3447 }
3448
3449 atomic_inc(&fs_info->balance_cancel_req);
3450 /*
3451 * If we are running, just wait and return; the balance item is
3452 * deleted in btrfs_balance() in this case.
3453 */
3454 if (atomic_read(&fs_info->balance_running)) {
3455 mutex_unlock(&fs_info->balance_mutex);
3456 wait_event(fs_info->balance_wait_q,
3457 atomic_read(&fs_info->balance_running) == 0);
3458 mutex_lock(&fs_info->balance_mutex);
3459 } else {
3460 /* __cancel_balance needs volume_mutex */
3461 mutex_unlock(&fs_info->balance_mutex);
3462 mutex_lock(&fs_info->volume_mutex);
3463 mutex_lock(&fs_info->balance_mutex);
3464
3465 if (fs_info->balance_ctl)
3466 __cancel_balance(fs_info);
3467
3468 mutex_unlock(&fs_info->volume_mutex);
3469 }
3470
3471 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3472 atomic_dec(&fs_info->balance_cancel_req);
3473 mutex_unlock(&fs_info->balance_mutex);
3474 return 0;
3475}
3476
3477static int btrfs_uuid_scan_kthread(void *data)
3478{
3479 struct btrfs_fs_info *fs_info = data;
3480 struct btrfs_root *root = fs_info->tree_root;
3481 struct btrfs_key key;
3482 struct btrfs_key max_key;
3483 struct btrfs_path *path = NULL;
3484 int ret = 0;
3485 struct extent_buffer *eb;
3486 int slot;
3487 struct btrfs_root_item root_item;
3488 u32 item_size;
3489 struct btrfs_trans_handle *trans = NULL;
3490
3491 path = btrfs_alloc_path();
3492 if (!path) {
3493 ret = -ENOMEM;
3494 goto out;
3495 }
3496
3497 key.objectid = 0;
3498 key.type = BTRFS_ROOT_ITEM_KEY;
3499 key.offset = 0;
3500
3501 max_key.objectid = (u64)-1;
3502 max_key.type = BTRFS_ROOT_ITEM_KEY;
3503 max_key.offset = (u64)-1;
3504
3505 path->keep_locks = 1;
3506
3507 while (1) {
3508 ret = btrfs_search_forward(root, &key, path, 0);
3509 if (ret) {
3510 if (ret > 0)
3511 ret = 0;
3512 break;
3513 }
3514
3515 if (key.type != BTRFS_ROOT_ITEM_KEY ||
3516 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
3517 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
3518 key.objectid > BTRFS_LAST_FREE_OBJECTID)
3519 goto skip;
3520
3521 eb = path->nodes[0];
3522 slot = path->slots[0];
3523 item_size = btrfs_item_size_nr(eb, slot);
3524 if (item_size < sizeof(root_item))
3525 goto skip;
3526
3527 read_extent_buffer(eb, &root_item,
3528 btrfs_item_ptr_offset(eb, slot),
3529 (int)sizeof(root_item));
3530 if (btrfs_root_refs(&root_item) == 0)
3531 goto skip;
3532
3533 if (!btrfs_is_empty_uuid(root_item.uuid) ||
3534 !btrfs_is_empty_uuid(root_item.received_uuid)) {
3535 if (trans)
3536 goto update_tree;
3537
3538 btrfs_release_path(path);
3539 /*
3540 * 1 - subvol uuid item
3541 * 1 - received_subvol uuid item
3542 */
3543 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
3544 if (IS_ERR(trans)) {
3545 ret = PTR_ERR(trans);
3546 break;
3547 }
3548 continue;
3549 } else {
3550 goto skip;
3551 }
3552update_tree:
3553 if (!btrfs_is_empty_uuid(root_item.uuid)) {
3554 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3555 root_item.uuid,
3556 BTRFS_UUID_KEY_SUBVOL,
3557 key.objectid);
3558 if (ret < 0) {
3559 btrfs_warn(fs_info, "uuid_tree_add failed %d",
3560 ret);
3561 break;
3562 }
3563 }
3564
3565 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
3566 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3567 root_item.received_uuid,
3568 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3569 key.objectid);
3570 if (ret < 0) {
3571 btrfs_warn(fs_info, "uuid_tree_add failed %d",
3572 ret);
3573 break;
3574 }
3575 }
3576
3577skip:
3578 if (trans) {
3579 ret = btrfs_end_transaction(trans, fs_info->uuid_root);
3580 trans = NULL;
3581 if (ret)
3582 break;
3583 }
3584
3585 btrfs_release_path(path);
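/*
 * Advance the search key to the next possible ROOT_ITEM in
 * (objectid, type, offset) order: bump the offset first, and once
 * it would wrap, move on to the next objectid starting again at
 * offset 0; stop when the objectid itself would wrap.
 */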
3586 if (key.offset < (u64)-1) {
3587 key.offset++;
3588 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
3589 key.offset = 0;
3590 key.type = BTRFS_ROOT_ITEM_KEY;
3591 } else if (key.objectid < (u64)-1) {
3592 key.offset = 0;
3593 key.type = BTRFS_ROOT_ITEM_KEY;
3594 key.objectid++;
3595 } else {
3596 break;
3597 }
3598 cond_resched();
3599 }
3600
3601out:
3602 btrfs_free_path(path);
3603 if (trans && !IS_ERR(trans))
3604 btrfs_end_transaction(trans, fs_info->uuid_root);
3605 if (ret)
3606 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
3607 else
3608 fs_info->update_uuid_tree_gen = 1;
3609 up(&fs_info->uuid_tree_rescan_sem);
3610 return 0;
3611}
3612
3613/*
3614 * Callback for btrfs_uuid_tree_iterate().
3615 * returns:
3616 * 0 check succeeded, the entry is not outdated.
3617 * < 0 if an error occurred.
3618 * > 0 if the check failed, which means the caller shall remove the entry.
3619 */
3620static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
3621 u8 *uuid, u8 type, u64 subid)
3622{
3623 struct btrfs_key key;
3624 int ret = 0;
3625 struct btrfs_root *subvol_root;
3626
3627 if (type != BTRFS_UUID_KEY_SUBVOL &&
3628 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
3629 goto out;
3630
3631 key.objectid = subid;
3632 key.type = BTRFS_ROOT_ITEM_KEY;
3633 key.offset = (u64)-1;
3634 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
3635 if (IS_ERR(subvol_root)) {
3636 ret = PTR_ERR(subvol_root);
3637 if (ret == -ENOENT)
3638 ret = 1;
3639 goto out;
3640 }
3641
3642 switch (type) {
3643 case BTRFS_UUID_KEY_SUBVOL:
3644 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
3645 ret = 1;
3646 break;
3647 case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
3648 if (memcmp(uuid, subvol_root->root_item.received_uuid,
3649 BTRFS_UUID_SIZE))
3650 ret = 1;
3651 break;
3652 }
3653
3654out:
3655 return ret;
3656}
3657
3658static int btrfs_uuid_rescan_kthread(void *data)
3659{
3660 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
3661 int ret;
3662
3663 /*
3664 * 1st step is to iterate through the existing UUID tree and
3665 * to delete all entries that contain outdated data.
3666 * 2nd step is to add all missing entries to the UUID tree.
3667 */
3668 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
3669 if (ret < 0) {
3670 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
3671 up(&fs_info->uuid_tree_rescan_sem);
3672 return ret;
3673 }
3674 return btrfs_uuid_scan_kthread(data);
3675}
3676
3677int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
3678{
3679 struct btrfs_trans_handle *trans;
3680 struct btrfs_root *tree_root = fs_info->tree_root;
3681 struct btrfs_root *uuid_root;
3682 struct task_struct *task;
3683 int ret;
3684
3685 /*
3686 * 1 - root node
3687 * 1 - root item
3688 */
3689 trans = btrfs_start_transaction(tree_root, 2);
3690 if (IS_ERR(trans))
3691 return PTR_ERR(trans);
3692
3693 uuid_root = btrfs_create_tree(trans, fs_info,
3694 BTRFS_UUID_TREE_OBJECTID);
3695 if (IS_ERR(uuid_root)) {
3696 btrfs_abort_transaction(trans, tree_root,
3697 PTR_ERR(uuid_root));
3698 return PTR_ERR(uuid_root);
3699 }
3700
3701 fs_info->uuid_root = uuid_root;
3702
3703 ret = btrfs_commit_transaction(trans, tree_root);
3704 if (ret)
3705 return ret;
3706
3707 down(&fs_info->uuid_tree_rescan_sem);
3708 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
3709 if (IS_ERR(task)) {
3710 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3711 btrfs_warn(fs_info, "failed to start uuid_scan task");
3712 up(&fs_info->uuid_tree_rescan_sem);
3713 return PTR_ERR(task);
3714 }
3715
3716 return 0;
3717}
3718
3719int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
3720{
3721 struct task_struct *task;
3722
3723 down(&fs_info->uuid_tree_rescan_sem);
3724 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
3725 if (IS_ERR(task)) {
3726 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3727 btrfs_warn(fs_info, "failed to start uuid_rescan task");
3728 up(&fs_info->uuid_tree_rescan_sem);
3729 return PTR_ERR(task);
3730 }
3731
3732 return 0;
3733}
3734
3735/*
3736 * shrinking a device means finding all of the device extents past
3737 * the new size, and then following the back refs to the chunks.
3738 * The chunk relocation code actually frees the device extent
3739 */
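/*
 * Rough shape of the shrink loop below: device extents are visited
 * from key.offset (u64)-1 downwards via btrfs_previous_item(), so
 * extents past new_size are relocated highest-first.  -ENOSPC
 * failures are only counted, and the whole pass is retried once
 * before giving up and restoring the old size.
 */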
3740int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3741{
3742 struct btrfs_trans_handle *trans;
3743 struct btrfs_root *root = device->dev_root;
3744 struct btrfs_dev_extent *dev_extent = NULL;
3745 struct btrfs_path *path;
3746 u64 length;
3747 u64 chunk_tree;
3748 u64 chunk_objectid;
3749 u64 chunk_offset;
3750 int ret;
3751 int slot;
3752 int failed = 0;
3753 bool retried = false;
3754 struct extent_buffer *l;
3755 struct btrfs_key key;
3756 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3757 u64 old_total = btrfs_super_total_bytes(super_copy);
3758 u64 old_size = device->total_bytes;
3759 u64 diff = device->total_bytes - new_size;
3760
3761 if (device->is_tgtdev_for_dev_replace)
3762 return -EINVAL;
3763
3764 path = btrfs_alloc_path();
3765 if (!path)
3766 return -ENOMEM;
3767
3768 path->reada = 2;
3769
3770 lock_chunks(root);
3771
3772 device->total_bytes = new_size;
3773 if (device->writeable) {
3774 device->fs_devices->total_rw_bytes -= diff;
3775 spin_lock(&root->fs_info->free_chunk_lock);
3776 root->fs_info->free_chunk_space -= diff;
3777 spin_unlock(&root->fs_info->free_chunk_lock);
3778 }
3779 unlock_chunks(root);
3780
3781again:
3782 key.objectid = device->devid;
3783 key.offset = (u64)-1;
3784 key.type = BTRFS_DEV_EXTENT_KEY;
3785
3786 do {
3787 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3788 if (ret < 0)
3789 goto done;
3790
3791 ret = btrfs_previous_item(root, path, 0, key.type);
3792 if (ret < 0)
3793 goto done;
3794 if (ret) {
3795 ret = 0;
3796 btrfs_release_path(path);
3797 break;
3798 }
3799
3800 l = path->nodes[0];
3801 slot = path->slots[0];
3802 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3803
3804 if (key.objectid != device->devid) {
3805 btrfs_release_path(path);
3806 break;
3807 }
3808
3809 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3810 length = btrfs_dev_extent_length(l, dev_extent);
3811
3812 if (key.offset + length <= new_size) {
3813 btrfs_release_path(path);
3814 break;
3815 }
3816
3817 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3818 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3819 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3820 btrfs_release_path(path);
3821
3822 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3823 chunk_offset);
3824 if (ret && ret != -ENOSPC)
3825 goto done;
3826 if (ret == -ENOSPC)
3827 failed++;
3828 } while (key.offset-- > 0);
3829
3830 if (failed && !retried) {
3831 failed = 0;
3832 retried = true;
3833 goto again;
3834 } else if (failed && retried) {
3835 ret = -ENOSPC;
3836 lock_chunks(root);
3837
3838 device->total_bytes = old_size;
3839 if (device->writeable)
3840 device->fs_devices->total_rw_bytes += diff;
3841 spin_lock(&root->fs_info->free_chunk_lock);
3842 root->fs_info->free_chunk_space += diff;
3843 spin_unlock(&root->fs_info->free_chunk_lock);
3844 unlock_chunks(root);
3845 goto done;
3846 }
3847
3848 /* Shrinking succeeded, else we would be at "done". */
3849 trans = btrfs_start_transaction(root, 0);
3850 if (IS_ERR(trans)) {
3851 ret = PTR_ERR(trans);
3852 goto done;
3853 }
3854
3855 lock_chunks(root);
3856
3857 device->disk_total_bytes = new_size;
3858 /* Now btrfs_update_device() will change the on-disk size. */
3859 ret = btrfs_update_device(trans, device);
3860 if (ret) {
3861 unlock_chunks(root);
3862 btrfs_end_transaction(trans, root);
3863 goto done;
3864 }
3865 WARN_ON(diff > old_total);
3866 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3867 unlock_chunks(root);
3868 btrfs_end_transaction(trans, root);
3869done:
3870 btrfs_free_path(path);
3871 return ret;
3872}
3873
3874static int btrfs_add_system_chunk(struct btrfs_root *root,
3875 struct btrfs_key *key,
3876 struct btrfs_chunk *chunk, int item_size)
3877{
3878 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3879 struct btrfs_disk_key disk_key;
3880 u32 array_size;
3881 u8 *ptr;
3882
3883 array_size = btrfs_super_sys_array_size(super_copy);
3884 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3885 return -EFBIG;
3886
3887 ptr = super_copy->sys_chunk_array + array_size;
3888 btrfs_cpu_key_to_disk(&disk_key, key);
3889 memcpy(ptr, &disk_key, sizeof(disk_key));
3890 ptr += sizeof(disk_key);
3891 memcpy(ptr, chunk, item_size);
3892 item_size += sizeof(disk_key);
3893 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3894 return 0;
3895}
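/*
 * Resulting sys_chunk_array layout (illustrative):
 *
 *   [disk_key 0][chunk item 0][disk_key 1][chunk item 1]...
 *
 * Each chunk item is a btrfs_chunk followed by its btrfs_stripe
 * entries, so the entries are variable-sized and item_size differs
 * per chunk.
 */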
3896
3897/*
3898 * sort the devices in descending order by max_avail, total_avail
3899 */
3900static int btrfs_cmp_device_info(const void *a, const void *b)
3901{
3902 const struct btrfs_device_info *di_a = a;
3903 const struct btrfs_device_info *di_b = b;
3904
3905 if (di_a->max_avail > di_b->max_avail)
3906 return -1;
3907 if (di_a->max_avail < di_b->max_avail)
3908 return 1;
3909 if (di_a->total_avail > di_b->total_avail)
3910 return -1;
3911 if (di_a->total_avail < di_b->total_avail)
3912 return 1;
3913 return 0;
3914}
3915
3975static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3976{
3977 /* TODO allow them to set a preferred stripe size */
3978 return 64 * 1024;
3979}
3980
3981static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3982{
3983 if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3984 return;
3985
3986 btrfs_set_fs_incompat(info, RAID56);
3987}
3988
3989static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3990 struct btrfs_root *extent_root, u64 start,
3991 u64 type)
3992{
3993 struct btrfs_fs_info *info = extent_root->fs_info;
3994 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3995 struct list_head *cur;
3996 struct map_lookup *map = NULL;
3997 struct extent_map_tree *em_tree;
3998 struct extent_map *em;
3999 struct btrfs_device_info *devices_info = NULL;
4000 u64 total_avail;
4001 int num_stripes; /* total number of stripes to allocate */
4002 int data_stripes; /* number of stripes that count for
4003 block group size */
4004 int sub_stripes; /* sub_stripes info for map */
4005 int dev_stripes; /* stripes per dev */
4006 int devs_max; /* max devs to use */
4007 int devs_min; /* min devs needed */
4008 int devs_increment; /* ndevs has to be a multiple of this */
4009 int ncopies; /* how many copies the data has */
4010 int ret;
4011 u64 max_stripe_size;
4012 u64 max_chunk_size;
4013 u64 stripe_size;
4014 u64 num_bytes;
4015 u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4016 int ndevs;
4017 int i;
4018 int j;
4019 int index;
4020
4021 BUG_ON(!alloc_profile_is_valid(type, 0));
4022
4023 if (list_empty(&fs_devices->alloc_list))
4024 return -ENOSPC;
4025
4026 index = __get_raid_index(type);
4027
4028 sub_stripes = btrfs_raid_array[index].sub_stripes;
4029 dev_stripes = btrfs_raid_array[index].dev_stripes;
4030 devs_max = btrfs_raid_array[index].devs_max;
4031 devs_min = btrfs_raid_array[index].devs_min;
4032 devs_increment = btrfs_raid_array[index].devs_increment;
4033 ncopies = btrfs_raid_array[index].ncopies;
4034
4035 if (type & BTRFS_BLOCK_GROUP_DATA) {
4036 max_stripe_size = 1024 * 1024 * 1024;
4037 max_chunk_size = 10 * max_stripe_size;
4038 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4039 /* for larger filesystems, use larger metadata chunks */
4040 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
4041 max_stripe_size = 1024 * 1024 * 1024;
4042 else
4043 max_stripe_size = 256 * 1024 * 1024;
4044 max_chunk_size = max_stripe_size;
4045 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4046 max_stripe_size = 32 * 1024 * 1024;
4047 max_chunk_size = 2 * max_stripe_size;
4048 } else {
4049 btrfs_err(info, "invalid chunk type 0x%llx requested",
4050 type);
4051 BUG_ON(1);
4052 }
4053
4054 /* we don't want a chunk larger than 10% of writeable space */
4055 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4056 max_chunk_size);
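/*
 * Worked example of the cap above, div_factor(x, 1) being x / 10:
 * with 100 GiB of writable space, a data chunk is capped at
 * min(10 GiB, 10 GiB) = 10 GiB and a metadata chunk at
 * min(1 GiB, 10 GiB) = 1 GiB.
 */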
4057
4058 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
4059 GFP_NOFS);
4060 if (!devices_info)
4061 return -ENOMEM;
4062
4063 cur = fs_devices->alloc_list.next;
4064
4065 /*
4066 * in the first pass through the devices list, we gather information
4067 * about the available holes on each device.
4068 */
4069 ndevs = 0;
4070 while (cur != &fs_devices->alloc_list) {
4071 struct btrfs_device *device;
4072 u64 max_avail;
4073 u64 dev_offset;
4074
4075 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4076
4077 cur = cur->next;
4078
4079 if (!device->writeable) {
4080 WARN(1, KERN_ERR
4081 "BTRFS: read-only device in alloc_list\n");
4082 continue;
4083 }
4084
4085 if (!device->in_fs_metadata ||
4086 device->is_tgtdev_for_dev_replace)
4087 continue;
4088
4089 if (device->total_bytes > device->bytes_used)
4090 total_avail = device->total_bytes - device->bytes_used;
4091 else
4092 total_avail = 0;
4093
4094 /* If there is no space on this device, skip it. */
4095 if (total_avail == 0)
4096 continue;
4097
4098 ret = find_free_dev_extent(trans, device,
4099 max_stripe_size * dev_stripes,
4100 &dev_offset, &max_avail);
4101 if (ret && ret != -ENOSPC)
4102 goto error;
4103
4104 if (ret == 0)
4105 max_avail = max_stripe_size * dev_stripes;
4106
4107 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4108 continue;
4109
4110 if (ndevs == fs_devices->rw_devices) {
4111 WARN(1, "%s: found more than %llu devices\n",
4112 __func__, fs_devices->rw_devices);
4113 break;
4114 }
4115 devices_info[ndevs].dev_offset = dev_offset;
4116 devices_info[ndevs].max_avail = max_avail;
4117 devices_info[ndevs].total_avail = total_avail;
4118 devices_info[ndevs].dev = device;
4119 ++ndevs;
4120 }
4121
4122 /*
4123 * now sort the devices by hole size / available space
4124 */
4125 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4126 btrfs_cmp_device_info, NULL);
4127
4128 /* round down to number of usable stripes */
4129 ndevs -= ndevs % devs_increment;
4130
4131 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4132 ret = -ENOSPC;
4133 goto error;
4134 }
4135
4136 if (devs_max && ndevs > devs_max)
4137 ndevs = devs_max;
4138 /*
4139 * the primary goal is to maximize the number of stripes, so use as many
4140 * devices as possible, even if the stripes are not maximum sized.
4141 */
4142 stripe_size = devices_info[ndevs-1].max_avail;
4143 num_stripes = ndevs * dev_stripes;
4144
4145 /*
4146 * this will have to be fixed for RAID1 and RAID10 over
4147 * more drives
4148 */
4149 data_stripes = num_stripes / ncopies;
4150
4151 if (type & BTRFS_BLOCK_GROUP_RAID5) {
4152 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4153 btrfs_super_stripesize(info->super_copy));
4154 data_stripes = num_stripes - 1;
4155 }
4156 if (type & BTRFS_BLOCK_GROUP_RAID6) {
4157 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4158 btrfs_super_stripesize(info->super_copy));
4159 data_stripes = num_stripes - 2;
4160 }
4161
4162 /*
4163 * Use the number of data stripes to figure out how big this chunk
4164 * is really going to be in terms of logical address space,
4165 * and compare that answer with the max chunk size
4166 */
4167 if (stripe_size * data_stripes > max_chunk_size) {
4168 u64 mask = (1ULL << 24) - 1;
4169 stripe_size = max_chunk_size;
4170 do_div(stripe_size, data_stripes);
4171
4172 /* bump the answer up to a 16MB boundary */
4173 stripe_size = (stripe_size + mask) & ~mask;
4174
4175 /* but don't go higher than the limits we found
4176 * while searching for free extents
4177 */
4178 if (stripe_size > devices_info[ndevs-1].max_avail)
4179 stripe_size = devices_info[ndevs-1].max_avail;
4180 }
4181
4182 do_div(stripe_size, dev_stripes);
4183
4184 /* align to BTRFS_STRIPE_LEN */
4185 do_div(stripe_size, raid_stripe_len);
4186 stripe_size *= raid_stripe_len;
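/*
 * Illustrative sizing walk-through: 16 writable devices, RAID0 data,
 * and a full 1 GiB hole on each.  stripe_size starts at 1 GiB, but
 * 16 data stripes would make a 16 GiB chunk, over the 10 GiB cap,
 * so it is recomputed as 10 GiB / 16 = 640 MiB (already on a 16 MiB
 * boundary) and then aligned down to raid_stripe_len, yielding a
 * 10 GiB chunk built from 640 MiB per-device stripes.
 */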
4187
4188 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4189 if (!map) {
4190 ret = -ENOMEM;
4191 goto error;
4192 }
4193 map->num_stripes = num_stripes;
4194
4195 for (i = 0; i < ndevs; ++i) {
4196 for (j = 0; j < dev_stripes; ++j) {
4197 int s = i * dev_stripes + j;
4198 map->stripes[s].dev = devices_info[i].dev;
4199 map->stripes[s].physical = devices_info[i].dev_offset +
4200 j * stripe_size;
4201 }
4202 }
4203 map->sector_size = extent_root->sectorsize;
4204 map->stripe_len = raid_stripe_len;
4205 map->io_align = raid_stripe_len;
4206 map->io_width = raid_stripe_len;
4207 map->type = type;
4208 map->sub_stripes = sub_stripes;
4209
4210 num_bytes = stripe_size * data_stripes;
4211
4212 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4213
4214 em = alloc_extent_map();
4215 if (!em) {
4216 ret = -ENOMEM;
4217 goto error;
4218 }
4219 em->bdev = (struct block_device *)map;
4220 em->start = start;
4221 em->len = num_bytes;
4222 em->block_start = 0;
4223 em->block_len = em->len;
4224 em->orig_block_len = stripe_size;
4225
4226 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4227 write_lock(&em_tree->lock);
4228 ret = add_extent_mapping(em_tree, em, 0);
4229 if (!ret) {
4230 list_add_tail(&em->list, &trans->transaction->pending_chunks);
4231 atomic_inc(&em->refs);
4232 }
4233 write_unlock(&em_tree->lock);
4234 if (ret) {
4235 free_extent_map(em);
4236 goto error;
4237 }
4238
4239 ret = btrfs_make_block_group(trans, extent_root, 0, type,
4240 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4241 start, num_bytes);
4242 if (ret)
4243 goto error_del_extent;
4244
4245 free_extent_map(em);
4246 check_raid56_incompat_flag(extent_root->fs_info, type);
4247
4248 kfree(devices_info);
4249 return 0;
4250
4251error_del_extent:
4252 write_lock(&em_tree->lock);
4253 remove_extent_mapping(em_tree, em);
4254 write_unlock(&em_tree->lock);
4255
4256 /* One for our allocation */
4257 free_extent_map(em);
4258 /* One for the tree reference */
4259 free_extent_map(em);
4260error:
4261 kfree(map);
4262 kfree(devices_info);
4263 return ret;
4264}
4265
4266int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4267 struct btrfs_root *extent_root,
4268 u64 chunk_offset, u64 chunk_size)
4269{
4270 struct btrfs_key key;
4271 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4272 struct btrfs_device *device;
4273 struct btrfs_chunk *chunk;
4274 struct btrfs_stripe *stripe;
4275 struct extent_map_tree *em_tree;
4276 struct extent_map *em;
4277 struct map_lookup *map;
4278 size_t item_size;
4279 u64 dev_offset;
4280 u64 stripe_size;
4281 int i = 0;
4282 int ret;
4283
4284 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4285 read_lock(&em_tree->lock);
4286 em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4287 read_unlock(&em_tree->lock);
4288
4289 if (!em) {
4290 btrfs_crit(extent_root->fs_info, "unable to find logical %Lu len %Lu",
4291 chunk_offset, chunk_size);
4292 return -EINVAL;
4293 }
4294
4295 if (em->start != chunk_offset || em->len != chunk_size) {
4296 btrfs_crit(extent_root->fs_info,
4297 "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
4298 chunk_offset, chunk_size, em->start, em->len);
4299 free_extent_map(em);
4300 return -EINVAL;
4301 }
4302
4303 map = (struct map_lookup *)em->bdev;
4304 item_size = btrfs_chunk_item_size(map->num_stripes);
4305 stripe_size = em->orig_block_len;
4306
4307 chunk = kzalloc(item_size, GFP_NOFS);
4308 if (!chunk) {
4309 ret = -ENOMEM;
4310 goto out;
4311 }
4312
4313 for (i = 0; i < map->num_stripes; i++) {
4314 device = map->stripes[i].dev;
4315 dev_offset = map->stripes[i].physical;
4316
4317 device->bytes_used += stripe_size;
4318 ret = btrfs_update_device(trans, device);
4319 if (ret)
4320 goto out;
4321 ret = btrfs_alloc_dev_extent(trans, device,
4322 chunk_root->root_key.objectid,
4323 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4324 chunk_offset, dev_offset,
4325 stripe_size);
4326 if (ret)
4327 goto out;
4328 }
4329
4330 spin_lock(&extent_root->fs_info->free_chunk_lock);
4331 extent_root->fs_info->free_chunk_space -= (stripe_size *
4332 map->num_stripes);
4333 spin_unlock(&extent_root->fs_info->free_chunk_lock);
4334
4335 stripe = &chunk->stripe;
4336 for (i = 0; i < map->num_stripes; i++) {
4337 device = map->stripes[i].dev;
4338 dev_offset = map->stripes[i].physical;
4339
4340 btrfs_set_stack_stripe_devid(stripe, device->devid);
4341 btrfs_set_stack_stripe_offset(stripe, dev_offset);
4342 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4343 stripe++;
4344 }
4345
4346 btrfs_set_stack_chunk_length(chunk, chunk_size);
4347 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4348 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4349 btrfs_set_stack_chunk_type(chunk, map->type);
4350 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4351 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4352 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4353 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4354 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4355
4356 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4357 key.type = BTRFS_CHUNK_ITEM_KEY;
4358 key.offset = chunk_offset;
4359
4360 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4361 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4362 /*
4363 * TODO: Cleanup of inserted chunk root in case of
4364 * failure.
4365 */
4366 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4367 item_size);
4368 }
4369
4370out:
4371 kfree(chunk);
4372 free_extent_map(em);
4373 return ret;
4374}
4375
4376/*
4377 * Chunk allocation falls into two parts. The first part does the work
4378 * that makes the newly allocated chunk usable, but does not do any
4379 * operation that modifies the chunk tree. The second part does the work
4380 * that requires modifying the chunk tree. This division is important for the
4381 * bootstrap process of adding storage to a seed btrfs.
4382 */
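/*
 * Rough call-order sketch of the two parts as implemented in this
 * file: btrfs_alloc_chunk() -> __btrfs_alloc_chunk() builds the
 * extent map and the block group (part one); btrfs_finish_chunk_alloc()
 * later inserts the chunk item and, for SYSTEM chunks, appends it to
 * the superblock's sys_chunk_array (part two).
 */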
4383int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4384 struct btrfs_root *extent_root, u64 type)
4385{
4386 u64 chunk_offset;
4387
4388 chunk_offset = find_next_chunk(extent_root->fs_info);
4389 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4390}
4391
4392static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4393 struct btrfs_root *root,
4394 struct btrfs_device *device)
4395{
4396 u64 chunk_offset;
4397 u64 sys_chunk_offset;
4398 u64 alloc_profile;
4399 struct btrfs_fs_info *fs_info = root->fs_info;
4400 struct btrfs_root *extent_root = fs_info->extent_root;
4401 int ret;
4402
4403 chunk_offset = find_next_chunk(fs_info);
4404 alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4405 ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4406 alloc_profile);
4407 if (ret)
4408 return ret;
4409
4410 sys_chunk_offset = find_next_chunk(root->fs_info);
4411 alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4412 ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4413 alloc_profile);
4414 if (ret) {
4415 btrfs_abort_transaction(trans, root, ret);
4416 goto out;
4417 }
4418
4419 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4420 if (ret)
4421 btrfs_abort_transaction(trans, root, ret);
4422out:
4423 return ret;
4424}
4425
4426int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4427{
4428 struct extent_map *em;
4429 struct map_lookup *map;
4430 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4431 int readonly = 0;
4432 int i;
4433
4434 read_lock(&map_tree->map_tree.lock);
4435 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4436 read_unlock(&map_tree->map_tree.lock);
4437 if (!em)
4438 return 1;
4439
4440 if (btrfs_test_opt(root, DEGRADED)) {
4441 free_extent_map(em);
4442 return 0;
4443 }
4444
4445 map = (struct map_lookup *)em->bdev;
4446 for (i = 0; i < map->num_stripes; i++) {
4447 if (!map->stripes[i].dev->writeable) {
4448 readonly = 1;
4449 break;
4450 }
4451 }
4452 free_extent_map(em);
4453 return readonly;
4454}
4455
4456void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4457{
4458 extent_map_tree_init(&tree->map_tree);
4459}
4460
4461void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4462{
4463 struct extent_map *em;
4464
4465 while (1) {
4466 write_lock(&tree->map_tree.lock);
4467 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4468 if (em)
4469 remove_extent_mapping(&tree->map_tree, em);
4470 write_unlock(&tree->map_tree.lock);
4471 if (!em)
4472 break;
4473 kfree(em->bdev);
4474 /* once for us */
4475 free_extent_map(em);
4476 /* once for the tree */
4477 free_extent_map(em);
4478 }
4479}
4480
4481int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4482{
4483 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4484 struct extent_map *em;
4485 struct map_lookup *map;
4486 struct extent_map_tree *em_tree = &map_tree->map_tree;
4487 int ret;
4488
4489 read_lock(&em_tree->lock);
4490 em = lookup_extent_mapping(em_tree, logical, len);
4491 read_unlock(&em_tree->lock);
4492
4493 /*
4494 * We could return errors for these cases, but that could get ugly and
4495 * we'd probably end up doing the same thing anyway: nothing else but
4496 * exit. So return 1 so the callers don't try to use other copies.
4497 */
4498 if (!em) {
4499 btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
4500 logical+len);
4501 return 1;
4502 }
4503
4504 if (em->start > logical || em->start + em->len < logical) {
4505 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
4506 logical, logical+len, em->start,
4507 em->start + em->len);
4508 free_extent_map(em);
4509 return 1;
4510 }
4511
4512 map = (struct map_lookup *)em->bdev;
4513 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4514 ret = map->num_stripes;
4515 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4516 ret = map->sub_stripes;
4517 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4518 ret = 2;
4519 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4520 ret = 3;
4521 else
4522 ret = 1;
4523 free_extent_map(em);
4524
4525 btrfs_dev_replace_lock(&fs_info->dev_replace);
4526 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4527 ret++;
4528 btrfs_dev_replace_unlock(&fs_info->dev_replace);
4529
4530 return ret;
4531}
4532
4533unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4534 struct btrfs_mapping_tree *map_tree,
4535 u64 logical)
4536{
4537 struct extent_map *em;
4538 struct map_lookup *map;
4539 struct extent_map_tree *em_tree = &map_tree->map_tree;
4540 unsigned long len = root->sectorsize;
4541
4542 read_lock(&em_tree->lock);
4543 em = lookup_extent_mapping(em_tree, logical, len);
4544 read_unlock(&em_tree->lock);
4545 BUG_ON(!em);
4546
4547 BUG_ON(em->start > logical || em->start + em->len < logical);
4548 map = (struct map_lookup *)em->bdev;
4549 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4550 BTRFS_BLOCK_GROUP_RAID6)) {
4551 len = map->stripe_len * nr_data_stripes(map);
4552 }
4553 free_extent_map(em);
4554 return len;
4555}
4556
4557int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4558 u64 logical, u64 len, int mirror_num)
4559{
4560 struct extent_map *em;
4561 struct map_lookup *map;
4562 struct extent_map_tree *em_tree = &map_tree->map_tree;
4563 int ret = 0;
4564
4565 read_lock(&em_tree->lock);
4566 em = lookup_extent_mapping(em_tree, logical, len);
4567 read_unlock(&em_tree->lock);
4568 BUG_ON(!em);
4569
4570 BUG_ON(em->start > logical || em->start + em->len < logical);
4571 map = (struct map_lookup *)em->bdev;
4572 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4573 BTRFS_BLOCK_GROUP_RAID6))
4574 ret = 1;
4575 free_extent_map(em);
4576 return ret;
4577}
4578
4579static int find_live_mirror(struct btrfs_fs_info *fs_info,
4580 struct map_lookup *map, int first, int num,
4581 int optimal, int dev_replace_is_ongoing)
4582{
4583 int i;
4584 int tolerance;
4585 struct btrfs_device *srcdev;
4586
4587 if (dev_replace_is_ongoing &&
4588 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4589 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4590 srcdev = fs_info->dev_replace.srcdev;
4591 else
4592 srcdev = NULL;
4593
4594 /*
4595 * try to avoid the drive that is the source drive for a
4596 * dev-replace procedure, only choose it if no other non-missing
4597 * mirror is available
4598 */
4599 for (tolerance = 0; tolerance < 2; tolerance++) {
4600 if (map->stripes[optimal].dev->bdev &&
4601 (tolerance || map->stripes[optimal].dev != srcdev))
4602 return optimal;
4603 for (i = first; i < first + num; i++) {
4604 if (map->stripes[i].dev->bdev &&
4605 (tolerance || map->stripes[i].dev != srcdev))
4606 return i;
4607 }
4608 }
4609
4610 /* we couldn't find one that doesn't fail. Just return something
4611 * and the io error handling code will clean up eventually
4612 */
4613 return optimal;
4614}
4615
4616static inline int parity_smaller(u64 a, u64 b)
4617{
4618 return a > b;
4619}
4620
4621/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4622static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4623{
4624 struct btrfs_bio_stripe s;
4625 int i;
4626 u64 l;
4627 int again = 1;
4628
4629 while (again) {
4630 again = 0;
4631 for (i = 0; i < bbio->num_stripes - 1; i++) {
4632 if (parity_smaller(raid_map[i], raid_map[i+1])) {
4633 s = bbio->stripes[i];
4634 l = raid_map[i];
4635 bbio->stripes[i] = bbio->stripes[i+1];
4636 raid_map[i] = raid_map[i+1];
4637 bbio->stripes[i+1] = s;
4638 raid_map[i+1] = l;
4639 again = 1;
4640 }
4641 }
4642 }
4643}
4644
4645static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4646 u64 logical, u64 *length,
4647 struct btrfs_bio **bbio_ret,
4648 int mirror_num, u64 **raid_map_ret)
4649{
4650 struct extent_map *em;
4651 struct map_lookup *map;
4652 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4653 struct extent_map_tree *em_tree = &map_tree->map_tree;
4654 u64 offset;
4655 u64 stripe_offset;
4656 u64 stripe_end_offset;
4657 u64 stripe_nr;
4658 u64 stripe_nr_orig;
4659 u64 stripe_nr_end;
4660 u64 stripe_len;
4661 u64 *raid_map = NULL;
4662 int stripe_index;
4663 int i;
4664 int ret = 0;
4665 int num_stripes;
4666 int max_errors = 0;
4667 struct btrfs_bio *bbio = NULL;
4668 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4669 int dev_replace_is_ongoing = 0;
4670 int num_alloc_stripes;
4671 int patch_the_first_stripe_for_dev_replace = 0;
4672 u64 physical_to_patch_in_first_stripe = 0;
4673 u64 raid56_full_stripe_start = (u64)-1;
4674
4675 read_lock(&em_tree->lock);
4676 em = lookup_extent_mapping(em_tree, logical, *length);
4677 read_unlock(&em_tree->lock);
4678
4679 if (!em) {
4680 btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4681 logical, *length);
4682 return -EINVAL;
4683 }
4684
4685 if (em->start > logical || em->start + em->len < logical) {
4686 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, found %Lu-%Lu",
4687 logical, em->start,
4688 em->start + em->len);
4689 free_extent_map(em);
4690 return -EINVAL;
4691 }
4692
4693 map = (struct map_lookup *)em->bdev;
4694 offset = logical - em->start;
4695
4696 stripe_len = map->stripe_len;
4697 stripe_nr = offset;
4698 /*
4699 * stripe_nr counts the total number of stripes we have to stride
4700 * to get to this block
4701 */
4702 do_div(stripe_nr, stripe_len);
4703
4704 stripe_offset = stripe_nr * stripe_len;
4705 BUG_ON(offset < stripe_offset);
4706
4707 /* stripe_offset is the offset of this block in its stripe */
4708 stripe_offset = offset - stripe_offset;
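/*
 * Worked example with a 64 KiB stripe_len: for offset 300 KiB into
 * the chunk, stripe_nr = 300 KiB / 64 KiB = 4 and
 * stripe_offset = 300 KiB - 4 * 64 KiB = 44 KiB into stripe 4.
 */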
4709
4710 /* if we're here for raid56, we need to know the stripe aligned start */
4711 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4712 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4713 raid56_full_stripe_start = offset;
4714
4715 /* allow a write of a full stripe, but make sure we don't
4716 * allow straddling of stripes
4717 */
4718 do_div(raid56_full_stripe_start, full_stripe_len);
4719 raid56_full_stripe_start *= full_stripe_len;
4720 }
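/*
 * Illustrative example: RAID5 over 4 devices has 3 data stripes, so
 * full_stripe_len = 3 * 64 KiB = 192 KiB and an offset of 300 KiB
 * rounds down to a full stripe start of 192 KiB.
 */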
4721
4722 if (rw & REQ_DISCARD) {
4723 /* we don't discard raid56 yet */
4724 if (map->type &
4725 (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4726 ret = -EOPNOTSUPP;
4727 goto out;
4728 }
4729 *length = min_t(u64, em->len - offset, *length);
4730 } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4731 u64 max_len;
4732 /* For writes to RAID[56], allow a full stripeset across all disks.
4733 * For other RAID types and for RAID[56] reads, just allow a single
4734 * stripe (on a single disk). */
4735 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4736 (rw & REQ_WRITE)) {
4737 max_len = stripe_len * nr_data_stripes(map) -
4738 (offset - raid56_full_stripe_start);
4739 } else {
4740 /* we limit the length of each bio to what fits in a stripe */
4741 max_len = stripe_len - stripe_offset;
4742 }
4743 *length = min_t(u64, em->len - offset, max_len);
4744 } else {
4745 *length = em->len - offset;
4746 }
4747
4748 /* This is for when we're called from btrfs_merge_bio_hook() and all
4749 * it cares about is the length */
4750 if (!bbio_ret)
4751 goto out;
4752
4753 btrfs_dev_replace_lock(dev_replace);
4754 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4755 if (!dev_replace_is_ongoing)
4756 btrfs_dev_replace_unlock(dev_replace);
4757
4758 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4759 !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4760 dev_replace->tgtdev != NULL) {
4761 /*
4762 * in dev-replace case, for repair case (that's the only
4763 * case where the mirror is selected explicitly when
4764 * calling btrfs_map_block), blocks left of the left cursor
4765 * can also be read from the target drive.
4766 * For REQ_GET_READ_MIRRORS, the target drive is added as
4767 * the last one to the array of stripes. For READ, it also
4768 * needs to be supported using the same mirror number.
4769 * If the requested block is not left of the left cursor,
4770 * EIO is returned. This can happen because btrfs_num_copies()
4771 * returns one more in the dev-replace case.
4772 */
4773 u64 tmp_length = *length;
4774 struct btrfs_bio *tmp_bbio = NULL;
4775 int tmp_num_stripes;
4776 u64 srcdev_devid = dev_replace->srcdev->devid;
4777 int index_srcdev = 0;
4778 int found = 0;
4779 u64 physical_of_found = 0;
4780
4781 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4782 logical, &tmp_length, &tmp_bbio, 0, NULL);
4783 if (ret) {
4784 WARN_ON(tmp_bbio != NULL);
4785 goto out;
4786 }
4787
4788 tmp_num_stripes = tmp_bbio->num_stripes;
4789 if (mirror_num > tmp_num_stripes) {
4790 /*
4791 * REQ_GET_READ_MIRRORS does not contain this
4792 * mirror, that means that the requested area
4793 * is not left of the left cursor
4794 */
4795 ret = -EIO;
4796 kfree(tmp_bbio);
4797 goto out;
4798 }
4799
4800 /*
4801 * process the rest of the function using the mirror_num
4802 * of the source drive. Therefore look it up first.
4803 * At the end, patch the device pointer to the one of the
4804 * target drive.
4805 */
4806 for (i = 0; i < tmp_num_stripes; i++) {
4807 if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4808 /*
4809 * In case of DUP, in order to keep it
4810 * simple, only add the mirror with the
4811 * lowest physical address
4812 */
4813 if (found &&
4814 physical_of_found <=
4815 tmp_bbio->stripes[i].physical)
4816 continue;
4817 index_srcdev = i;
4818 found = 1;
4819 physical_of_found =
4820 tmp_bbio->stripes[i].physical;
4821 }
4822 }
4823
4824 if (found) {
4825 mirror_num = index_srcdev + 1;
4826 patch_the_first_stripe_for_dev_replace = 1;
4827 physical_to_patch_in_first_stripe = physical_of_found;
4828 } else {
4829 WARN_ON(1);
4830 ret = -EIO;
4831 kfree(tmp_bbio);
4832 goto out;
4833 }
4834
4835 kfree(tmp_bbio);
4836 } else if (mirror_num > map->num_stripes) {
4837 mirror_num = 0;
4838 }
4839
4840 num_stripes = 1;
4841 stripe_index = 0;
4842 stripe_nr_orig = stripe_nr;
4843 stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4844 do_div(stripe_nr_end, map->stripe_len);
4845 stripe_end_offset = stripe_nr_end * map->stripe_len -
4846 (offset + *length);
4847
4848 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4849 if (rw & REQ_DISCARD)
4850 num_stripes = min_t(u64, map->num_stripes,
4851 stripe_nr_end - stripe_nr_orig);
4852 stripe_index = do_div(stripe_nr, map->num_stripes);
4853 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4854 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4855 num_stripes = map->num_stripes;
4856 else if (mirror_num)
4857 stripe_index = mirror_num - 1;
4858 else {
4859 stripe_index = find_live_mirror(fs_info, map, 0,
4860 map->num_stripes,
4861 current->pid % map->num_stripes,
4862 dev_replace_is_ongoing);
4863 mirror_num = stripe_index + 1;
4864 }
4865
4866 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4867 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4868 num_stripes = map->num_stripes;
4869 } else if (mirror_num) {
4870 stripe_index = mirror_num - 1;
4871 } else {
4872 mirror_num = 1;
4873 }
4874
4875 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4876 int factor = map->num_stripes / map->sub_stripes;
4877
4878 stripe_index = do_div(stripe_nr, factor);
4879 stripe_index *= map->sub_stripes;
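/*
 * Worked example: 4 stripes with sub_stripes 2 means factor 2 (two
 * mirrored pairs).  stripe_nr % factor selects the pair, the
 * multiply above points stripe_index at the pair's first mirror,
 * and stripe_nr is left as the stripe number within that pair's
 * devices.
 */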
4880
4881 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4882 num_stripes = map->sub_stripes;
4883 else if (rw & REQ_DISCARD)
4884 num_stripes = min_t(u64, map->sub_stripes *
4885 (stripe_nr_end - stripe_nr_orig),
4886 map->num_stripes);
4887 else if (mirror_num)
4888 stripe_index += mirror_num - 1;
4889 else {
4890 int old_stripe_index = stripe_index;
4891 stripe_index = find_live_mirror(fs_info, map,
4892 stripe_index,
4893 map->sub_stripes, stripe_index +
4894 current->pid % map->sub_stripes,
4895 dev_replace_is_ongoing);
4896 mirror_num = stripe_index - old_stripe_index + 1;
4897 }
4898
4899 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4900 BTRFS_BLOCK_GROUP_RAID6)) {
4901 u64 tmp;
4902
4903 if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4904 && raid_map_ret) {
4905 int i, rot;
4906
4907 /* push stripe_nr back to the start of the full stripe */
4908 stripe_nr = raid56_full_stripe_start;
4909 do_div(stripe_nr, stripe_len);
4910
4911 stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4912
4913 /* RAID[56] write or recovery. Return all stripes */
4914 num_stripes = map->num_stripes;
4915 max_errors = nr_parity_stripes(map);
4916
4917 raid_map = kmalloc_array(num_stripes, sizeof(u64),
4918 GFP_NOFS);
4919 if (!raid_map) {
4920 ret = -ENOMEM;
4921 goto out;
4922 }
4923
4924 /* Work out the disk rotation on this stripe-set */
4925 tmp = stripe_nr;
4926 rot = do_div(tmp, num_stripes);
4927
4928 /* Fill in the logical address of each stripe */
4929 tmp = stripe_nr * nr_data_stripes(map);
4930 for (i = 0; i < nr_data_stripes(map); i++)
4931 raid_map[(i+rot) % num_stripes] =
4932 em->start + (tmp + i) * map->stripe_len;
4933
4934 raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
4935 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4936 raid_map[(i+rot+1) % num_stripes] =
4937 RAID6_Q_STRIPE;
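/*
 * Illustrative rotation: RAID5 on 3 devices (2 data + parity) with
 * a 64 KiB stripe_len and full stripe number 1 gives rot = 1, so
 * raid_map = { RAID5_P_STRIPE, em->start + 128K, em->start + 192K }:
 * the parity slot advances one device per full stripe, and
 * sort_parity_stripes() later uses these values to push P (and Q)
 * to the end of the stripe array.
 */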
4938
4939 *length = map->stripe_len;
4940 stripe_index = 0;
4941 stripe_offset = 0;
4942 } else {
4943 /*
4944 * Mirror #0 or #1 means the original data block.
4945 * Mirror #2 is RAID5 parity block.
4946 * Mirror #3 is RAID6 Q block.
4947 */
4948 stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4949 if (mirror_num > 1)
4950 stripe_index = nr_data_stripes(map) +
4951 mirror_num - 2;
4952
4953 /* We distribute the parity blocks across stripes */
4954 tmp = stripe_nr + stripe_index;
4955 stripe_index = do_div(tmp, map->num_stripes);
4956 }
4957 } else {
4958 /*
4959 * after this do_div call, stripe_nr is the number of stripes
4960 * on this device we have to walk to find the data, and
4961 * stripe_index is the number of our device in the stripe array
4962 */
4963 stripe_index = do_div(stripe_nr, map->num_stripes);
4964 mirror_num = stripe_index + 1;
4965 }
4966 BUG_ON(stripe_index >= map->num_stripes);
4967
4968 num_alloc_stripes = num_stripes;
4969 if (dev_replace_is_ongoing) {
4970 if (rw & (REQ_WRITE | REQ_DISCARD))
4971 num_alloc_stripes <<= 1;
4972 if (rw & REQ_GET_READ_MIRRORS)
4973 num_alloc_stripes++;
4974 }
4975 bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4976 if (!bbio) {
4977 kfree(raid_map);
4978 ret = -ENOMEM;
4979 goto out;
4980 }
4981 atomic_set(&bbio->error, 0);
4982
4983 if (rw & REQ_DISCARD) {
4984 int factor = 0;
4985 int sub_stripes = 0;
4986 u64 stripes_per_dev = 0;
4987 u32 remaining_stripes = 0;
4988 u32 last_stripe = 0;
4989
4990 if (map->type &
4991 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4992 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4993 sub_stripes = 1;
4994 else
4995 sub_stripes = map->sub_stripes;
4996
4997 factor = map->num_stripes / sub_stripes;
4998 stripes_per_dev = div_u64_rem(stripe_nr_end -
4999 stripe_nr_orig,
5000 factor,
5001 &remaining_stripes);
5002 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5003 last_stripe *= sub_stripes;
5004 }
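/*
 * Worked example for the split above: RAID0 on 3 devices
 * (sub_stripes 1, factor 3) discarding 7 stripes gives
 * stripes_per_dev = 7 / 3 = 2 with remaining_stripes = 1, so the
 * first device receives one extra stripe_len below; last_stripe
 * marks the device holding the final, possibly partial, stripe so
 * that stripe_end_offset can be trimmed from it.
 */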
5005
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			bbio->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
					 BTRFS_BLOCK_GROUP_RAID10)) {
				bbio->stripes[i].length = stripes_per_dev *
					map->stripe_len;

				if (i / sub_stripes < remaining_stripes)
					bbio->stripes[i].length +=
						map->stripe_len;

				/*
				 * Special for the first stripe and
				 * the last stripe:
				 *
				 * |-------|...|-------|
				 *     |----------|
				 *    off     end_off
				 */
				if (i < sub_stripes)
					bbio->stripes[i].length -=
						stripe_offset;

				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     sub_stripes - 1))
					bbio->stripes[i].length -=
						stripe_end_offset;

				if (i == sub_stripes - 1)
					stripe_offset = 0;
			} else
				bbio->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			bbio->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}

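	/*
	 * max_errors is how many stripe failures a write may tolerate:
	 * one for the mirrored and RAID5 profiles below, two for RAID6
	 * with its second parity stripe.
	 */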
	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_RAID10 |
				 BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
			max_errors = 2;
		}
	}

	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    dev_replace->tgtdev != NULL) {
		int index_where_to_add;
		u64 srcdev_devid = dev_replace->srcdev->devid;

		/*
		 * Duplicate the write operations while the dev-replace
		 * procedure is running.  Since the copying of the old disk
		 * to the new disk takes place at run time while the
		 * filesystem is mounted writable, the regular write
		 * operations to the old disk have to be duplicated to go
		 * to the new disk as well.
		 * Note that device->missing is handled by the caller, and
		 * that the write to the old disk is already set up in the
		 * stripes array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				index_where_to_add++;
				max_errors++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
		   dev_replace->tgtdev != NULL) {
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can
		 * also be used to read data in case it is needed to repair
		 * a corrupt block elsewhere.  This is possible if the
		 * requested area is left of the left cursor.  In this area,
		 * the target drive is a full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it
				 * simple, only add the mirror with the
				 * lowest physical address.
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			u64 length = map->stripe_len;

			if (physical_of_found + length <=
			    dev_replace->cursor_left) {
				struct btrfs_bio_stripe *tgtdev_stripe =
					bbio->stripes + num_stripes;

				tgtdev_stripe->physical = physical_of_found;
				tgtdev_stripe->length =
					bbio->stripes[index_srcdev].length;
				tgtdev_stripe->dev = dev_replace->tgtdev;

				num_stripes++;
			}
		}
	}

	*bbio_ret = bbio;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * This is the case where REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && the dev-replace target drive is
	 * available as a mirror.
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
	if (raid_map) {
		sort_parity_stripes(bbio, raid_map);
		*raid_map_ret = raid_map;
	}
out:
	if (dev_replace_is_ongoing)
		btrfs_dev_replace_unlock(dev_replace);
	free_extent_map(em);
	return ret;
}

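/*
 * Map a logical range to its physical stripes.  This wrapper does not
 * return the RAID5/6 parity map; callers that need the parity layout
 * call __btrfs_map_block() directly.
 */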
int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
				 mirror_num, NULL);
}

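/*
 * Reverse mapping: given a physical address on a device, collect the
 * logical addresses (one per copy) inside the chunk starting at
 * chunk_start that map to it.
 */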
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
		       chunk_start);
		return -EIO;
	}

	if (em->start != chunk_start) {
		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
		       em->start, chunk_start);
		free_extent_map(em);
		return -EIO;
	}
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	rmap_len = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			      BTRFS_BLOCK_GROUP_RAID6)) {
		do_div(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

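	/*
	 * length is now the part of the chunk that a single device covers.
	 * Illustrative example: a 4 GiB RAID10 chunk over 4 devices with
	 * 2 sub-stripes keeps 2 GiB of data per mirror pair, so length =
	 * em->len / (4 / 2) = 2 GiB per device.
	 */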
	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	if (!buf) {
		free_extent_map(em);
		return -ENOMEM;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		/*
		 * For RAID[56] we would multiply by nr_data_stripes();
		 * alternatively, just use rmap_len below instead of
		 * map->stripe_len.
		 */

		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}

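/*
 * Completion handler for the per-stripe bios: record I/O errors in the
 * per-device statistics and, once the last stripe bio finishes, complete
 * the original bio, reporting an error only if more stripes failed than
 * max_errors allows.
 */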
static void btrfs_end_bio(struct bio *bio, int err)
{
	struct btrfs_bio *bbio = bio->bi_private;
	struct btrfs_device *dev = bbio->stripes[0].dev;
	int is_orig_bio = 0;

	if (err) {
		atomic_inc(&bbio->error);
		if (err == -EIO || err == -EREMOTEIO) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio->bi_rw & WRITE)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
				btrfs_dev_stat_print_on_error(dev);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		/*
		 * We have the original bio now, so increment bi_remaining
		 * to account for it in the endio.
		 */
		atomic_inc(&bio->bi_remaining);

		bio->bi_private = bbio->private;
		bio->bi_end_io = bbio->end_io;
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/*
		 * Only send an error to the higher layers if it is beyond
		 * the tolerance of the btrfs bio.
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			err = -EIO;
		} else {
			/*
			 * This bio is actually up to date; we didn't go
			 * over the max number of errors.
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(bbio);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

/*
 * See run_scheduled_bios for a description of why bios are collected
 * for async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
					struct btrfs_device *device,
					int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	if (device->missing || !device->bdev) {
		bio_endio(bio, -EIO);
		return;
	}

	/* Don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later.
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_work(root->fs_info->submit_workers,
				 &device->work);
}

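/*
 * Check whether the queue below us would accept this bio at the given
 * sector without splitting; if not, the caller breaks the bio up into
 * smaller ones.
 */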
static int bio_size_ok(struct block_device *bdev, struct bio *bio,
		       sector_t sector)
{
	struct bio_vec *prev;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_sectors = queue_max_sectors(q);
	struct bvec_merge_data bvm = {
		.bi_bdev = bdev,
		.bi_sector = sector,
		.bi_rw = bio->bi_rw,
	};

	if (WARN_ON(bio->bi_vcnt == 0))
		return 1;

	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
	if (bio_sectors(bio) > max_sectors)
		return 0;

	if (!q->merge_bvec_fn)
		return 1;

	bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
		return 0;
	return 1;
}

static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *bio, u64 physical, int dev_nr,
			      int rw, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
#ifdef DEBUG
	{
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
			 "(%s id %llu), size=%u\n", rw,
			 (u64)bio->bi_iter.bi_sector,
			 (u_long)dev->bdev->bd_dev,
			 name->str, dev->devid, bio->bi_iter.bi_size);
		rcu_read_unlock();
	}
#endif
	bio->bi_bdev = dev->bdev;

	btrfs_bio_counter_inc_noblocked(root->fs_info);

	if (async)
		btrfs_schedule_bio(root, dev, rw, bio);
	else
		btrfsic_submit_bio(rw, bio);
}

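/*
 * Rebuild first_bio page by page into one or more smaller bios that the
 * target device will accept, submitting each bio as it fills up.
 */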
static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *first_bio, struct btrfs_device *dev,
			      int dev_nr, int rw, int async)
{
	struct bio_vec *bvec = first_bio->bi_io_vec;
	struct bio *bio;
	int nr_vecs = bio_get_nr_vecs(dev->bdev);
	u64 physical = bbio->stripes[dev_nr].physical;

again:
	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
	if (!bio)
		return -ENOMEM;

	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len) {
			u64 len = bio->bi_iter.bi_size;

			atomic_inc(&bbio->stripes_pending);
			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
					  rw, async);
			physical += len;
			goto again;
		}
		bvec++;
	}

	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
	return 0;
}

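/*
 * Account a stripe that could not be submitted at all as an error; if it
 * was the last outstanding stripe, complete the original bio with -EIO.
 */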
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		bio->bi_private = bbio->private;
		bio->bi_end_io = bbio->end_io;
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		kfree(bbio);
		bio_endio(bio, -EIO);
	}
}

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	u64 *raid_map = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(root->fs_info);
	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
				mirror_num, &raid_map);
	if (ret) {
		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = root->fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if (raid_map) {
		/*
		 * In this case, map_length has been set to the length of
		 * a single stripe, not the whole write.
		 */
		if (rw & WRITE) {
			ret = raid56_parity_write(root, bio, bbio,
						  raid_map, map_length);
		} else {
			ret = raid56_parity_recover(root, bio, bbio,
						    raid_map, map_length,
						    mirror_num);
		}
		/*
		 * FIXME: replace doesn't support raid56 yet; please fix
		 * it in the future.
		 */
		btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	if (map_length < length) {
		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}

	while (dev_nr < total_devs) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
			bbio_error(bbio, first_bio, logical);
			dev_nr++;
			continue;
		}

		/*
		 * Check and see if we're ok with this bio based on its
		 * size and offset with the given device.
		 */
		if (!bio_size_ok(dev->bdev, first_bio,
				 bbio->stripes[dev_nr].physical >> 9)) {
			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
						 dev_nr, rw, async_submit);
			BUG_ON(ret);
			dev_nr++;
			continue;
		}

		if (dev_nr < total_devs - 1) {
			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio); /* -ENOMEM */
		} else {
			bio = first_bio;
		}

		submit_stripe_bio(root, bbio, bio,
				  bbio->stripes[dev_nr].physical, dev_nr, rw,
				  async_submit);
		dev_nr++;
	}
	btrfs_bio_counter_dec(root->fs_info);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return NULL;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	device->missing = 1;
	fs_devices->missing_devices++;

	return device;
}

5647/**
5648 * btrfs_alloc_device - allocate struct btrfs_device
5649 * @fs_info: used only for generating a new devid, can be NULL if
5650 * devid is provided (i.e. @devid != NULL).
5651 * @devid: a pointer to devid for this device. If NULL a new devid
5652 * is generated.
5653 * @uuid: a pointer to UUID for this device. If NULL a new UUID
5654 * is generated.
5655 *
5656 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
5657 * on error. Returned struct is not linked onto any lists and can be
5658 * destroyed with kfree() right away.
5659 */
5660struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
5661 const u64 *devid,
5662 const u8 *uuid)
5663{
5664 struct btrfs_device *dev;
5665 u64 tmp;
5666
5667 if (WARN_ON(!devid && !fs_info))
5668 return ERR_PTR(-EINVAL);
5669
5670 dev = __alloc_device();
5671 if (IS_ERR(dev))
5672 return dev;
5673
5674 if (devid)
5675 tmp = *devid;
5676 else {
5677 int ret;
5678
5679 ret = find_next_devid(fs_info, &tmp);
5680 if (ret) {
5681 kfree(dev);
5682 return ERR_PTR(ret);
5683 }
5684 }
5685 dev->devid = tmp;
5686
5687 if (uuid)
5688 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
5689 else
5690 generate_random_uuid(dev->uuid);
5691
5692 btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL);
5693
5694 return dev;
5695}
5696
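/*
 * Read one chunk item (from the chunk tree or the sys_chunk_array) and
 * insert the corresponding mapping into the fs-wide mapping tree, adding
 * "missing" device stubs when mounted degraded.
 */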
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}

static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	device->is_tgtdev_for_dev_replace = 0;

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	BUG_ON(!mutex_is_locked(&uuid_mutex));

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		goto out;
	}

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			btrfs_warn(root->fs_info, "devid %llu missing", devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		} else if (!device->missing) {
			/*
			 * This happens when a device that was properly set
			 * up in the device info lists suddenly goes bad:
			 * device->bdev is NULL, so we have to set
			 * device->missing to one here.
			 */
			root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->in_fs_metadata = 1;
	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = 0;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the
	 * system array.  The btrfs_set_buffer_uptodate() call does not
	 * properly mark all its pages up-to-date when the page is larger:
	 * the extent does not cover the whole page and consequently
	 * check_page_uptodate does not find all the page's extents
	 * up-to-date (the hole beyond sb), and write_extent_buffer then
	 * triggers a WARN_ON.
	 *
	 * Regular short extents go through the
	 * mark_extent_buffer_dirty/writeback cycle, but sb spans only this
	 * function.  Add an explicit SetPageUptodate call to silence the
	 * warning, e.g. on PowerPC 64.
	 */
	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

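	/*
	 * sys_chunk_array is a packed sequence of (disk key, chunk item)
	 * pairs; walk it by advancing cur past each fixed-size key and each
	 * variably-sized chunk item in turn.
	 */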
	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);

	/*
	 * Read all device items, and then all the chunk items.  All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
			if (ret)
				goto error;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	ret = 0;
error:
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->dev_root = fs_info->dev_root;
	mutex_unlock(&fs_devices->device_list_mutex);
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}

int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = 0;
		key.type = BTRFS_DEV_STATS_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

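/*
 * Write the in-memory counters of one device into its dev_stats item,
 * deleting and re-inserting the item if the existing one is smaller than
 * the current format.
 */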
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = 0;
	key.type = BTRFS_DEV_STATS_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		printk_in_rcu(KERN_WARNING "BTRFS: "
			      "error %d while searching for dev_stats item for device %s!\n",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			printk_in_rcu(KERN_WARNING "BTRFS: "
				      "delete too small dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			printk_in_rcu(KERN_WARNING "BTRFS: "
				      "insert dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

6199/*
6200 * called from commit_transaction. Writes all changed device stats to disk.
6201 */
6202int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6203 struct btrfs_fs_info *fs_info)
6204{
6205 struct btrfs_root *dev_root = fs_info->dev_root;
6206 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6207 struct btrfs_device *device;
6208 int ret = 0;
6209
6210 mutex_lock(&fs_devices->device_list_mutex);
6211 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6212 if (!device->dev_stats_valid || !device->dev_stats_dirty)
6213 continue;
6214
6215 ret = update_dev_stat_item(trans, dev_root, device);
6216 if (!ret)
6217 device->dev_stats_dirty = 0;
6218 }
6219 mutex_unlock(&fs_devices->device_list_mutex);
6220
6221 return ret;
6222}
6223
6224void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6225{
6226 btrfs_dev_stat_inc(dev, index);
6227 btrfs_dev_stat_print_on_error(dev);
6228}
6229
6230static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6231{
6232 if (!dev->dev_stats_valid)
6233 return;
6234 printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
6235 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6236 rcu_str_deref(dev->name),
6237 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6238 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6239 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6240 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6241 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6242}
6243
6244static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6245{
6246 int i;
6247
6248 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6249 if (btrfs_dev_stat_read(dev, i) != 0)
6250 break;
6251 if (i == BTRFS_DEV_STAT_VALUES_MAX)
6252 return; /* all values == 0, suppress message */
6253
6254 printk_in_rcu(KERN_INFO "BTRFS: "
6255 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6256 rcu_str_deref(dev->name),
6257 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6258 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6259 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6260 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6261 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6262}
6263
6264int btrfs_get_dev_stats(struct btrfs_root *root,
6265 struct btrfs_ioctl_get_dev_stats *stats)
6266{
6267 struct btrfs_device *dev;
6268 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6269 int i;
6270
6271 mutex_lock(&fs_devices->device_list_mutex);
6272 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6273 mutex_unlock(&fs_devices->device_list_mutex);
6274
6275 if (!dev) {
6276 btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
6277 return -ENODEV;
6278 } else if (!dev->dev_stats_valid) {
6279 btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
6280 return -ENODEV;
6281 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6282 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6283 if (stats->nr_items > i)
6284 stats->values[i] =
6285 btrfs_dev_stat_read_and_reset(dev, i);
6286 else
6287 btrfs_dev_stat_reset(dev, i);
6288 }
6289 } else {
6290 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6291 if (stats->nr_items > i)
6292 stats->values[i] = btrfs_dev_stat_read(dev, i);
6293 }
6294 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6295 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6296 return 0;
6297}
6298
6299int btrfs_scratch_superblock(struct btrfs_device *device)
6300{
6301 struct buffer_head *bh;
6302 struct btrfs_super_block *disk_super;
6303
6304 bh = btrfs_read_dev_super(device->bdev);
6305 if (!bh)
6306 return -EINVAL;
6307 disk_super = (struct btrfs_super_block *)bh->b_data;
6308
6309 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6310 set_buffer_dirty(bh);
6311 sync_dirty_buffer(bh);
6312 brelse(bh);
6313
6314 return 0;
6315}