// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
					 BTRFS_BLOCK_GROUP_RAID10 | \
					 BTRFS_BLOCK_GROUP_RAID56_MASK)

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 3,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 3,
		.ncopies	= 3,
		.nparity        = 0,
		.raid_name	= "raid1c3",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 4,
		.devs_min	= 4,
		.tolerated_failures = 3,
		.devs_increment	= 4,
		.ncopies	= 4,
		.nparity        = 0,
		.raid_name	= "raid1c4",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
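
/*
 * Illustrative sketch, not part of the original file: one way the table
 * above is consumed. The helper name and its formula are hypothetical;
 * only btrfs_raid_array[], btrfs_bg_flags_to_raid_index(), ncopies and
 * nparity come from this file. For a chunk with @num_stripes stripes and
 * @raw_bytes of raw space, the parity stripes are subtracted and the rest
 * is divided by the number of copies: raid10 (ncopies=2, nparity=0) yields
 * half the raw bytes, raid6 (ncopies=1, nparity=2) on five stripes yields
 * three fifths.
 */
static inline u64 __maybe_unused example_usable_bytes(u64 flags, u64 raw_bytes,
						      int num_stripes)
{
	const struct btrfs_raid_attr *attr =
		&btrfs_raid_array[btrfs_bg_flags_to_raid_index(flags)];
	/* Stripes that carry data rather than parity. */
	const int data_stripes = num_stripes - attr->nparity;

	/* Each logical byte is stored attr->ncopies times. */
	return div_u64(raw_bytes * data_stripes,
		       num_stripes * attr->ncopies);
}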

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

	if (!profile)
		return BTRFS_RAID_SINGLE;

	return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

int btrfs_nr_parity_stripes(u64 type)
{
	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);

	return btrfs_raid_array[index].nparity;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)						\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed; it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
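
/*
 * Usage sketch, not part of the original file: feeding the flags of a data
 * block group with the raid1 profile through the helper above yields the
 * string "data|raid1" (the trailing '|' is trimmed).
 */
static inline void __maybe_unused example_describe_bg(void)
{
	char buf[64];

	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
	/* buf now holds "data|raid1" */
}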

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files.
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount, either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by e.g.
 * the scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, i.e. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of the Paused state, fs_info::exclusive_operation
 * remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
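
/*
 * Illustrative sketch, not a function from this file: the nesting order
 * documented above means that when several of these locks are needed they
 * are taken outermost-first and released in reverse.
 */
static inline void __maybe_unused example_lock_nesting(
					struct btrfs_fs_devices *fs_devices,
					struct btrfs_fs_info *fs_info)
{
	mutex_lock(&uuid_mutex);			/* outermost */
	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);		/* innermost */

	/* ... manipulate devices and chunks here ... */

	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);
}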

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split-brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{

	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case of a scanned device having completed its fsid
	 * change but belonging to a fs_devices that was created by first
	 * scanning a device which didn't have its fsid/metadata_uuid changed
	 * at all and has the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle the case of a scanned device having completed its fsid
	 * change but belonging to a fs_devices that was created by a device
	 * that has an outdated pair of fsid/metadata_uuid and the
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}


static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		sync_blockdev(*bdev);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}

/*
 *  Search and remove all stale devices (which are not mounted).  When both
 *  inputs are NULL, it will search and release all stale devices.
 *
 *  @devt:         Optional. When provided, it will release all unmounted
 *                 devices matching this devt only.
 *  @skip_device:  Optional. Will skip this device when searching for the stale
 *                 devices.
 *
 *  Return:	0 for success or if @devt is 0.
 *		-EBUSY if @devt is a mounted device.
 *		-ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (devt)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (devt && devt != device->devt)
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (devt && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	if (!bdev_nonrot(bdev))
		fs_devices->rotating = true;

	if (bdev_max_discard_sectors(bdev))
		fs_devices->discardable = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle the case of a scanned device having its CHANGING_FSID_V2 flag set
 * and the fs_devices being created with a disk that has already completed
 * its fsid change. Such a disk can belong to an fs which has its FSID
 * changed or to one which doesn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}


static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but the current device didn't
	 * observe it. Meaning our fsid will be different from theirs. We need
	 * to handle two subcases:
	 *  1 - The fs still continues to have different METADATA/FSID uuids.
	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
	 *  are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose
	 * last metadata UUID change reverted it to the original FSID. At the
	 * same time fs_devices was first created by another constituent
	 * device which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}
/*
 * Add a new device to the list of registered devices.
 *
 * Returns:
 * the device pointer which was just added or updated when successful
 * an error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_t path_devt;
	int error;
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	error = lookup_bdev(path, &path_devt);
	if (error) {
		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
			  path, error);
		return ERR_PTR(error);
	}

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}


	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		struct btrfs_dev_lookup_args args = {
			.devid = devid,
			.uuid = disk_super->dev_item.uuid,
		};

		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, &args);

		/*
		 * If this disk has been pulled into an fs_devices created by
		 * a device which had the CHANGING_FSID_V2 flag set, then
		 * replace the metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		unsigned int nofs_flag;

		if (fs_devices->opened) {
			btrfs_err(NULL,
		"device %s belongs to fsid %pU, and the fs is already mounted",
				  path, fs_devices->fsid);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		nofs_flag = memalloc_nofs_save();
		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid, path);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		device->devt = path_devt;

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When the FS is already mounted:
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_err(NULL,
"device %s already registered with a higher generation, found %llu expect %llu",
				  path, found_transid, device->generation);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 *
		 * NOTE: the device->fs_info may not be reliable here so pass
		 * in a NULL to message helpers instead. This avoids a possible
		 * use-after-free when the fs_info and fs_info->sb are already
		 * torn down.
		 */
		if (device->bdev) {
			if (device->devt != path_devt) {
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			btrfs_info_in_rcu(NULL,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, btrfs_dev_name(device),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
		device->devt = path_devt;
	}

	/*
	 * Unmount does not free the btrfs_device struct but zeroes the
	 * generation along with most of the other members. So just update
	 * it here. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		const char *dev_path = NULL;

		/*
		 * This is ok to do without RCU read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name)
			dev_path = orig_dev->name->str;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid, dev_path);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		if (orig_dev->zone_info) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = btrfs_clone_dev_zone_info(orig_dev);
			if (!zone_info) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			device->zone_info = zone_info;
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace() so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, &latest_dev);

	fs_devices->latest_dev = latest_dev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	btrfs_destroy_dev_zone_info(device);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		list_splice_init(&fs_devices->seed_list, &list);

		/*
		 * If the struct btrfs_fs_devices is not assembled with any
		 * other device, it can be re-initialized during the next mount
		 * without needing the device-scan step. Therefore, it can be
		 * fully freed.
		 */
		if (fs_devices->num_devices == 1) {
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}


	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_dev = latest_dev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
	fs_devices->read_policy = BTRFS_READ_POLICY_PID;

	return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */

	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr, u64 bytenr_orig)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);
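
	/*
	 * Worked example (annotation, not in the original source): with 4 KiB
	 * pages, the primary super copy at bytenr 65536 gives index 16 and
	 * page offset 0, so a super block no larger than PAGE_SIZE ends in
	 * the same page and passes the check above.
	 */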

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}

int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr, bytenr_orig;
	int ret;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * We would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
	 */
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	bytenr_orig = btrfs_sb_offset(0);
	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
	if (ret) {
		device = ERR_PTR(ret);
		goto error_bdev_put;
	}

	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device) && new_device_added)
		btrfs_free_stale_devices(device->devt, device);

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * Unlike the regular allocator, we don't care about the
		 * starting region, because we reserve the first two zones
		 * for superblock logging anyway.
		 */
		return ALIGN(start, device->zone_info->zone_size);
	default:
		BUG();
	}
}

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
					u64 *hole_start, u64 *hole_size,
					u64 num_bytes)
{
	u64 zone_size = device->zone_info->zone_size;
	u64 pos;
	int ret;
	bool changed = false;

	ASSERT(IS_ALIGNED(*hole_start, zone_size));

	while (*hole_size > 0) {
		pos = btrfs_find_allocatable_zones(device, *hole_start,
						   *hole_start + *hole_size,
						   num_bytes);
		if (pos != *hole_start) {
			*hole_size = *hole_start + *hole_size - pos;
			*hole_start = pos;
			changed = true;
			if (*hole_size < num_bytes)
				break;
		}

		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

		/* Range is ensured to be empty */
		if (!ret)
			return changed;

		/* Given hole range was invalid (outside of device) */
		if (ret == -ERANGE) {
			*hole_start += *hole_size;
			*hole_size = 0;
			return true;
		}

		*hole_start += zone_size;
		*hole_size -= zone_size;
		changed = true;
	}

	return changed;
}

/*
 * Check if specified hole is suitable for allocation.
 *
 * @device:	the device on which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	for (;;) {
		/*
		 * Check before we set max_hole_start, otherwise we could end up
		 * sending back this offset anyway.
		 */
		if (contains_pending_extent(device, hole_start, *hole_size)) {
			if (hole_end >= *hole_start)
				*hole_size = hole_end - *hole_start;
			else
				*hole_size = 0;
			changed = true;
		}

		switch (device->fs_devices->chunk_alloc_policy) {
		case BTRFS_CHUNK_ALLOC_REGULAR:
			/* No extra check */
			break;
		case BTRFS_CHUNK_ALLOC_ZONED:
			if (dev_extent_hole_check_zoned(device, hole_start,
							hole_size, num_bytes)) {
				changed = true;
				/*
				 * The changed hole can contain pending extent.
				 * Loop again to check that.
				 */
				continue;
			}
			break;
		default:
			BUG();
		}

		break;
	}

	return changed;
}

/*
 * Find free space in the specified device.
 *
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * This does a pretty simple search, the expectation is that it is called very
 * infrequently and that a given device has a small number of extents.
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search the *commit* root of the device tree, and
 * does an extra check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as a device extent freed in the current
 * transaction is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	WARN_ON(device->zone_info &&
		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_backwards(root, &key, path);
	if (ret < 0)
		goto out;

	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
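
/*
 * Example call, not part of the original file: ask for a 1 GiB hole on
 * @device. On success *start holds the beginning of a suitable hole; on
 * -ENOSPC it holds the start of the largest hole that was found.
 */
static inline int __maybe_unused example_find_1g_hole(struct btrfs_device *device,
						      u64 *start)
{
	u64 len;

	return find_free_dev_extent(device, SZ_1G, start, &len);
}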

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret == 0)
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
1834
1835/*
1836 * The device information is stored in the chunk root.
1837 * The btrfs_device struct should be fully filled in before calling this.
1838 */
1839static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1840			    struct btrfs_device *device)
1841{
1842	int ret;
1843	struct btrfs_path *path;
1844	struct btrfs_dev_item *dev_item;
1845	struct extent_buffer *leaf;
1846	struct btrfs_key key;
1847	unsigned long ptr;
1848
1849	path = btrfs_alloc_path();
1850	if (!path)
1851		return -ENOMEM;
1852
1853	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1854	key.type = BTRFS_DEV_ITEM_KEY;
1855	key.offset = device->devid;
1856
1857	btrfs_reserve_chunk_metadata(trans, true);
1858	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1859				      &key, sizeof(*dev_item));
1860	btrfs_trans_release_chunk_metadata(trans);
1861	if (ret)
1862		goto out;
1863
1864	leaf = path->nodes[0];
1865	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1866
1867	btrfs_set_device_id(leaf, dev_item, device->devid);
1868	btrfs_set_device_generation(leaf, dev_item, 0);
1869	btrfs_set_device_type(leaf, dev_item, device->type);
1870	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1871	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1872	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1873	btrfs_set_device_total_bytes(leaf, dev_item,
1874				     btrfs_device_get_disk_total_bytes(device));
1875	btrfs_set_device_bytes_used(leaf, dev_item,
1876				    btrfs_device_get_bytes_used(device));
1877	btrfs_set_device_group(leaf, dev_item, 0);
1878	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1879	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1880	btrfs_set_device_start_offset(leaf, dev_item, 0);
1881
1882	ptr = btrfs_device_uuid(dev_item);
1883	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1884	ptr = btrfs_device_fsid(dev_item);
1885	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1886			    ptr, BTRFS_FSID_SIZE);
1887	btrfs_mark_buffer_dirty(leaf);
1888
1889	ret = 0;
1890out:
1891	btrfs_free_path(path);
1892	return ret;
1893}
1894
1895/*
1896 * Function to update ctime/mtime for a given device path.
1897 * Mainly used for ctime/mtime based probes like libblkid.
1898 *
1899 * We don't care about errors here; this is just to be kind to userspace.
1900 */
1901static void update_dev_time(const char *device_path)
1902{
1903	struct path path;
1904	struct timespec64 now;
1905	int ret;
1906
1907	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1908	if (ret)
1909		return;
1910
1911	now = current_time(d_inode(path.dentry));
1912	inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1913	path_put(&path);
1914}
1915
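/*
 * Delete the dev item for @device from the chunk tree. Returns -ENOENT when
 * the item is not found; any error here is treated as critical by the
 * device-removal caller below.
 */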
1916static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
1917			     struct btrfs_device *device)
1918{
1919	struct btrfs_root *root = device->fs_info->chunk_root;
1920	int ret;
1921	struct btrfs_path *path;
1922	struct btrfs_key key;
1923
1924	path = btrfs_alloc_path();
1925	if (!path)
1926		return -ENOMEM;
1927
1928	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1929	key.type = BTRFS_DEV_ITEM_KEY;
1930	key.offset = device->devid;
1931
1932	btrfs_reserve_chunk_metadata(trans, false);
1933	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1934	btrfs_trans_release_chunk_metadata(trans);
1935	if (ret) {
1936		if (ret > 0)
1937			ret = -ENOENT;
1938		goto out;
1939	}
1940
1941	ret = btrfs_del_item(trans, root, path);
1942out:
1943	btrfs_free_path(path);
1944	return ret;
1945}
1946
1947/*
1948 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1949 * filesystem. It's up to the caller to adjust that number regarding eg. device
1950 * replace.
1951 */
1952static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1953		u64 num_devices)
1954{
1955	u64 all_avail;
1956	unsigned seq;
1957	int i;
1958
1959	do {
1960		seq = read_seqbegin(&fs_info->profiles_lock);
1961
1962		all_avail = fs_info->avail_data_alloc_bits |
1963			    fs_info->avail_system_alloc_bits |
1964			    fs_info->avail_metadata_alloc_bits;
1965	} while (read_seqretry(&fs_info->profiles_lock, seq));
1966
1967	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1968		if (!(all_avail & btrfs_raid_array[i].bg_flag))
1969			continue;
1970
1971		if (num_devices < btrfs_raid_array[i].devs_min)
1972			return btrfs_raid_array[i].mindev_error;
1973	}
1974
1975	return 0;
1976}
1977
1978static struct btrfs_device *btrfs_find_next_active_device(
1979		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1980{
1981	struct btrfs_device *next_device;
1982
1983	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1984		if (next_device != device &&
1985		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1986		    && next_device->bdev)
1987			return next_device;
1988	}
1989
1990	return NULL;
1991}
1992
1993/*
1994 * Helper function to check if the given device is part of s_bdev / latest_dev
1995 * and replace it with the provided or the next active device. In the context
1996 * where this function is called, there should always be another active
1997 * device (or next_device).
1998 */
1999void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2000					    struct btrfs_device *next_device)
2001{
2002	struct btrfs_fs_info *fs_info = device->fs_info;
2003
2004	if (!next_device)
2005		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2006							    device);
2007	ASSERT(next_device);
2008
2009	if (fs_info->sb->s_bdev &&
2010			(fs_info->sb->s_bdev == device->bdev))
2011		fs_info->sb->s_bdev = next_device->bdev;
2012
2013	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2014		fs_info->fs_devices->latest_dev = next_device;
2015}
2016
2017/*
2018 * Return btrfs_fs_devices::num_devices excluding the device that's being
2019 * currently replaced.
2020 */
2021static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2022{
2023	u64 num_devices = fs_info->fs_devices->num_devices;
2024
2025	down_read(&fs_info->dev_replace.rwsem);
2026	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2027		ASSERT(num_devices > 1);
2028		num_devices--;
2029	}
2030	up_read(&fs_info->dev_replace.rwsem);
2031
2032	return num_devices;
2033}
2034
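/*
 * "Scratching" a super block copy means wiping only its magic bytes so the
 * copy is no longer recognized as btrfs by scans, while the rest of the
 * block is left intact; the dirtied folio is then written back with
 * sync_blockdev_range().
 */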
2035static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
2036				     struct block_device *bdev, int copy_num)
2037{
2038	struct btrfs_super_block *disk_super;
2039	const size_t len = sizeof(disk_super->magic);
2040	const u64 bytenr = btrfs_sb_offset(copy_num);
2041	int ret;
2042
2043	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
2044	if (IS_ERR(disk_super))
2045		return;
2046
2047	memset(&disk_super->magic, 0, len);
2048	folio_mark_dirty(virt_to_folio(disk_super));
2049	btrfs_release_disk_super(disk_super);
2050
2051	ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
2052	if (ret)
2053		btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
2054			copy_num, ret);
2055}
2056
2057void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2058			       struct block_device *bdev,
2059			       const char *device_path)
2060{
2061	int copy_num;
2062
2063	if (!bdev)
2064		return;
2065
2066	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2067		if (bdev_is_zoned(bdev))
2068			btrfs_reset_sb_log_zones(bdev, copy_num);
2069		else
2070			btrfs_scratch_superblock(fs_info, bdev, copy_num);
2071	}
2072
2073	/* Notify udev that device has changed */
2074	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2075
2076	/* Update ctime/mtime for device path for libblkid */
2077	update_dev_time(device_path);
2078}
2079
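/*
 * The overall remove sequence below: check the RAID minimum device
 * constraints, shrink the device to zero (relocating its chunks), delete
 * its dev item, unlink it from the in-memory lists and super block
 * counters, scratch its super blocks, and commit the transaction.
 */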
2080int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2081		    struct btrfs_dev_lookup_args *args,
2082		    struct block_device **bdev, fmode_t *mode)
2083{
2084	struct btrfs_trans_handle *trans;
2085	struct btrfs_device *device;
2086	struct btrfs_fs_devices *cur_devices;
2087	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2088	u64 num_devices;
2089	int ret = 0;
2090
2091	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
2092		btrfs_err(fs_info, "device remove not supported on extent tree v2 yet");
2093		return -EINVAL;
2094	}
2095
2096	/*
2097	 * The device list in fs_devices is accessed without locks (neither
2098	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2099	 * filesystem and another device rm cannot run.
2100	 */
2101	num_devices = btrfs_num_devices(fs_info);
2102
2103	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2104	if (ret)
2105		return ret;
2106
2107	device = btrfs_find_device(fs_info->fs_devices, args);
2108	if (!device) {
2109		if (args->missing)
2110			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2111		else
2112			ret = -ENOENT;
2113		return ret;
2114	}
2115
2116	if (btrfs_pinned_by_swapfile(fs_info, device)) {
2117		btrfs_warn_in_rcu(fs_info,
2118		  "cannot remove device %s (devid %llu) due to active swapfile",
2119				  btrfs_dev_name(device), device->devid);
2120		return -ETXTBSY;
2121	}
2122
2123	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2124		return BTRFS_ERROR_DEV_TGT_REPLACE;
2125
2126	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2127	    fs_info->fs_devices->rw_devices == 1)
2128		return BTRFS_ERROR_DEV_ONLY_WRITABLE;
2129
2130	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2131		mutex_lock(&fs_info->chunk_mutex);
2132		list_del_init(&device->dev_alloc_list);
2133		device->fs_devices->rw_devices--;
2134		mutex_unlock(&fs_info->chunk_mutex);
2135	}
2136
2137	ret = btrfs_shrink_device(device, 0);
2138	if (ret)
2139		goto error_undo;
2140
2141	trans = btrfs_start_transaction(fs_info->chunk_root, 0);
2142	if (IS_ERR(trans)) {
2143		ret = PTR_ERR(trans);
2144		goto error_undo;
2145	}
2146
2147	ret = btrfs_rm_dev_item(trans, device);
2148	if (ret) {
2149		/* Any error in dev item removal is critical */
2150		btrfs_crit(fs_info,
2151			   "failed to remove device item for devid %llu: %d",
2152			   device->devid, ret);
2153		btrfs_abort_transaction(trans, ret);
2154		btrfs_end_transaction(trans);
2155		return ret;
2156	}
2157
2158	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2159	btrfs_scrub_cancel_dev(device);
2160
2161	/*
2162	 * The device list mutex makes sure that we don't change
2163	 * the device list while someone else is writing out all
2164	 * the device supers. Whoever is writing all supers should
2165	 * lock the device list mutex before getting the number of
2166	 * devices in the super block (super_copy). Conversely,
2167	 * whoever updates the number of devices in the super block
2168	 * (super_copy) should hold the device list mutex.
2169	 */
2170
2171	/*
2172	 * In normal cases cur_devices == fs_devices. But when deleting a
2173	 * seed device, cur_devices points to the seed's own fs_devices,
2174	 * listed under fs_devices->seed_list.
2175	 */
2176	cur_devices = device->fs_devices;
2177	mutex_lock(&fs_devices->device_list_mutex);
2178	list_del_rcu(&device->dev_list);
2179
2180	cur_devices->num_devices--;
2181	cur_devices->total_devices--;
2182	/* Update total_devices of the parent fs_devices if it's seed */
2183	if (cur_devices != fs_devices)
2184		fs_devices->total_devices--;
2185
2186	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2187		cur_devices->missing_devices--;
2188
2189	btrfs_assign_next_active_device(device, NULL);
2190
2191	if (device->bdev) {
2192		cur_devices->open_devices--;
2193		/* remove sysfs entry */
2194		btrfs_sysfs_remove_device(device);
2195	}
2196
2197	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2198	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2199	mutex_unlock(&fs_devices->device_list_mutex);
2200
2201	/*
2202	 * At this point, the device is zero sized and detached from the
2203	 * devices list.  All that's left is to zero out the old supers and
2204	 * free the device.
2205	 *
2206	 * We cannot call btrfs_close_bdev() here because we're holding the sb
2207	 * write lock, and blkdev_put() will pull in the ->open_mutex on the
2208	 * block device and its dependencies.  Instead just flush the device
2209	 * and let the caller do the final blkdev_put.
2210	 */
2211	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2212		btrfs_scratch_superblocks(fs_info, device->bdev,
2213					  device->name->str);
2214		if (device->bdev) {
2215			sync_blockdev(device->bdev);
2216			invalidate_bdev(device->bdev);
2217		}
2218	}
2219
2220	*bdev = device->bdev;
2221	*mode = device->mode;
2222	synchronize_rcu();
2223	btrfs_free_device(device);
2224
2225	/*
2226	 * This can happen if cur_devices is the private seed devices list.  We
2227	 * cannot call close_fs_devices() here because it expects the uuid_mutex
2228	 * to be held, but in fact we don't need that for the private
2229	 * seed_devices, we can simply decrement cur_devices->opened and then
2230	 * remove it from our list and free the fs_devices.
2231	 */
2232	if (cur_devices->num_devices == 0) {
2233		list_del_init(&cur_devices->seed_list);
2234		ASSERT(cur_devices->opened == 1);
2235		cur_devices->opened--;
2236		free_fs_devices(cur_devices);
2237	}
2238
2239	ret = btrfs_commit_transaction(trans);
2240
2241	return ret;
2242
2243error_undo:
2244	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2245		mutex_lock(&fs_info->chunk_mutex);
2246		list_add(&device->dev_alloc_list,
2247			 &fs_devices->alloc_list);
2248		device->fs_devices->rw_devices++;
2249		mutex_unlock(&fs_info->chunk_mutex);
2250	}
2251	return ret;
2252}
2253
2254void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2255{
2256	struct btrfs_fs_devices *fs_devices;
2257
2258	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2259
2260	/*
2261	 * In case of an fs with no seed, srcdev->fs_devices will point to
2262	 * the fs_devices of fs_info. However, when the device being replaced
2263	 * is a seed device, it will point to the seed's local fs_devices.
2264	 * In short, srcdev will have its correct fs_devices in both cases.
2265	 */
2266	fs_devices = srcdev->fs_devices;
2267
2268	list_del_rcu(&srcdev->dev_list);
2269	list_del(&srcdev->dev_alloc_list);
2270	fs_devices->num_devices--;
2271	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2272		fs_devices->missing_devices--;
2273
2274	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2275		fs_devices->rw_devices--;
2276
2277	if (srcdev->bdev)
2278		fs_devices->open_devices--;
2279}
2280
2281void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2282{
2283	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2284
2285	mutex_lock(&uuid_mutex);
2286
2287	btrfs_close_bdev(srcdev);
2288	synchronize_rcu();
2289	btrfs_free_device(srcdev);
2290
2291	/* If there are no devices left, delete the fs_devices. */
2292	if (!fs_devices->num_devices) {
2293		/*
2294		 * On a mounted FS, num_devices can't be zero unless it's a
2295		 * seed. In case of a seed device being replaced, the replace
2296		 * target is added to the sprout FS, so there will be no
2297		 * devices left under the seed FS.
2298		 */
2299		ASSERT(fs_devices->seeding);
2300
2301		list_del_init(&fs_devices->seed_list);
2302		close_fs_devices(fs_devices);
2303		free_fs_devices(fs_devices);
2304	}
2305	mutex_unlock(&uuid_mutex);
2306}
2307
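/*
 * Tear down a replace target device: remove it from sysfs and the device
 * lists, hand off s_bdev/latest_dev if needed, then scratch its super
 * blocks, close the bdev and free it.
 */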
2308void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2309{
2310	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2311
2312	mutex_lock(&fs_devices->device_list_mutex);
2313
2314	btrfs_sysfs_remove_device(tgtdev);
2315
2316	if (tgtdev->bdev)
2317		fs_devices->open_devices--;
2318
2319	fs_devices->num_devices--;
2320
2321	btrfs_assign_next_active_device(tgtdev, NULL);
2322
2323	list_del_rcu(&tgtdev->dev_list);
2324
2325	mutex_unlock(&fs_devices->device_list_mutex);
2326
2327	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2328				  tgtdev->name->str);
2329
2330	btrfs_close_bdev(tgtdev);
2331	synchronize_rcu();
2332	btrfs_free_device(tgtdev);
2333}
2334
2335/*
2336 * Populate args from device at path.
2337 *
2338 * @fs_info:	the filesystem
2339 * @args:	the args to populate
2340 * @path:	the path to the device
2341 *
2342 * This will read the super block of the device at @path and populate @args with
2343 * the devid, fsid, and uuid.  This is meant to be used for ioctls that need to
2344 * lookup a device to operate on, but need to do it before we take any locks.
2345 * This properly handles the special case of "missing" that a user may pass in,
2346 * and does some basic sanity checks.  The caller must make sure that @path is
2347 * properly NUL terminated before calling in, and must call
2348 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2349 * uuid buffers.
2350 *
2351 * Return: 0 for success, -errno for failure
2352 */
2353int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2354				 struct btrfs_dev_lookup_args *args,
2355				 const char *path)
2356{
2357	struct btrfs_super_block *disk_super;
2358	struct block_device *bdev;
2359	int ret;
2360
2361	if (!path || !path[0])
2362		return -EINVAL;
2363	if (!strcmp(path, "missing")) {
2364		args->missing = true;
2365		return 0;
2366	}
2367
2368	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2369	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2370	if (!args->uuid || !args->fsid) {
2371		btrfs_put_dev_args_from_path(args);
2372		return -ENOMEM;
2373	}
2374
2375	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
2376				    &bdev, &disk_super);
2377	if (ret) {
2378		btrfs_put_dev_args_from_path(args);
2379		return ret;
2380	}
2381
2382	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2383	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2384	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2385		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2386	else
2387		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2388	btrfs_release_disk_super(disk_super);
2389	blkdev_put(bdev, FMODE_READ);
2390	return 0;
2391}
2392
2393/*
2394 * Only use this jointly with btrfs_get_dev_args_from_path() because we will
2395 * allocate our ->uuid and ->fsid pointers; everybody else uses local variables
2396 * that don't need to be freed.
2397 */
2398void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
2399{
2400	kfree(args->uuid);
2401	kfree(args->fsid);
2402	args->uuid = NULL;
2403	args->fsid = NULL;
2404}
2405
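/*
 * Resolve a device either by a non-zero devid or by a path; the special
 * "missing" path string is handled by btrfs_get_dev_args_from_path().
 */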
2406struct btrfs_device *btrfs_find_device_by_devspec(
2407		struct btrfs_fs_info *fs_info, u64 devid,
2408		const char *device_path)
2409{
2410	BTRFS_DEV_LOOKUP_ARGS(args);
2411	struct btrfs_device *device;
2412	int ret;
2413
2414	if (devid) {
2415		args.devid = devid;
2416		device = btrfs_find_device(fs_info->fs_devices, &args);
2417		if (!device)
2418			return ERR_PTR(-ENOENT);
2419		return device;
2420	}
2421
2422	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
2423	if (ret)
2424		return ERR_PTR(ret);
2425	device = btrfs_find_device(fs_info->fs_devices, &args);
2426	btrfs_put_dev_args_from_path(&args);
2427	if (!device)
2428		return ERR_PTR(-ENOENT);
2429	return device;
2430}
2431
2432static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
2433{
2434	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2435	struct btrfs_fs_devices *old_devices;
2436	struct btrfs_fs_devices *seed_devices;
2437
2438	lockdep_assert_held(&uuid_mutex);
2439	if (!fs_devices->seeding)
2440		return ERR_PTR(-EINVAL);
2441
2442	/*
2443	 * Private copy of the seed devices, anchored at
2444	 * fs_info->fs_devices->seed_list
2445	 */
2446	seed_devices = alloc_fs_devices(NULL, NULL);
2447	if (IS_ERR(seed_devices))
2448		return seed_devices;
2449
2450	/*
2451	 * It's necessary to retain a copy of the original seed fs_devices in
2452	 * fs_uuids so that filesystems which have been seeded can successfully
2453	 * reference the seed device from open_seed_devices. This also supports
2454	 * multiple seed filesystems.
2455	 */
2456	old_devices = clone_fs_devices(fs_devices);
2457	if (IS_ERR(old_devices)) {
2458		kfree(seed_devices);
2459		return old_devices;
2460	}
2461
2462	list_add(&old_devices->fs_list, &fs_uuids);
2463
2464	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2465	seed_devices->opened = 1;
2466	INIT_LIST_HEAD(&seed_devices->devices);
2467	INIT_LIST_HEAD(&seed_devices->alloc_list);
2468	mutex_init(&seed_devices->device_list_mutex);
2469
2470	return seed_devices;
2471}
2472
2473/*
2474 * Splice seed devices into the sprout fs_devices.
2475 * Generate a new fsid for the sprouted read-write filesystem.
2476 */
2477static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
2478			       struct btrfs_fs_devices *seed_devices)
2479{
2480	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2481	struct btrfs_super_block *disk_super = fs_info->super_copy;
2482	struct btrfs_device *device;
2483	u64 super_flags;
2484
2485	/*
2486	 * We are updating the fsid; a thread leading to device_list_add()
2487	 * could race, so uuid_mutex is needed.
2488	 */
2489	lockdep_assert_held(&uuid_mutex);
2490
2491	/*
2492	 * The threads listed below may traverse dev_list but can do that without
2493	 * device_list_mutex:
2494	 * - All device ops and balance - as we are in btrfs_exclop_start.
2495	 * - Various dev_list readers - are using RCU.
2496	 * - btrfs_ioctl_fitrim() - is using RCU.
2497	 *
2498	 * The following read-only threads use device_list_mutex:
2499	 * - Readonly scrub btrfs_scrub_dev()
2500	 * - Readonly scrub btrfs_scrub_progress()
2501	 * - btrfs_get_dev_stats()
2502	 */
2503	lockdep_assert_held(&fs_devices->device_list_mutex);
2504
2505	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2506			      synchronize_rcu);
2507	list_for_each_entry(device, &seed_devices->devices, dev_list)
2508		device->fs_devices = seed_devices;
2509
2510	fs_devices->seeding = false;
2511	fs_devices->num_devices = 0;
2512	fs_devices->open_devices = 0;
2513	fs_devices->missing_devices = 0;
2514	fs_devices->rotating = false;
2515	list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2516
2517	generate_random_uuid(fs_devices->fsid);
2518	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2519	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2520
2521	super_flags = btrfs_super_flags(disk_super) &
2522		      ~BTRFS_SUPER_FLAG_SEEDING;
2523	btrfs_set_super_flags(disk_super, super_flags);
2524}
2525
2526/*
2527 * Store the expected generation for seed devices in device items.
2528 */
2529static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2530{
2531	BTRFS_DEV_LOOKUP_ARGS(args);
2532	struct btrfs_fs_info *fs_info = trans->fs_info;
2533	struct btrfs_root *root = fs_info->chunk_root;
2534	struct btrfs_path *path;
2535	struct extent_buffer *leaf;
2536	struct btrfs_dev_item *dev_item;
2537	struct btrfs_device *device;
2538	struct btrfs_key key;
2539	u8 fs_uuid[BTRFS_FSID_SIZE];
2540	u8 dev_uuid[BTRFS_UUID_SIZE];
2541	int ret;
2542
2543	path = btrfs_alloc_path();
2544	if (!path)
2545		return -ENOMEM;
2546
2547	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2548	key.offset = 0;
2549	key.type = BTRFS_DEV_ITEM_KEY;
2550
2551	while (1) {
2552		btrfs_reserve_chunk_metadata(trans, false);
2553		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2554		btrfs_trans_release_chunk_metadata(trans);
2555		if (ret < 0)
2556			goto error;
2557
2558		leaf = path->nodes[0];
2559next_slot:
2560		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2561			ret = btrfs_next_leaf(root, path);
2562			if (ret > 0)
2563				break;
2564			if (ret < 0)
2565				goto error;
2566			leaf = path->nodes[0];
2567			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2568			btrfs_release_path(path);
2569			continue;
2570		}
2571
2572		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2573		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2574		    key.type != BTRFS_DEV_ITEM_KEY)
2575			break;
2576
2577		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2578					  struct btrfs_dev_item);
2579		args.devid = btrfs_device_id(leaf, dev_item);
2580		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2581				   BTRFS_UUID_SIZE);
2582		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2583				   BTRFS_FSID_SIZE);
2584		args.uuid = dev_uuid;
2585		args.fsid = fs_uuid;
2586		device = btrfs_find_device(fs_info->fs_devices, &args);
2587		BUG_ON(!device); /* Logic error */
2588
2589		if (device->fs_devices->seeding) {
2590			btrfs_set_device_generation(leaf, dev_item,
2591						    device->generation);
2592			btrfs_mark_buffer_dirty(leaf);
2593		}
2594
2595		path->slots[0]++;
2596		goto next_slot;
2597	}
2598	ret = 0;
2599error:
2600	btrfs_free_path(path);
2601	return ret;
2602}
2603
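/*
 * Add a new device: open the block device exclusively, allocate and fill a
 * btrfs_device, splice the current devices into a seed list if this is a
 * sprout, link the device into the fs_devices lists and super block
 * counters, insert its dev item and commit. Error paths unwind in reverse
 * order via the error_* labels.
 */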
2604int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2605{
2606	struct btrfs_root *root = fs_info->dev_root;
2607	struct btrfs_trans_handle *trans;
2608	struct btrfs_device *device;
2609	struct block_device *bdev;
2610	struct super_block *sb = fs_info->sb;
2611	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2612	struct btrfs_fs_devices *seed_devices;
2613	u64 orig_super_total_bytes;
2614	u64 orig_super_num_devices;
2615	int ret = 0;
2616	bool seeding_dev = false;
2617	bool locked = false;
2618
2619	if (sb_rdonly(sb) && !fs_devices->seeding)
2620		return -EROFS;
2621
2622	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2623				  fs_info->bdev_holder);
2624	if (IS_ERR(bdev))
2625		return PTR_ERR(bdev);
2626
2627	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
2628		ret = -EINVAL;
2629		goto error;
2630	}
2631
2632	if (fs_devices->seeding) {
2633		seeding_dev = true;
2634		down_write(&sb->s_umount);
2635		mutex_lock(&uuid_mutex);
2636		locked = true;
2637	}
2638
2639	sync_blockdev(bdev);
2640
2641	rcu_read_lock();
2642	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2643		if (device->bdev == bdev) {
2644			ret = -EEXIST;
2645			rcu_read_unlock();
2646			goto error;
2647		}
2648	}
2649	rcu_read_unlock();
2650
2651	device = btrfs_alloc_device(fs_info, NULL, NULL, device_path);
2652	if (IS_ERR(device)) {
2653		/* we can safely leave the fs_devices entry around */
2654		ret = PTR_ERR(device);
2655		goto error;
2656	}
2657
2658	device->fs_info = fs_info;
2659	device->bdev = bdev;
2660	ret = lookup_bdev(device_path, &device->devt);
2661	if (ret)
2662		goto error_free_device;
2663
2664	ret = btrfs_get_dev_zone_info(device, false);
2665	if (ret)
2666		goto error_free_device;
2667
2668	trans = btrfs_start_transaction(root, 0);
2669	if (IS_ERR(trans)) {
2670		ret = PTR_ERR(trans);
2671		goto error_free_zone;
2672	}
2673
2674	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2675	device->generation = trans->transid;
2676	device->io_width = fs_info->sectorsize;
2677	device->io_align = fs_info->sectorsize;
2678	device->sector_size = fs_info->sectorsize;
2679	device->total_bytes =
2680		round_down(bdev_nr_bytes(bdev), fs_info->sectorsize);
2681	device->disk_total_bytes = device->total_bytes;
2682	device->commit_total_bytes = device->total_bytes;
2683	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2684	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2685	device->mode = FMODE_EXCL;
2686	device->dev_stats_valid = 1;
2687	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2688
2689	if (seeding_dev) {
2690		btrfs_clear_sb_rdonly(sb);
2691
2692		/* GFP_KERNEL allocation must not be under device_list_mutex */
2693		seed_devices = btrfs_init_sprout(fs_info);
2694		if (IS_ERR(seed_devices)) {
2695			ret = PTR_ERR(seed_devices);
2696			btrfs_abort_transaction(trans, ret);
2697			goto error_trans;
2698		}
2699	}
2700
2701	mutex_lock(&fs_devices->device_list_mutex);
2702	if (seeding_dev) {
2703		btrfs_setup_sprout(fs_info, seed_devices);
2704		btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
2705						device);
2706	}
2707
2708	device->fs_devices = fs_devices;
2709
2710	mutex_lock(&fs_info->chunk_mutex);
2711	list_add_rcu(&device->dev_list, &fs_devices->devices);
2712	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2713	fs_devices->num_devices++;
2714	fs_devices->open_devices++;
2715	fs_devices->rw_devices++;
2716	fs_devices->total_devices++;
2717	fs_devices->total_rw_bytes += device->total_bytes;
2718
2719	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2720
2721	if (!bdev_nonrot(bdev))
2722		fs_devices->rotating = true;
2723
2724	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2725	btrfs_set_super_total_bytes(fs_info->super_copy,
2726		round_down(orig_super_total_bytes + device->total_bytes,
2727			   fs_info->sectorsize));
2728
2729	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2730	btrfs_set_super_num_devices(fs_info->super_copy,
2731				    orig_super_num_devices + 1);
2732
2733	/*
2734	 * We've got more storage; clear any full flags on the
2735	 * space infos.
2736	 */
2737	btrfs_clear_space_info_full(fs_info);
2738
2739	mutex_unlock(&fs_info->chunk_mutex);
2740
2741	/* Add sysfs device entry */
2742	btrfs_sysfs_add_device(device);
2743
2744	mutex_unlock(&fs_devices->device_list_mutex);
2745
2746	if (seeding_dev) {
2747		mutex_lock(&fs_info->chunk_mutex);
2748		ret = init_first_rw_device(trans);
2749		mutex_unlock(&fs_info->chunk_mutex);
2750		if (ret) {
2751			btrfs_abort_transaction(trans, ret);
2752			goto error_sysfs;
2753		}
2754	}
2755
2756	ret = btrfs_add_dev_item(trans, device);
2757	if (ret) {
2758		btrfs_abort_transaction(trans, ret);
2759		goto error_sysfs;
2760	}
2761
2762	if (seeding_dev) {
2763		ret = btrfs_finish_sprout(trans);
2764		if (ret) {
2765			btrfs_abort_transaction(trans, ret);
2766			goto error_sysfs;
2767		}
2768
2769		/*
2770		 * fs_devices now represents the newly sprouted filesystem and
2771		 * its fsid has been changed by btrfs_setup_sprout().
2772		 */
2773		btrfs_sysfs_update_sprout_fsid(fs_devices);
2774	}
2775
2776	ret = btrfs_commit_transaction(trans);
2777
2778	if (seeding_dev) {
2779		mutex_unlock(&uuid_mutex);
2780		up_write(&sb->s_umount);
2781		locked = false;
2782
2783		if (ret) /* transaction commit */
2784			return ret;
2785
2786		ret = btrfs_relocate_sys_chunks(fs_info);
2787		if (ret < 0)
2788			btrfs_handle_fs_error(fs_info, ret,
2789				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2790		trans = btrfs_attach_transaction(root);
2791		if (IS_ERR(trans)) {
2792			if (PTR_ERR(trans) == -ENOENT)
2793				return 0;
2794			ret = PTR_ERR(trans);
2795			trans = NULL;
2796			goto error_sysfs;
2797		}
2798		ret = btrfs_commit_transaction(trans);
2799	}
2800
2801	/*
2802	 * Now that we have written a new super block to this device, check all
2803	 * other fs_devices lists to see whether device_path alienates any other
2804	 * scanned device.
2805	 * We can ignore the return value as it typically returns -EINVAL and
2806	 * only succeeds if the device was an alien.
2807	 */
2808	btrfs_forget_devices(device->devt);
2809
2810	/* Update ctime/mtime for blkid or udev */
2811	update_dev_time(device_path);
2812
2813	return ret;
2814
2815error_sysfs:
2816	btrfs_sysfs_remove_device(device);
2817	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2818	mutex_lock(&fs_info->chunk_mutex);
2819	list_del_rcu(&device->dev_list);
2820	list_del(&device->dev_alloc_list);
2821	fs_info->fs_devices->num_devices--;
2822	fs_info->fs_devices->open_devices--;
2823	fs_info->fs_devices->rw_devices--;
2824	fs_info->fs_devices->total_devices--;
2825	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2826	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2827	btrfs_set_super_total_bytes(fs_info->super_copy,
2828				    orig_super_total_bytes);
2829	btrfs_set_super_num_devices(fs_info->super_copy,
2830				    orig_super_num_devices);
2831	mutex_unlock(&fs_info->chunk_mutex);
2832	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2833error_trans:
2834	if (seeding_dev)
2835		btrfs_set_sb_rdonly(sb);
2836	if (trans)
2837		btrfs_end_transaction(trans);
2838error_free_zone:
2839	btrfs_destroy_dev_zone_info(device);
2840error_free_device:
2841	btrfs_free_device(device);
2842error:
2843	blkdev_put(bdev, FMODE_EXCL);
2844	if (locked) {
2845		mutex_unlock(&uuid_mutex);
2846		up_write(&sb->s_umount);
2847	}
2848	return ret;
2849}
2850
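/*
 * Rewrite the existing dev item in the chunk tree with the device's current
 * sizes. Unlike btrfs_add_dev_item() this does not touch the uuid/fsid
 * fields, and it fails with -ENOENT if the item is missing.
 */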
2851static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2852					struct btrfs_device *device)
2853{
2854	int ret;
2855	struct btrfs_path *path;
2856	struct btrfs_root *root = device->fs_info->chunk_root;
2857	struct btrfs_dev_item *dev_item;
2858	struct extent_buffer *leaf;
2859	struct btrfs_key key;
2860
2861	path = btrfs_alloc_path();
2862	if (!path)
2863		return -ENOMEM;
2864
2865	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2866	key.type = BTRFS_DEV_ITEM_KEY;
2867	key.offset = device->devid;
2868
2869	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2870	if (ret < 0)
2871		goto out;
2872
2873	if (ret > 0) {
2874		ret = -ENOENT;
2875		goto out;
2876	}
2877
2878	leaf = path->nodes[0];
2879	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2880
2881	btrfs_set_device_id(leaf, dev_item, device->devid);
2882	btrfs_set_device_type(leaf, dev_item, device->type);
2883	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2884	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2885	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2886	btrfs_set_device_total_bytes(leaf, dev_item,
2887				     btrfs_device_get_disk_total_bytes(device));
2888	btrfs_set_device_bytes_used(leaf, dev_item,
2889				    btrfs_device_get_bytes_used(device));
2890	btrfs_mark_buffer_dirty(leaf);
2891
2892out:
2893	btrfs_free_path(path);
2894	return ret;
2895}
2896
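/*
 * Grow @device to @new_size (rounded down to the sector size): bump the
 * super block total and total_rw_bytes by the delta and update the dev
 * item. Shrinking is rejected here; @new_size must be larger than the
 * current total_bytes.
 */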
2897int btrfs_grow_device(struct btrfs_trans_handle *trans,
2898		      struct btrfs_device *device, u64 new_size)
2899{
2900	struct btrfs_fs_info *fs_info = device->fs_info;
2901	struct btrfs_super_block *super_copy = fs_info->super_copy;
2902	u64 old_total;
2903	u64 diff;
2904	int ret;
2905
2906	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2907		return -EACCES;
2908
2909	new_size = round_down(new_size, fs_info->sectorsize);
2910
2911	mutex_lock(&fs_info->chunk_mutex);
2912	old_total = btrfs_super_total_bytes(super_copy);
2913	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2914
2915	if (new_size <= device->total_bytes ||
2916	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2917		mutex_unlock(&fs_info->chunk_mutex);
2918		return -EINVAL;
2919	}
2920
2921	btrfs_set_super_total_bytes(super_copy,
2922			round_down(old_total + diff, fs_info->sectorsize));
2923	device->fs_devices->total_rw_bytes += diff;
2924
2925	btrfs_device_set_total_bytes(device, new_size);
2926	btrfs_device_set_disk_total_bytes(device, new_size);
2927	btrfs_clear_space_info_full(device->fs_info);
2928	if (list_empty(&device->post_commit_list))
2929		list_add_tail(&device->post_commit_list,
2930			      &trans->transaction->dev_update_list);
2931	mutex_unlock(&fs_info->chunk_mutex);
2932
2933	btrfs_reserve_chunk_metadata(trans, false);
2934	ret = btrfs_update_device(trans, device);
2935	btrfs_trans_release_chunk_metadata(trans);
2936
2937	return ret;
2938}
2939
2940static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2941{
2942	struct btrfs_fs_info *fs_info = trans->fs_info;
2943	struct btrfs_root *root = fs_info->chunk_root;
2944	int ret;
2945	struct btrfs_path *path;
2946	struct btrfs_key key;
2947
2948	path = btrfs_alloc_path();
2949	if (!path)
2950		return -ENOMEM;
2951
2952	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2953	key.offset = chunk_offset;
2954	key.type = BTRFS_CHUNK_ITEM_KEY;
2955
2956	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2957	if (ret < 0)
2958		goto out;
2959	else if (ret > 0) { /* Logic error or corruption */
2960		btrfs_handle_fs_error(fs_info, -ENOENT,
2961				      "Failed lookup while freeing chunk.");
2962		ret = -ENOENT;
2963		goto out;
2964	}
2965
2966	ret = btrfs_del_item(trans, root, path);
2967	if (ret < 0)
2968		btrfs_handle_fs_error(fs_info, ret,
2969				      "Failed to delete chunk item.");
2970out:
2971	btrfs_free_path(path);
2972	return ret;
2973}
2974
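/*
 * The super block's sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) pairs. The loop below walks
 * the array and, on a key match, memmove()s the tail over the matching
 * entry and shrinks the recorded array size.
 */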
2975static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2976{
2977	struct btrfs_super_block *super_copy = fs_info->super_copy;
2978	struct btrfs_disk_key *disk_key;
2979	struct btrfs_chunk *chunk;
2980	u8 *ptr;
2981	int ret = 0;
2982	u32 num_stripes;
2983	u32 array_size;
2984	u32 len = 0;
2985	u32 cur;
2986	struct btrfs_key key;
2987
2988	lockdep_assert_held(&fs_info->chunk_mutex);
2989	array_size = btrfs_super_sys_array_size(super_copy);
2990
2991	ptr = super_copy->sys_chunk_array;
2992	cur = 0;
2993
2994	while (cur < array_size) {
2995		disk_key = (struct btrfs_disk_key *)ptr;
2996		btrfs_disk_key_to_cpu(&key, disk_key);
2997
2998		len = sizeof(*disk_key);
2999
3000		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3001			chunk = (struct btrfs_chunk *)(ptr + len);
3002			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
3003			len += btrfs_chunk_item_size(num_stripes);
3004		} else {
3005			ret = -EIO;
3006			break;
3007		}
3008		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
3009		    key.offset == chunk_offset) {
3010			memmove(ptr, ptr + len, array_size - (cur + len));
3011			array_size -= len;
3012			btrfs_set_super_sys_array_size(super_copy, array_size);
3013		} else {
3014			ptr += len;
3015			cur += len;
3016		}
3017	}
3018	return ret;
3019}
3020
3021/*
3022 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
3023 * @logical: Logical block offset in bytes.
3024 * @length: Length of extent in bytes.
3025 *
3026 * Return: Chunk mapping or ERR_PTR.
3027 */
3028struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
3029				       u64 logical, u64 length)
3030{
3031	struct extent_map_tree *em_tree;
3032	struct extent_map *em;
3033
3034	em_tree = &fs_info->mapping_tree;
3035	read_lock(&em_tree->lock);
3036	em = lookup_extent_mapping(em_tree, logical, length);
3037	read_unlock(&em_tree->lock);
3038
3039	if (!em) {
3040		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
3041			   logical, length);
3042		return ERR_PTR(-EINVAL);
3043	}
3044
3045	if (em->start > logical || em->start + em->len < logical) {
3046		btrfs_crit(fs_info,
3047			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
3048			   logical, length, em->start, em->start + em->len);
3049		free_extent_map(em);
3050		return ERR_PTR(-EINVAL);
3051	}
3052
3053	/* callers are responsible for dropping em's ref. */
3054	return em;
3055}
3056
3057static int remove_chunk_item(struct btrfs_trans_handle *trans,
3058			     struct map_lookup *map, u64 chunk_offset)
3059{
3060	int i;
3061
3062	/*
3063	 * Removing chunk items and updating the device items in the chunk btree
3064	 * requires holding the chunk_mutex.
3065	 * See the comment at btrfs_chunk_alloc() for the details.
3066	 */
3067	lockdep_assert_held(&trans->fs_info->chunk_mutex);
3068
3069	for (i = 0; i < map->num_stripes; i++) {
3070		int ret;
3071
3072		ret = btrfs_update_device(trans, map->stripes[i].dev);
3073		if (ret)
3074			return ret;
3075	}
3076
3077	return btrfs_free_chunk(trans, chunk_offset);
3078}
3079
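/*
 * Delete a fully relocated chunk: first the per-device extent items (under
 * device_list_mutex), then the chunk item and dev item updates (under
 * chunk_mutex), the sys_chunk_array entry for SYSTEM chunks, and finally
 * the block group itself.
 */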
3080int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3081{
3082	struct btrfs_fs_info *fs_info = trans->fs_info;
3083	struct extent_map *em;
3084	struct map_lookup *map;
3085	u64 dev_extent_len = 0;
3086	int i, ret = 0;
3087	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3088
3089	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3090	if (IS_ERR(em)) {
3091		/*
3092		 * This is a logic error, but we don't want to just rely on the
3093		 * user having built with ASSERT enabled, so if ASSERT doesn't
3094		 * do anything we still error out.
3095		 */
3096		ASSERT(0);
3097		return PTR_ERR(em);
3098	}
3099	map = em->map_lookup;
3100
3101	/*
3102	 * First delete the device extent items from the devices btree.
3103	 * We take the device_list_mutex to avoid racing with the finishing phase
3104	 * of a device replace operation. See the comment below before acquiring
3105	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3106	 * because that can result in a deadlock when deleting the device extent
3107	 * items from the devices btree - COWing an extent buffer from the btree
3108	 * may result in allocating a new metadata chunk, which would attempt to
3109	 * lock fs_info->chunk_mutex again.
3110	 */
3111	mutex_lock(&fs_devices->device_list_mutex);
3112	for (i = 0; i < map->num_stripes; i++) {
3113		struct btrfs_device *device = map->stripes[i].dev;
3114		ret = btrfs_free_dev_extent(trans, device,
3115					    map->stripes[i].physical,
3116					    &dev_extent_len);
3117		if (ret) {
3118			mutex_unlock(&fs_devices->device_list_mutex);
3119			btrfs_abort_transaction(trans, ret);
3120			goto out;
3121		}
3122
3123		if (device->bytes_used > 0) {
3124			mutex_lock(&fs_info->chunk_mutex);
3125			btrfs_device_set_bytes_used(device,
3126					device->bytes_used - dev_extent_len);
3127			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3128			btrfs_clear_space_info_full(fs_info);
3129			mutex_unlock(&fs_info->chunk_mutex);
3130		}
3131	}
3132	mutex_unlock(&fs_devices->device_list_mutex);
3133
3134	/*
3135	 * We acquire fs_info->chunk_mutex for 2 reasons:
3136	 *
3137	 * 1) Just like with the first phase of the chunk allocation, we must
3138	 *    reserve system space, do all chunk btree updates and deletions, and
3139	 *    update the system chunk array in the superblock while holding this
3140	 *    mutex. This is for similar reasons as explained on the comment at
3141	 *    the top of btrfs_chunk_alloc();
3142	 *
3143	 * 2) Prevent races with the final phase of a device replace operation
3144	 *    that replaces the device object associated with the map's stripes,
3145	 *    because the device object's id can change at any time during that
3146	 *    final phase of the device replace operation
3147	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3148	 *    replaced device and then see it with an ID of
3149	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3150	 *    the device item, which does not exist on the chunk btree.
3151	 *    The finishing phase of device replace acquires both the
3152	 *    device_list_mutex and the chunk_mutex, in that order, so we are
3153	 *    safe by just acquiring the chunk_mutex.
3154	 */
3155	trans->removing_chunk = true;
3156	mutex_lock(&fs_info->chunk_mutex);
3157
3158	check_system_chunk(trans, map->type);
3159
3160	ret = remove_chunk_item(trans, map, chunk_offset);
3161	/*
3162	 * Normally we should not get -ENOSPC since we reserved space before
3163	 * through the call to check_system_chunk().
3164	 *
3165	 * Despite our system space_info having enough free space, we may not
3166	 * be able to allocate extents from its block groups, because all have
3167	 * an incompatible profile, which will force us to allocate a new system
3168	 * block group with the right profile, or right after we called
3169	 * check_system_chunk() above, a scrub turned the only system block group
3170	 * with enough free space into RO mode.
3171	 * This is explained with more detail at do_chunk_alloc().
3172	 *
3173	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3174	 */
3175	if (ret == -ENOSPC) {
3176		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3177		struct btrfs_block_group *sys_bg;
3178
3179		sys_bg = btrfs_create_chunk(trans, sys_flags);
3180		if (IS_ERR(sys_bg)) {
3181			ret = PTR_ERR(sys_bg);
3182			btrfs_abort_transaction(trans, ret);
3183			goto out;
3184		}
3185
3186		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3187		if (ret) {
3188			btrfs_abort_transaction(trans, ret);
3189			goto out;
3190		}
3191
3192		ret = remove_chunk_item(trans, map, chunk_offset);
3193		if (ret) {
3194			btrfs_abort_transaction(trans, ret);
3195			goto out;
3196		}
3197	} else if (ret) {
3198		btrfs_abort_transaction(trans, ret);
3199		goto out;
3200	}
3201
3202	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3203
3204	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3205		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3206		if (ret) {
3207			btrfs_abort_transaction(trans, ret);
3208			goto out;
3209		}
3210	}
3211
3212	mutex_unlock(&fs_info->chunk_mutex);
3213	trans->removing_chunk = false;
3214
3215	/*
3216	 * We are done with chunk btree updates and deletions, so release the
3217	 * system space we previously reserved (with check_system_chunk()).
3218	 */
3219	btrfs_trans_release_chunk_metadata(trans);
3220
3221	ret = btrfs_remove_block_group(trans, chunk_offset, em);
3222	if (ret) {
3223		btrfs_abort_transaction(trans, ret);
3224		goto out;
3225	}
3226
3227out:
3228	if (trans->removing_chunk) {
3229		mutex_unlock(&fs_info->chunk_mutex);
3230		trans->removing_chunk = false;
3231	}
3232	/* once for us */
3233	free_extent_map(em);
3234	return ret;
3235}
3236
3237int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3238{
3239	struct btrfs_root *root = fs_info->chunk_root;
3240	struct btrfs_trans_handle *trans;
3241	struct btrfs_block_group *block_group;
3242	u64 length;
3243	int ret;
3244
3245	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
3246		btrfs_err(fs_info,
3247			  "relocate: not supported on extent tree v2 yet");
3248		return -EINVAL;
3249	}
3250
3251	/*
3252	 * Prevent races with automatic removal of unused block groups.
3253	 * After we relocate and before we remove the chunk with offset
3254	 * chunk_offset, automatic removal of the block group can kick in,
3255	 * resulting in a failure when calling btrfs_remove_chunk() below.
3256	 *
3257	 * Make sure to acquire this mutex before doing a tree search (dev
3258	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3259	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3260	 * we release the path used to search the chunk/dev tree and before
3261	 * the current task acquires this mutex and calls us.
3262	 */
3263	lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3264
3265	/* step one, relocate all the extents inside this chunk */
3266	btrfs_scrub_pause(fs_info);
3267	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3268	btrfs_scrub_continue(fs_info);
3269	if (ret)
3270		return ret;
3271
3272	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3273	if (!block_group)
3274		return -ENOENT;
3275	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3276	length = block_group->length;
3277	btrfs_put_block_group(block_group);
3278
3279	/*
3280	 * On a zoned file system, discard the whole block group, this will
3281	 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3282	 * resetting the zone fails, don't treat it as a fatal problem from the
3283	 * filesystem's point of view.
3284	 */
3285	if (btrfs_is_zoned(fs_info)) {
3286		ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3287		if (ret)
3288			btrfs_info(fs_info,
3289				"failed to reset zone %llu after relocation",
3290				chunk_offset);
3291	}
3292
3293	trans = btrfs_start_trans_remove_block_group(root->fs_info,
3294						     chunk_offset);
3295	if (IS_ERR(trans)) {
3296		ret = PTR_ERR(trans);
3297		btrfs_handle_fs_error(root->fs_info, ret, NULL);
3298		return ret;
3299	}
3300
3301	/*
3302	 * step two, delete the device extents and the
3303	 * chunk tree entries
3304	 */
3305	ret = btrfs_remove_chunk(trans, chunk_offset);
3306	btrfs_end_transaction(trans);
3307	return ret;
3308}
3309
3310static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3311{
3312	struct btrfs_root *chunk_root = fs_info->chunk_root;
3313	struct btrfs_path *path;
3314	struct extent_buffer *leaf;
3315	struct btrfs_chunk *chunk;
3316	struct btrfs_key key;
3317	struct btrfs_key found_key;
3318	u64 chunk_type;
3319	bool retried = false;
3320	int failed = 0;
3321	int ret;
3322
3323	path = btrfs_alloc_path();
3324	if (!path)
3325		return -ENOMEM;
3326
3327again:
3328	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3329	key.offset = (u64)-1;
3330	key.type = BTRFS_CHUNK_ITEM_KEY;
3331
3332	while (1) {
3333		mutex_lock(&fs_info->reclaim_bgs_lock);
3334		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3335		if (ret < 0) {
3336			mutex_unlock(&fs_info->reclaim_bgs_lock);
3337			goto error;
3338		}
3339		BUG_ON(ret == 0); /* Corruption */
3340
3341		ret = btrfs_previous_item(chunk_root, path, key.objectid,
3342					  key.type);
3343		if (ret)
3344			mutex_unlock(&fs_info->reclaim_bgs_lock);
3345		if (ret < 0)
3346			goto error;
3347		if (ret > 0)
3348			break;
3349
3350		leaf = path->nodes[0];
3351		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3352
3353		chunk = btrfs_item_ptr(leaf, path->slots[0],
3354				       struct btrfs_chunk);
3355		chunk_type = btrfs_chunk_type(leaf, chunk);
3356		btrfs_release_path(path);
3357
3358		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3359			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3360			if (ret == -ENOSPC)
3361				failed++;
3362			else
3363				BUG_ON(ret);
3364		}
3365		mutex_unlock(&fs_info->reclaim_bgs_lock);
3366
3367		if (found_key.offset == 0)
3368			break;
3369		key.offset = found_key.offset - 1;
3370	}
3371	ret = 0;
3372	if (failed && !retried) {
3373		failed = 0;
3374		retried = true;
3375		goto again;
3376	} else if (WARN_ON(failed && retried)) {
3377		ret = -ENOSPC;
3378	}
3379error:
3380	btrfs_free_path(path);
3381	return ret;
3382}
3383
3384/*
3385 * Return 1 if a data chunk was allocated successfully,
3386 * return <0 on error while allocating a data chunk,
3387 * return 0 if there was no need to allocate a data chunk.
3388 */
3389static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3390				      u64 chunk_offset)
3391{
3392	struct btrfs_block_group *cache;
3393	u64 bytes_used;
3394	u64 chunk_type;
3395
3396	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3397	ASSERT(cache);
3398	chunk_type = cache->flags;
3399	btrfs_put_block_group(cache);
3400
3401	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3402		return 0;
3403
3404	spin_lock(&fs_info->data_sinfo->lock);
3405	bytes_used = fs_info->data_sinfo->bytes_used;
3406	spin_unlock(&fs_info->data_sinfo->lock);
3407
3408	if (!bytes_used) {
3409		struct btrfs_trans_handle *trans;
3410		int ret;
3411
3412		trans =	btrfs_join_transaction(fs_info->tree_root);
3413		if (IS_ERR(trans))
3414			return PTR_ERR(trans);
3415
3416		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3417		btrfs_end_transaction(trans);
3418		if (ret < 0)
3419			return ret;
3420		return 1;
3421	}
3422
3423	return 0;
3424}
3425
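/*
 * The balance item is persisted under BTRFS_BALANCE_OBJECTID with the
 * BTRFS_TEMPORARY_ITEM_KEY type so that an interrupted balance can be
 * resumed later; del_balance_item() removes it once balance completes or
 * is canceled.
 */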
3426static int insert_balance_item(struct btrfs_fs_info *fs_info,
3427			       struct btrfs_balance_control *bctl)
3428{
3429	struct btrfs_root *root = fs_info->tree_root;
3430	struct btrfs_trans_handle *trans;
3431	struct btrfs_balance_item *item;
3432	struct btrfs_disk_balance_args disk_bargs;
3433	struct btrfs_path *path;
3434	struct extent_buffer *leaf;
3435	struct btrfs_key key;
3436	int ret, err;
3437
3438	path = btrfs_alloc_path();
3439	if (!path)
3440		return -ENOMEM;
3441
3442	trans = btrfs_start_transaction(root, 0);
3443	if (IS_ERR(trans)) {
3444		btrfs_free_path(path);
3445		return PTR_ERR(trans);
3446	}
3447
3448	key.objectid = BTRFS_BALANCE_OBJECTID;
3449	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3450	key.offset = 0;
3451
3452	ret = btrfs_insert_empty_item(trans, root, path, &key,
3453				      sizeof(*item));
3454	if (ret)
3455		goto out;
3456
3457	leaf = path->nodes[0];
3458	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3459
3460	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3461
3462	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3463	btrfs_set_balance_data(leaf, item, &disk_bargs);
3464	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3465	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3466	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3467	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3468
3469	btrfs_set_balance_flags(leaf, item, bctl->flags);
3470
3471	btrfs_mark_buffer_dirty(leaf);
3472out:
3473	btrfs_free_path(path);
3474	err = btrfs_commit_transaction(trans);
3475	if (err && !ret)
3476		ret = err;
3477	return ret;
3478}
3479
3480static int del_balance_item(struct btrfs_fs_info *fs_info)
3481{
3482	struct btrfs_root *root = fs_info->tree_root;
3483	struct btrfs_trans_handle *trans;
3484	struct btrfs_path *path;
3485	struct btrfs_key key;
3486	int ret, err;
3487
3488	path = btrfs_alloc_path();
3489	if (!path)
3490		return -ENOMEM;
3491
3492	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3493	if (IS_ERR(trans)) {
3494		btrfs_free_path(path);
3495		return PTR_ERR(trans);
3496	}
3497
3498	key.objectid = BTRFS_BALANCE_OBJECTID;
3499	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3500	key.offset = 0;
3501
3502	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3503	if (ret < 0)
3504		goto out;
3505	if (ret > 0) {
3506		ret = -ENOENT;
3507		goto out;
3508	}
3509
3510	ret = btrfs_del_item(trans, root, path);
3511out:
3512	btrfs_free_path(path);
3513	err = btrfs_commit_transaction(trans);
3514	if (err && !ret)
3515		ret = err;
3516	return ret;
3517}
3518
3519/*
3520 * This is a heuristic used to reduce the number of chunks balanced on
3521 * resume after balance was interrupted.
3522 */
3523static void update_balance_args(struct btrfs_balance_control *bctl)
3524{
3525	/*
3526	 * Turn on soft mode for chunk types that were being converted.
3527	 */
3528	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3529		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3530	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3531		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3532	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3533		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3534
3535	/*
3536	 * Turn on the usage filter if it is not already in use.  The idea is
3537	 * that chunks that we have already balanced should be
3538	 * reasonably full.  Don't do it for chunks that are being
3539	 * converted - that will keep us from relocating unconverted
3540	 * (albeit full) chunks.
3541	 */
3542	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3543	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3544	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3545		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3546		bctl->data.usage = 90;
3547	}
3548	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3549	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3550	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3551		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3552		bctl->sys.usage = 90;
3553	}
3554	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3555	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3556	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3557		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3558		bctl->meta.usage = 90;
3559	}
3560}
3561
3562/*
3563 * Clear the balance status in fs_info and delete the balance item from disk.
3564 */
3565static void reset_balance_state(struct btrfs_fs_info *fs_info)
3566{
3567	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3568	int ret;
3569
3570	BUG_ON(!fs_info->balance_ctl);
3571
3572	spin_lock(&fs_info->balance_lock);
3573	fs_info->balance_ctl = NULL;
3574	spin_unlock(&fs_info->balance_lock);
3575
3576	kfree(bctl);
3577	ret = del_balance_item(fs_info);
3578	if (ret)
3579		btrfs_handle_fs_error(fs_info, ret, NULL);
3580}
3581
3582/*
3583 * Balance filters.  Return 1 if chunk should be filtered out
3584 * (should not be balanced).
3585 */
3586static int chunk_profiles_filter(u64 chunk_type,
3587				 struct btrfs_balance_args *bargs)
3588{
3589	chunk_type = chunk_to_extended(chunk_type) &
3590				BTRFS_EXTENDED_PROFILE_MASK;
3591
3592	if (bargs->profiles & chunk_type)
3593		return 0;
3594
3595	return 1;
3596}
3597
3598static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3599			      struct btrfs_balance_args *bargs)
3600{
3601	struct btrfs_block_group *cache;
3602	u64 chunk_used;
3603	u64 user_thresh_min;
3604	u64 user_thresh_max;
3605	int ret = 1;
3606
3607	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3608	chunk_used = cache->used;
3609
3610	if (bargs->usage_min == 0)
3611		user_thresh_min = 0;
3612	else
3613		user_thresh_min = mult_perc(cache->length, bargs->usage_min);
3614
3615	if (bargs->usage_max == 0)
3616		user_thresh_max = 1;
3617	else if (bargs->usage_max > 100)
3618		user_thresh_max = cache->length;
3619	else
3620		user_thresh_max = mult_perc(cache->length, bargs->usage_max);
3621
3622	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3623		ret = 0;
3624
3625	btrfs_put_block_group(cache);
3626	return ret;
3627}
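
/*
 * Worked example (values illustrative): with usage_min=10 and
 * usage_max=50 on a 1GiB chunk, user_thresh_min is ~102MiB and
 * user_thresh_max is 512MiB, so the chunk is relocated only while its
 * used bytes fall in [10%, 50%) of its length.  Note that usage_max=0
 * degenerates to user_thresh_max = 1, matching only empty chunks.
 */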
3628
3629static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3630		u64 chunk_offset, struct btrfs_balance_args *bargs)
3631{
3632	struct btrfs_block_group *cache;
3633	u64 chunk_used, user_thresh;
3634	int ret = 1;
3635
3636	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3637	chunk_used = cache->used;
3638
3639	if (bargs->usage_min == 0)
3640		user_thresh = 1;
3641	else if (bargs->usage > 100)
3642		user_thresh = cache->length;
3643	else
3644		user_thresh = mult_perc(cache->length, bargs->usage);
3645
3646	if (chunk_used < user_thresh)
3647		ret = 0;
3648
3649	btrfs_put_block_group(cache);
3650	return ret;
3651}
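
/*
 * For example (values illustrative): "usage=50" relocates chunks that
 * are less than half full, while "usage=0" degenerates to
 * user_thresh = 1 and matches only completely empty chunks.
 */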
3652
3653static int chunk_devid_filter(struct extent_buffer *leaf,
3654			      struct btrfs_chunk *chunk,
3655			      struct btrfs_balance_args *bargs)
3656{
3657	struct btrfs_stripe *stripe;
3658	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3659	int i;
3660
3661	for (i = 0; i < num_stripes; i++) {
3662		stripe = btrfs_stripe_nr(chunk, i);
3663		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3664			return 0;
3665	}
3666
3667	return 1;
3668}
3669
3670static u64 calc_data_stripes(u64 type, int num_stripes)
3671{
3672	const int index = btrfs_bg_flags_to_raid_index(type);
3673	const int ncopies = btrfs_raid_array[index].ncopies;
3674	const int nparity = btrfs_raid_array[index].nparity;
3675
3676	return (num_stripes - nparity) / ncopies;
3677}
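
/*
 * Sample values derived from btrfs_raid_array (illustrative): a RAID10
 * chunk with num_stripes=4 (ncopies=2, nparity=0) has 2 data stripes,
 * while a RAID5 chunk with num_stripes=4 (ncopies=1, nparity=1) has 3.
 */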
3678
3679/* [pstart, pend) */
3680static int chunk_drange_filter(struct extent_buffer *leaf,
3681			       struct btrfs_chunk *chunk,
3682			       struct btrfs_balance_args *bargs)
3683{
3684	struct btrfs_stripe *stripe;
3685	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3686	u64 stripe_offset;
3687	u64 stripe_length;
3688	u64 type;
3689	int factor;
3690	int i;
3691
3692	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3693		return 0;
3694
3695	type = btrfs_chunk_type(leaf, chunk);
3696	factor = calc_data_stripes(type, num_stripes);
3697
3698	for (i = 0; i < num_stripes; i++) {
3699		stripe = btrfs_stripe_nr(chunk, i);
3700		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3701			continue;
3702
3703		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3704		stripe_length = btrfs_chunk_length(leaf, chunk);
3705		stripe_length = div_u64(stripe_length, factor);
3706
3707		if (stripe_offset < bargs->pend &&
3708		    stripe_offset + stripe_length > bargs->pstart)
3709			return 0;
3710	}
3711
3712	return 1;
3713}
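
/*
 * For example (values illustrative): a 4GiB RAID0 chunk striped over 4
 * devices has factor=4, so each stripe covers 1GiB of physical space;
 * the chunk is balanced if any of its stripes on bargs->devid overlaps
 * [pstart, pend).
 */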
3714
3715/* [vstart, vend) */
3716static int chunk_vrange_filter(struct extent_buffer *leaf,
3717			       struct btrfs_chunk *chunk,
3718			       u64 chunk_offset,
3719			       struct btrfs_balance_args *bargs)
3720{
3721	if (chunk_offset < bargs->vend &&
3722	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3723		/* at least part of the chunk is inside this vrange */
3724		return 0;
3725
3726	return 1;
3727}
3728
3729static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3730			       struct btrfs_chunk *chunk,
3731			       struct btrfs_balance_args *bargs)
3732{
3733	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3734
3735	if (bargs->stripes_min <= num_stripes
3736			&& num_stripes <= bargs->stripes_max)
3737		return 0;
3738
3739	return 1;
3740}
3741
3742static int chunk_soft_convert_filter(u64 chunk_type,
3743				     struct btrfs_balance_args *bargs)
3744{
3745	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3746		return 0;
3747
3748	chunk_type = chunk_to_extended(chunk_type) &
3749				BTRFS_EXTENDED_PROFILE_MASK;
3750
3751	if (bargs->target == chunk_type)
3752		return 1;
3753
3754	return 0;
3755}
3756
3757static int should_balance_chunk(struct extent_buffer *leaf,
3758				struct btrfs_chunk *chunk, u64 chunk_offset)
3759{
3760	struct btrfs_fs_info *fs_info = leaf->fs_info;
3761	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3762	struct btrfs_balance_args *bargs = NULL;
3763	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3764
3765	/* type filter */
3766	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3767	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3768		return 0;
3769	}
3770
3771	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3772		bargs = &bctl->data;
3773	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3774		bargs = &bctl->sys;
3775	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3776		bargs = &bctl->meta;
3777
3778	/* profiles filter */
3779	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3780	    chunk_profiles_filter(chunk_type, bargs)) {
3781		return 0;
3782	}
3783
3784	/* usage filter */
3785	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3786	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3787		return 0;
3788	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3789	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3790		return 0;
3791	}
3792
3793	/* devid filter */
3794	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3795	    chunk_devid_filter(leaf, chunk, bargs)) {
3796		return 0;
3797	}
3798
3799	/* drange filter, makes sense only with devid filter */
3800	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3801	    chunk_drange_filter(leaf, chunk, bargs)) {
3802		return 0;
3803	}
3804
3805	/* vrange filter */
3806	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3807	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3808		return 0;
3809	}
3810
3811	/* stripes filter */
3812	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3813	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3814		return 0;
3815	}
3816
3817	/* soft profile changing mode */
3818	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3819	    chunk_soft_convert_filter(chunk_type, bargs)) {
3820		return 0;
3821	}
3822
3823	/*
3824	 * limited by count, must be the last filter
3825	 */
3826	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3827		if (bargs->limit == 0)
3828			return 0;
3829		else
3830			bargs->limit--;
3831	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3832		/*
3833		 * Same logic as the 'limit' filter; the minimum cannot be
3834		 * determined here because we do not have the global information
3835		 * about the count of all chunks that satisfy the filters.
3836		 */
3837		if (bargs->limit_max == 0)
3838			return 0;
3839		else
3840			bargs->limit_max--;
3841	}
3842
3843	return 1;
3844}
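
/*
 * For example (values illustrative): "-dusage=50,limit=10" sets both
 * BTRFS_BALANCE_ARGS_USAGE and BTRFS_BALANCE_ARGS_LIMIT on the data
 * args, so a data chunk is accepted only if it is under 50% full and
 * fewer than 10 chunks have been accepted so far.  Note that the limit
 * counters are decremented in place, which is why __btrfs_balance()
 * saves and restores them between the counting and relocation passes.
 */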
3845
3846static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3847{
3848	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3849	struct btrfs_root *chunk_root = fs_info->chunk_root;
3850	u64 chunk_type;
3851	struct btrfs_chunk *chunk;
3852	struct btrfs_path *path = NULL;
3853	struct btrfs_key key;
3854	struct btrfs_key found_key;
3855	struct extent_buffer *leaf;
3856	int slot;
3857	int ret;
3858	int enospc_errors = 0;
3859	bool counting = true;
3860	/* The single value limit and the min/max limits share the same bytes (a union), so save them here and restore them for the second pass. */
3861	u64 limit_data = bctl->data.limit;
3862	u64 limit_meta = bctl->meta.limit;
3863	u64 limit_sys = bctl->sys.limit;
3864	u32 count_data = 0;
3865	u32 count_meta = 0;
3866	u32 count_sys = 0;
3867	int chunk_reserved = 0;
3868
3869	path = btrfs_alloc_path();
3870	if (!path) {
3871		ret = -ENOMEM;
3872		goto error;
3873	}
3874
3875	/* zero out stat counters */
3876	spin_lock(&fs_info->balance_lock);
3877	memset(&bctl->stat, 0, sizeof(bctl->stat));
3878	spin_unlock(&fs_info->balance_lock);
3879again:
3880	if (!counting) {
3881		/*
3882		 * The single value limit and min/max limits use the same bytes
3883		 * in the union, so restore the limits saved before counting.
3884		 */
3885		bctl->data.limit = limit_data;
3886		bctl->meta.limit = limit_meta;
3887		bctl->sys.limit = limit_sys;
3888	}
3889	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3890	key.offset = (u64)-1;
3891	key.type = BTRFS_CHUNK_ITEM_KEY;
3892
3893	while (1) {
3894		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3895		    atomic_read(&fs_info->balance_cancel_req)) {
3896			ret = -ECANCELED;
3897			goto error;
3898		}
3899
3900		mutex_lock(&fs_info->reclaim_bgs_lock);
3901		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3902		if (ret < 0) {
3903			mutex_unlock(&fs_info->reclaim_bgs_lock);
3904			goto error;
3905		}
3906
3907		/*
3908		 * This shouldn't happen; it means the last relocate
3909		 * failed.
3910		 */
3911		if (ret == 0)
3912			BUG(); /* FIXME break ? */
3913
3914		ret = btrfs_previous_item(chunk_root, path, 0,
3915					  BTRFS_CHUNK_ITEM_KEY);
3916		if (ret) {
3917			mutex_unlock(&fs_info->reclaim_bgs_lock);
3918			ret = 0;
3919			break;
3920		}
3921
3922		leaf = path->nodes[0];
3923		slot = path->slots[0];
3924		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3925
3926		if (found_key.objectid != key.objectid) {
3927			mutex_unlock(&fs_info->reclaim_bgs_lock);
3928			break;
3929		}
3930
3931		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3932		chunk_type = btrfs_chunk_type(leaf, chunk);
3933
3934		if (!counting) {
3935			spin_lock(&fs_info->balance_lock);
3936			bctl->stat.considered++;
3937			spin_unlock(&fs_info->balance_lock);
3938		}
3939
3940		ret = should_balance_chunk(leaf, chunk, found_key.offset);
3941
3942		btrfs_release_path(path);
3943		if (!ret) {
3944			mutex_unlock(&fs_info->reclaim_bgs_lock);
3945			goto loop;
3946		}
3947
3948		if (counting) {
3949			mutex_unlock(&fs_info->reclaim_bgs_lock);
3950			spin_lock(&fs_info->balance_lock);
3951			bctl->stat.expected++;
3952			spin_unlock(&fs_info->balance_lock);
3953
3954			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3955				count_data++;
3956			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3957				count_sys++;
3958			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3959				count_meta++;
3960
3961			goto loop;
3962		}
3963
3964		/*
3965		 * Apply limit_min filter, no need to check if the LIMITS
3966		 * filter is used, limit_min is 0 by default
3967		 */
3968		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3969					count_data < bctl->data.limit_min)
3970				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3971					count_meta < bctl->meta.limit_min)
3972				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3973					count_sys < bctl->sys.limit_min)) {
3974			mutex_unlock(&fs_info->reclaim_bgs_lock);
3975			goto loop;
3976		}
3977
3978		if (!chunk_reserved) {
3979			/*
3980			 * We may be relocating the only data chunk we have,
3981			 * which could potentially end up losing the data raid
3982			 * profile, so let's allocate an empty one in
3983			 * advance.
3984			 */
3985			ret = btrfs_may_alloc_data_chunk(fs_info,
3986							 found_key.offset);
3987			if (ret < 0) {
3988				mutex_unlock(&fs_info->reclaim_bgs_lock);
3989				goto error;
3990			} else if (ret == 1) {
3991				chunk_reserved = 1;
3992			}
3993		}
3994
3995		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3996		mutex_unlock(&fs_info->reclaim_bgs_lock);
3997		if (ret == -ENOSPC) {
3998			enospc_errors++;
3999		} else if (ret == -ETXTBSY) {
4000			btrfs_info(fs_info,
4001	   "skipping relocation of block group %llu due to active swapfile",
4002				   found_key.offset);
4003			ret = 0;
4004		} else if (ret) {
4005			goto error;
4006		} else {
4007			spin_lock(&fs_info->balance_lock);
4008			bctl->stat.completed++;
4009			spin_unlock(&fs_info->balance_lock);
4010		}
4011loop:
4012		if (found_key.offset == 0)
4013			break;
4014		key.offset = found_key.offset - 1;
4015	}
4016
4017	if (counting) {
4018		btrfs_release_path(path);
4019		counting = false;
4020		goto again;
4021	}
4022error:
4023	btrfs_free_path(path);
4024	if (enospc_errors) {
4025		btrfs_info(fs_info, "%d enospc errors during balance",
4026			   enospc_errors);
4027		if (!ret)
4028			ret = -ENOSPC;
4029	}
4030
4031	return ret;
4032}
4033
4034/*
4035 * See if a given profile is valid and reduced.
4036 *
4037 * @flags:     profile to validate
4038 * @extended:  if true @flags is treated as an extended profile
4039 */
4040static int alloc_profile_is_valid(u64 flags, int extended)
4041{
4042	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
4043			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
4044
4045	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
4046
4047	/* 1) check that all other bits are zeroed */
4048	if (flags & ~mask)
4049		return 0;
4050
4051	/* 2) see if profile is reduced */
4052	if (flags == 0)
4053		return !extended; /* "0" is valid for usual profiles */
4054
4055	return has_single_bit_set(flags);
4056}
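
/*
 * For example (values illustrative): flags == 0 is valid as a regular
 * profile (SINGLE has no profile bit on disk) but not as an extended
 * one, and a profile with both RAID0 and RAID1 bits set fails the
 * single-bit check.
 */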
4057
4058static inline int balance_need_close(struct btrfs_fs_info *fs_info)
4059{
4060	/* cancel requested || normal exit path */
4061	return atomic_read(&fs_info->balance_cancel_req) ||
4062		(atomic_read(&fs_info->balance_pause_req) == 0 &&
4063		 atomic_read(&fs_info->balance_cancel_req) == 0);
4064}
4065
4066/*
4067 * Validate target profile against allowed profiles and return true if it's OK.
4068 * Otherwise print the error message and return false.
4069 */
4070static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
4071		const struct btrfs_balance_args *bargs,
4072		u64 allowed, const char *type)
4073{
4074	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4075		return true;
4076
4077	/* Profile is valid and does not have bits outside of the allowed set */
4078	if (alloc_profile_is_valid(bargs->target, 1) &&
4079	    (bargs->target & ~allowed) == 0)
4080		return true;
4081
4082	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
4083			type, btrfs_bg_type_to_raid_name(bargs->target));
4084	return false;
4085}
4086
4087/*
4088 * Fill @buf with textual description of balance filter flags @bargs, up to
4089 * @size_buf including the terminating null. The output may be trimmed if it
4090 * does not fit into the provided buffer.
4091 */
4092static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4093				 u32 size_buf)
4094{
4095	int ret;
4096	u32 size_bp = size_buf;
4097	char *bp = buf;
4098	u64 flags = bargs->flags;
4099	char tmp_buf[128] = {'\0'};
4100
4101	if (!flags)
4102		return;
4103
4104#define CHECK_APPEND_NOARG(a)						\
4105	do {								\
4106		ret = snprintf(bp, size_bp, (a));			\
4107		if (ret < 0 || ret >= size_bp)				\
4108			goto out_overflow;				\
4109		size_bp -= ret;						\
4110		bp += ret;						\
4111	} while (0)
4112
4113#define CHECK_APPEND_1ARG(a, v1)					\
4114	do {								\
4115		ret = snprintf(bp, size_bp, (a), (v1));			\
4116		if (ret < 0 || ret >= size_bp)				\
4117			goto out_overflow;				\
4118		size_bp -= ret;						\
4119		bp += ret;						\
4120	} while (0)
4121
4122#define CHECK_APPEND_2ARG(a, v1, v2)					\
4123	do {								\
4124		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
4125		if (ret < 0 || ret >= size_bp)				\
4126			goto out_overflow;				\
4127		size_bp -= ret;						\
4128		bp += ret;						\
4129	} while (0)
4130
4131	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4132		CHECK_APPEND_1ARG("convert=%s,",
4133				  btrfs_bg_type_to_raid_name(bargs->target));
4134
4135	if (flags & BTRFS_BALANCE_ARGS_SOFT)
4136		CHECK_APPEND_NOARG("soft,");
4137
4138	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4139		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4140					    sizeof(tmp_buf));
4141		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4142	}
4143
4144	if (flags & BTRFS_BALANCE_ARGS_USAGE)
4145		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4146
4147	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4148		CHECK_APPEND_2ARG("usage=%u..%u,",
4149				  bargs->usage_min, bargs->usage_max);
4150
4151	if (flags & BTRFS_BALANCE_ARGS_DEVID)
4152		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4153
4154	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4155		CHECK_APPEND_2ARG("drange=%llu..%llu,",
4156				  bargs->pstart, bargs->pend);
4157
4158	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4159		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4160				  bargs->vstart, bargs->vend);
4161
4162	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4163		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4164
4165	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4166		CHECK_APPEND_2ARG("limit=%u..%u,",
4167				bargs->limit_min, bargs->limit_max);
4168
4169	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4170		CHECK_APPEND_2ARG("stripes=%u..%u,",
4171				  bargs->stripes_min, bargs->stripes_max);
4172
4173#undef CHECK_APPEND_2ARG
4174#undef CHECK_APPEND_1ARG
4175#undef CHECK_APPEND_NOARG
4176
4177out_overflow:
4178
4179	if (size_bp < size_buf)
4180		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4181	else
4182		buf[0] = '\0';
4183}
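
/*
 * Sample output (illustrative): for args with CONVERT to raid1, SOFT
 * and USAGE=90 set, the buffer becomes "convert=raid1,soft,usage=90";
 * the trailing comma is stripped at out_overflow.
 */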
4184
4185static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4186{
4187	u32 size_buf = 1024;
4188	char tmp_buf[192] = {'\0'};
4189	char *buf;
4190	char *bp;
4191	u32 size_bp = size_buf;
4192	int ret;
4193	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4194
4195	buf = kzalloc(size_buf, GFP_KERNEL);
4196	if (!buf)
4197		return;
4198
4199	bp = buf;
4200
4201#define CHECK_APPEND_1ARG(a, v1)					\
4202	do {								\
4203		ret = snprintf(bp, size_bp, (a), (v1));			\
4204		if (ret < 0 || ret >= size_bp)				\
4205			goto out_overflow;				\
4206		size_bp -= ret;						\
4207		bp += ret;						\
4208	} while (0)
4209
4210	if (bctl->flags & BTRFS_BALANCE_FORCE)
4211		CHECK_APPEND_1ARG("%s", "-f ");
4212
4213	if (bctl->flags & BTRFS_BALANCE_DATA) {
4214		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4215		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4216	}
4217
4218	if (bctl->flags & BTRFS_BALANCE_METADATA) {
4219		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4220		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4221	}
4222
4223	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4224		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4225		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4226	}
4227
4228#undef CHECK_APPEND_1ARG
4229
4230out_overflow:
4231
4232	if (size_bp < size_buf)
4233		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4234	btrfs_info(fs_info, "balance: %s %s",
4235		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
4236		   "resume" : "start", buf);
4237
4238	kfree(buf);
4239}
4240
4241/*
4242 * Should be called with the balance mutex held
4243 */
4244int btrfs_balance(struct btrfs_fs_info *fs_info,
4245		  struct btrfs_balance_control *bctl,
4246		  struct btrfs_ioctl_balance_args *bargs)
4247{
4248	u64 meta_target, data_target;
4249	u64 allowed;
4250	int mixed = 0;
4251	int ret;
4252	u64 num_devices;
4253	unsigned seq;
4254	bool reducing_redundancy;
4255	int i;
4256
4257	if (btrfs_fs_closing(fs_info) ||
4258	    atomic_read(&fs_info->balance_pause_req) ||
4259	    btrfs_should_cancel_balance(fs_info)) {
4260		ret = -EINVAL;
4261		goto out;
4262	}
4263
4264	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4265	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4266		mixed = 1;
4267
4268	/*
4269	 * In case of mixed groups both data and meta should be picked,
4270	 * and identical options should be given for both of them.
4271	 */
4272	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4273	if (mixed && (bctl->flags & allowed)) {
4274		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4275		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4276		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4277			btrfs_err(fs_info,
4278	  "balance: mixed groups data and metadata options must be the same");
4279			ret = -EINVAL;
4280			goto out;
4281		}
4282	}
4283
4284	/*
4285	 * rw_devices will not change at the moment, device add/delete/replace
4286	 * are exclusive
4287	 */
4288	num_devices = fs_info->fs_devices->rw_devices;
4289
4290	/*
4291	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4292	 * special bit for it, to make it easier to distinguish.  Thus we need
4293	 * to set it manually, or balance would refuse the profile.
4294	 */
4295	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4296	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4297		if (num_devices >= btrfs_raid_array[i].devs_min)
4298			allowed |= btrfs_raid_array[i].bg_flag;
4299
4300	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4301	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4302	    !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4303		ret = -EINVAL;
4304		goto out;
4305	}
4306
4307	/*
4308	 * Allow reducing metadata or system integrity only if force is set,
4309	 * for profiles with redundancy (copies, parity).
4310	 */
4311	allowed = 0;
4312	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4313		if (btrfs_raid_array[i].ncopies >= 2 ||
4314		    btrfs_raid_array[i].tolerated_failures >= 1)
4315			allowed |= btrfs_raid_array[i].bg_flag;
4316	}
4317	do {
4318		seq = read_seqbegin(&fs_info->profiles_lock);
4319
4320		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4321		     (fs_info->avail_system_alloc_bits & allowed) &&
4322		     !(bctl->sys.target & allowed)) ||
4323		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4324		     (fs_info->avail_metadata_alloc_bits & allowed) &&
4325		     !(bctl->meta.target & allowed)))
4326			reducing_redundancy = true;
4327		else
4328			reducing_redundancy = false;
4329
4330		/* if we're not converting, the target field is uninitialized */
4331		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4332			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4333		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4334			bctl->data.target : fs_info->avail_data_alloc_bits;
4335	} while (read_seqretry(&fs_info->profiles_lock, seq));
4336
4337	if (reducing_redundancy) {
4338		if (bctl->flags & BTRFS_BALANCE_FORCE) {
4339			btrfs_info(fs_info,
4340			   "balance: force reducing metadata redundancy");
4341		} else {
4342			btrfs_err(fs_info,
4343	"balance: reduces metadata redundancy, use --force if you want this");
4344			ret = -EINVAL;
4345			goto out;
4346		}
4347	}
4348
4349	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4350		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4351		btrfs_warn(fs_info,
4352	"balance: metadata profile %s has lower redundancy than data profile %s",
4353				btrfs_bg_type_to_raid_name(meta_target),
4354				btrfs_bg_type_to_raid_name(data_target));
4355	}
4356
4357	ret = insert_balance_item(fs_info, bctl);
4358	if (ret && ret != -EEXIST)
4359		goto out;
4360
4361	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4362		BUG_ON(ret == -EEXIST);
4363		BUG_ON(fs_info->balance_ctl);
4364		spin_lock(&fs_info->balance_lock);
4365		fs_info->balance_ctl = bctl;
4366		spin_unlock(&fs_info->balance_lock);
4367	} else {
4368		BUG_ON(ret != -EEXIST);
4369		spin_lock(&fs_info->balance_lock);
4370		update_balance_args(bctl);
4371		spin_unlock(&fs_info->balance_lock);
4372	}
4373
4374	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4375	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4376	describe_balance_start_or_resume(fs_info);
4377	mutex_unlock(&fs_info->balance_mutex);
4378
4379	ret = __btrfs_balance(fs_info);
4380
4381	mutex_lock(&fs_info->balance_mutex);
4382	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) {
4383		btrfs_info(fs_info, "balance: paused");
4384		btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
4385	}
4386	/*
4387	 * Balance can be canceled by:
4388	 *
4389	 * - Regular cancel request
4390	 *   Then ret == -ECANCELED and balance_cancel_req > 0
4391	 *
4392	 * - Fatal signal to "btrfs" process
4393	 *   Either the signal caught by wait_reserve_ticket() and callers
4394	 *   got -EINTR, or caught by btrfs_should_cancel_balance() and
4395	 *   got -ECANCELED.
4396	 *   Either way, in this case balance_cancel_req = 0, and
4397	 *   ret == -EINTR or ret == -ECANCELED.
4398	 *
4399	 * So here we only check the return value to catch canceled balance.
4400	 */
4401	else if (ret == -ECANCELED || ret == -EINTR)
4402		btrfs_info(fs_info, "balance: canceled");
4403	else
4404		btrfs_info(fs_info, "balance: ended with status: %d", ret);
4405
4406	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4407
4408	if (bargs) {
4409		memset(bargs, 0, sizeof(*bargs));
4410		btrfs_update_ioctl_balance_args(fs_info, bargs);
4411	}
4412
4413	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4414	    balance_need_close(fs_info)) {
4415		reset_balance_state(fs_info);
4416		btrfs_exclop_finish(fs_info);
4417	}
4418
4419	wake_up(&fs_info->balance_wait_q);
4420
4421	return ret;
4422out:
4423	if (bctl->flags & BTRFS_BALANCE_RESUME)
4424		reset_balance_state(fs_info);
4425	else
4426		kfree(bctl);
4427	btrfs_exclop_finish(fs_info);
4428
4429	return ret;
4430}
4431
4432static int balance_kthread(void *data)
4433{
4434	struct btrfs_fs_info *fs_info = data;
4435	int ret = 0;
4436
4437	sb_start_write(fs_info->sb);
4438	mutex_lock(&fs_info->balance_mutex);
4439	if (fs_info->balance_ctl)
4440		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4441	mutex_unlock(&fs_info->balance_mutex);
4442	sb_end_write(fs_info->sb);
4443
4444	return ret;
4445}
4446
4447int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4448{
4449	struct task_struct *tsk;
4450
4451	mutex_lock(&fs_info->balance_mutex);
4452	if (!fs_info->balance_ctl) {
4453		mutex_unlock(&fs_info->balance_mutex);
4454		return 0;
4455	}
4456	mutex_unlock(&fs_info->balance_mutex);
4457
4458	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4459		btrfs_info(fs_info, "balance: resume skipped");
4460		return 0;
4461	}
4462
4463	spin_lock(&fs_info->super_lock);
4464	ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
4465	fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
4466	spin_unlock(&fs_info->super_lock);
4467	/*
4468	 * A ro->rw remount sequence should continue with the paused balance
4469	 * regardless of who paused it (the system or the user), so set
4470	 * the resume flag.
4471	 */
4472	spin_lock(&fs_info->balance_lock);
4473	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4474	spin_unlock(&fs_info->balance_lock);
4475
4476	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4477	return PTR_ERR_OR_ZERO(tsk);
4478}
4479
4480int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4481{
4482	struct btrfs_balance_control *bctl;
4483	struct btrfs_balance_item *item;
4484	struct btrfs_disk_balance_args disk_bargs;
4485	struct btrfs_path *path;
4486	struct extent_buffer *leaf;
4487	struct btrfs_key key;
4488	int ret;
4489
4490	path = btrfs_alloc_path();
4491	if (!path)
4492		return -ENOMEM;
4493
4494	key.objectid = BTRFS_BALANCE_OBJECTID;
4495	key.type = BTRFS_TEMPORARY_ITEM_KEY;
4496	key.offset = 0;
4497
4498	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4499	if (ret < 0)
4500		goto out;
4501	if (ret > 0) { /* ret = -ENOENT; */
4502		ret = 0;
4503		goto out;
4504	}
4505
4506	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4507	if (!bctl) {
4508		ret = -ENOMEM;
4509		goto out;
4510	}
4511
4512	leaf = path->nodes[0];
4513	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4514
4515	bctl->flags = btrfs_balance_flags(leaf, item);
4516	bctl->flags |= BTRFS_BALANCE_RESUME;
4517
4518	btrfs_balance_data(leaf, item, &disk_bargs);
4519	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4520	btrfs_balance_meta(leaf, item, &disk_bargs);
4521	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4522	btrfs_balance_sys(leaf, item, &disk_bargs);
4523	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4524
4525	/*
4526	 * This should never happen, as the paused balance state is recovered
4527 * during mount without any chance for other exclusive ops to collide.
4528	 *
4529	 * This gives the exclusive op status to balance and keeps in paused
4530	 * state until user intervention (cancel or umount). If the ownership
4531	 * cannot be assigned, show a message but do not fail. The balance
4532	 * is in a paused state and must have fs_info::balance_ctl properly
4533	 * set up.
4534	 */
4535	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED))
4536		btrfs_warn(fs_info,
4537	"balance: cannot set exclusive op status, resume manually");
4538
4539	btrfs_release_path(path);
4540
4541	mutex_lock(&fs_info->balance_mutex);
4542	BUG_ON(fs_info->balance_ctl);
4543	spin_lock(&fs_info->balance_lock);
4544	fs_info->balance_ctl = bctl;
4545	spin_unlock(&fs_info->balance_lock);
4546	mutex_unlock(&fs_info->balance_mutex);
4547out:
4548	btrfs_free_path(path);
4549	return ret;
4550}
4551
4552int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4553{
4554	int ret = 0;
4555
4556	mutex_lock(&fs_info->balance_mutex);
4557	if (!fs_info->balance_ctl) {
4558		mutex_unlock(&fs_info->balance_mutex);
4559		return -ENOTCONN;
4560	}
4561
4562	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4563		atomic_inc(&fs_info->balance_pause_req);
4564		mutex_unlock(&fs_info->balance_mutex);
4565
4566		wait_event(fs_info->balance_wait_q,
4567			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4568
4569		mutex_lock(&fs_info->balance_mutex);
4570		/* we are OK with balance_ctl being ripped out from under us */
4571		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4572		atomic_dec(&fs_info->balance_pause_req);
4573	} else {
4574		ret = -ENOTCONN;
4575	}
4576
4577	mutex_unlock(&fs_info->balance_mutex);
4578	return ret;
4579}
4580
4581int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4582{
4583	mutex_lock(&fs_info->balance_mutex);
4584	if (!fs_info->balance_ctl) {
4585		mutex_unlock(&fs_info->balance_mutex);
4586		return -ENOTCONN;
4587	}
4588
4589	/*
4590	 * A paused balance with the item stored on disk can be resumed at
4591	 * mount time if the mount is read-write. Otherwise it's still paused
4592	 * and we must not allow cancelling as it deletes the item.
4593	 */
4594	if (sb_rdonly(fs_info->sb)) {
4595		mutex_unlock(&fs_info->balance_mutex);
4596		return -EROFS;
4597	}
4598
4599	atomic_inc(&fs_info->balance_cancel_req);
4600	/*
4601	 * If balance is running, just wait and return; the balance item
4602	 * is deleted in btrfs_balance() in this case
4603	 */
4604	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4605		mutex_unlock(&fs_info->balance_mutex);
4606		wait_event(fs_info->balance_wait_q,
4607			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4608		mutex_lock(&fs_info->balance_mutex);
4609	} else {
4610		mutex_unlock(&fs_info->balance_mutex);
4611		/*
4612		 * Lock released to allow other waiters to continue; we'll
4613		 * then reexamine the status.
4614		 */
4615		mutex_lock(&fs_info->balance_mutex);
4616
4617		if (fs_info->balance_ctl) {
4618			reset_balance_state(fs_info);
4619			btrfs_exclop_finish(fs_info);
4620			btrfs_info(fs_info, "balance: canceled");
4621		}
4622	}
4623
4624	BUG_ON(fs_info->balance_ctl ||
4625		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4626	atomic_dec(&fs_info->balance_cancel_req);
4627	mutex_unlock(&fs_info->balance_mutex);
4628	return 0;
4629}
4630
4631int btrfs_uuid_scan_kthread(void *data)
4632{
4633	struct btrfs_fs_info *fs_info = data;
4634	struct btrfs_root *root = fs_info->tree_root;
4635	struct btrfs_key key;
4636	struct btrfs_path *path = NULL;
4637	int ret = 0;
4638	struct extent_buffer *eb;
4639	int slot;
4640	struct btrfs_root_item root_item;
4641	u32 item_size;
4642	struct btrfs_trans_handle *trans = NULL;
4643	bool closing = false;
4644
4645	path = btrfs_alloc_path();
4646	if (!path) {
4647		ret = -ENOMEM;
4648		goto out;
4649	}
4650
4651	key.objectid = 0;
4652	key.type = BTRFS_ROOT_ITEM_KEY;
4653	key.offset = 0;
4654
4655	while (1) {
4656		if (btrfs_fs_closing(fs_info)) {
4657			closing = true;
4658			break;
4659		}
4660		ret = btrfs_search_forward(root, &key, path,
4661				BTRFS_OLDEST_GENERATION);
4662		if (ret) {
4663			if (ret > 0)
4664				ret = 0;
4665			break;
4666		}
4667
4668		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4669		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4670		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4671		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4672			goto skip;
4673
4674		eb = path->nodes[0];
4675		slot = path->slots[0];
4676		item_size = btrfs_item_size(eb, slot);
4677		if (item_size < sizeof(root_item))
4678			goto skip;
4679
4680		read_extent_buffer(eb, &root_item,
4681				   btrfs_item_ptr_offset(eb, slot),
4682				   (int)sizeof(root_item));
4683		if (btrfs_root_refs(&root_item) == 0)
4684			goto skip;
4685
4686		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4687		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4688			if (trans)
4689				goto update_tree;
4690
4691			btrfs_release_path(path);
4692			/*
4693			 * 1 - subvol uuid item
4694			 * 1 - received_subvol uuid item
4695			 */
4696			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4697			if (IS_ERR(trans)) {
4698				ret = PTR_ERR(trans);
4699				break;
4700			}
4701			continue;
4702		} else {
4703			goto skip;
4704		}
4705update_tree:
4706		btrfs_release_path(path);
4707		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4708			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4709						  BTRFS_UUID_KEY_SUBVOL,
4710						  key.objectid);
4711			if (ret < 0) {
4712				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4713					ret);
4714				break;
4715			}
4716		}
4717
4718		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4719			ret = btrfs_uuid_tree_add(trans,
4720						  root_item.received_uuid,
4721						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4722						  key.objectid);
4723			if (ret < 0) {
4724				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4725					ret);
4726				break;
4727			}
4728		}
4729
4730skip:
4731		btrfs_release_path(path);
4732		if (trans) {
4733			ret = btrfs_end_transaction(trans);
4734			trans = NULL;
4735			if (ret)
4736				break;
4737		}
4738
4739		if (key.offset < (u64)-1) {
4740			key.offset++;
4741		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4742			key.offset = 0;
4743			key.type = BTRFS_ROOT_ITEM_KEY;
4744		} else if (key.objectid < (u64)-1) {
4745			key.offset = 0;
4746			key.type = BTRFS_ROOT_ITEM_KEY;
4747			key.objectid++;
4748		} else {
4749			break;
4750		}
4751		cond_resched();
4752	}
4753
4754out:
4755	btrfs_free_path(path);
4756	if (trans && !IS_ERR(trans))
4757		btrfs_end_transaction(trans);
4758	if (ret)
4759		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4760	else if (!closing)
4761		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4762	up(&fs_info->uuid_tree_rescan_sem);
4763	return 0;
4764}
4765
4766int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4767{
4768	struct btrfs_trans_handle *trans;
4769	struct btrfs_root *tree_root = fs_info->tree_root;
4770	struct btrfs_root *uuid_root;
4771	struct task_struct *task;
4772	int ret;
4773
4774	/*
4775	 * 1 - root node
4776	 * 1 - root item
4777	 */
4778	trans = btrfs_start_transaction(tree_root, 2);
4779	if (IS_ERR(trans))
4780		return PTR_ERR(trans);
4781
4782	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4783	if (IS_ERR(uuid_root)) {
4784		ret = PTR_ERR(uuid_root);
4785		btrfs_abort_transaction(trans, ret);
4786		btrfs_end_transaction(trans);
4787		return ret;
4788	}
4789
4790	fs_info->uuid_root = uuid_root;
4791
4792	ret = btrfs_commit_transaction(trans);
4793	if (ret)
4794		return ret;
4795
4796	down(&fs_info->uuid_tree_rescan_sem);
4797	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4798	if (IS_ERR(task)) {
4799		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4800		btrfs_warn(fs_info, "failed to start uuid_scan task");
4801		up(&fs_info->uuid_tree_rescan_sem);
4802		return PTR_ERR(task);
4803	}
4804
4805	return 0;
4806}
4807
4808/*
4809 * Shrinking a device means finding all of the device extents past
4810 * the new size, and then following the back refs to the chunks.
4811 * The chunk relocation code actually frees the device extents.
4812 */
4813int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4814{
4815	struct btrfs_fs_info *fs_info = device->fs_info;
4816	struct btrfs_root *root = fs_info->dev_root;
4817	struct btrfs_trans_handle *trans;
4818	struct btrfs_dev_extent *dev_extent = NULL;
4819	struct btrfs_path *path;
4820	u64 length;
4821	u64 chunk_offset;
4822	int ret;
4823	int slot;
4824	int failed = 0;
4825	bool retried = false;
4826	struct extent_buffer *l;
4827	struct btrfs_key key;
4828	struct btrfs_super_block *super_copy = fs_info->super_copy;
4829	u64 old_total = btrfs_super_total_bytes(super_copy);
4830	u64 old_size = btrfs_device_get_total_bytes(device);
4831	u64 diff;
4832	u64 start;
4833
4834	new_size = round_down(new_size, fs_info->sectorsize);
4835	start = new_size;
4836	diff = round_down(old_size - new_size, fs_info->sectorsize);
4837
4838	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4839		return -EINVAL;
4840
4841	path = btrfs_alloc_path();
4842	if (!path)
4843		return -ENOMEM;
4844
4845	path->reada = READA_BACK;
4846
4847	trans = btrfs_start_transaction(root, 0);
4848	if (IS_ERR(trans)) {
4849		btrfs_free_path(path);
4850		return PTR_ERR(trans);
4851	}
4852
4853	mutex_lock(&fs_info->chunk_mutex);
4854
4855	btrfs_device_set_total_bytes(device, new_size);
4856	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4857		device->fs_devices->total_rw_bytes -= diff;
4858		atomic64_sub(diff, &fs_info->free_chunk_space);
4859	}
4860
4861	/*
4862	 * Once the device's size has been set to the new size, ensure all
4863	 * in-memory chunks are synced to disk so that the loop below sees them
4864	 * and relocates them accordingly.
4865	 */
4866	if (contains_pending_extent(device, &start, diff)) {
4867		mutex_unlock(&fs_info->chunk_mutex);
4868		ret = btrfs_commit_transaction(trans);
4869		if (ret)
4870			goto done;
4871	} else {
4872		mutex_unlock(&fs_info->chunk_mutex);
4873		btrfs_end_transaction(trans);
4874	}
4875
4876again:
4877	key.objectid = device->devid;
4878	key.offset = (u64)-1;
4879	key.type = BTRFS_DEV_EXTENT_KEY;
4880
4881	do {
4882		mutex_lock(&fs_info->reclaim_bgs_lock);
4883		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4884		if (ret < 0) {
4885			mutex_unlock(&fs_info->reclaim_bgs_lock);
4886			goto done;
4887		}
4888
4889		ret = btrfs_previous_item(root, path, 0, key.type);
4890		if (ret) {
4891			mutex_unlock(&fs_info->reclaim_bgs_lock);
4892			if (ret < 0)
4893				goto done;
4894			ret = 0;
4895			btrfs_release_path(path);
4896			break;
4897		}
4898
4899		l = path->nodes[0];
4900		slot = path->slots[0];
4901		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4902
4903		if (key.objectid != device->devid) {
4904			mutex_unlock(&fs_info->reclaim_bgs_lock);
4905			btrfs_release_path(path);
4906			break;
4907		}
4908
4909		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4910		length = btrfs_dev_extent_length(l, dev_extent);
4911
4912		if (key.offset + length <= new_size) {
4913			mutex_unlock(&fs_info->reclaim_bgs_lock);
4914			btrfs_release_path(path);
4915			break;
4916		}
4917
4918		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4919		btrfs_release_path(path);
4920
4921		/*
4922		 * We may be relocating the only data chunk we have,
4923		 * which could potentially end up losing the data raid
4924		 * profile, so let's allocate an empty one in
4925		 * advance.
4926		 */
4927		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4928		if (ret < 0) {
4929			mutex_unlock(&fs_info->reclaim_bgs_lock);
4930			goto done;
4931		}
4932
4933		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4934		mutex_unlock(&fs_info->reclaim_bgs_lock);
4935		if (ret == -ENOSPC) {
4936			failed++;
4937		} else if (ret) {
4938			if (ret == -ETXTBSY) {
4939				btrfs_warn(fs_info,
4940		   "could not shrink block group %llu due to active swapfile",
4941					   chunk_offset);
4942			}
4943			goto done;
4944		}
4945	} while (key.offset-- > 0);
4946
4947	if (failed && !retried) {
4948		failed = 0;
4949		retried = true;
4950		goto again;
4951	} else if (failed && retried) {
4952		ret = -ENOSPC;
4953		goto done;
4954	}
4955
4956	/* Shrinking succeeded, else we would be at "done". */
4957	trans = btrfs_start_transaction(root, 0);
4958	if (IS_ERR(trans)) {
4959		ret = PTR_ERR(trans);
4960		goto done;
4961	}
4962
4963	mutex_lock(&fs_info->chunk_mutex);
4964	/* Clear all state bits beyond the shrunk device size */
4965	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4966			  CHUNK_STATE_MASK);
4967
4968	btrfs_device_set_disk_total_bytes(device, new_size);
4969	if (list_empty(&device->post_commit_list))
4970		list_add_tail(&device->post_commit_list,
4971			      &trans->transaction->dev_update_list);
4972
4973	WARN_ON(diff > old_total);
4974	btrfs_set_super_total_bytes(super_copy,
4975			round_down(old_total - diff, fs_info->sectorsize));
4976	mutex_unlock(&fs_info->chunk_mutex);
4977
4978	btrfs_reserve_chunk_metadata(trans, false);
4979	/* Now btrfs_update_device() will change the on-disk size. */
4980	ret = btrfs_update_device(trans, device);
4981	btrfs_trans_release_chunk_metadata(trans);
4982	if (ret < 0) {
4983		btrfs_abort_transaction(trans, ret);
4984		btrfs_end_transaction(trans);
4985	} else {
4986		ret = btrfs_commit_transaction(trans);
4987	}
4988done:
4989	btrfs_free_path(path);
4990	if (ret) {
4991		mutex_lock(&fs_info->chunk_mutex);
4992		btrfs_device_set_total_bytes(device, old_size);
4993		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4994			device->fs_devices->total_rw_bytes += diff;
4995		atomic64_add(diff, &fs_info->free_chunk_space);
4996		mutex_unlock(&fs_info->chunk_mutex);
4997	}
4998	return ret;
4999}
5000
5001static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
5002			   struct btrfs_key *key,
5003			   struct btrfs_chunk *chunk, int item_size)
5004{
5005	struct btrfs_super_block *super_copy = fs_info->super_copy;
5006	struct btrfs_disk_key disk_key;
5007	u32 array_size;
5008	u8 *ptr;
5009
5010	lockdep_assert_held(&fs_info->chunk_mutex);
5011
5012	array_size = btrfs_super_sys_array_size(super_copy);
5013	if (array_size + item_size + sizeof(disk_key)
5014			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
5015		return -EFBIG;
5016
5017	ptr = super_copy->sys_chunk_array + array_size;
5018	btrfs_cpu_key_to_disk(&disk_key, key);
5019	memcpy(ptr, &disk_key, sizeof(disk_key));
5020	ptr += sizeof(disk_key);
5021	memcpy(ptr, chunk, item_size);
5022	item_size += sizeof(disk_key);
5023	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
5024
5025	return 0;
5026}
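
/*
 * Layout sketch (illustrative): sys_chunk_array packs back-to-back
 * (struct btrfs_disk_key, struct btrfs_chunk incl. stripes) pairs:
 *
 *   [disk_key][chunk item][disk_key][chunk item]...
 *
 * hence the new entry is appended at array_size and the recorded size
 * grows by item_size + sizeof(disk_key).
 */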
5027
5028/*
5029 * sort the devices in descending order by max_avail, total_avail
5030 */
5031static int btrfs_cmp_device_info(const void *a, const void *b)
5032{
5033	const struct btrfs_device_info *di_a = a;
5034	const struct btrfs_device_info *di_b = b;
5035
5036	if (di_a->max_avail > di_b->max_avail)
5037		return -1;
5038	if (di_a->max_avail < di_b->max_avail)
5039		return 1;
5040	if (di_a->total_avail > di_b->total_avail)
5041		return -1;
5042	if (di_a->total_avail < di_b->total_avail)
5043		return 1;
5044	return 0;
5045}
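
/*
 * For example (values illustrative): devices with (max_avail,
 * total_avail) of (2G, 3G), (2G, 2G) and (1G, 5G) sort in exactly that
 * order: primarily by the largest hole, then by total free space.
 */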
5046
5047static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
5048{
5049	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5050		return;
5051
5052	btrfs_set_fs_incompat(info, RAID56);
5053}
5054
5055static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
5056{
5057	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
5058		return;
5059
5060	btrfs_set_fs_incompat(info, RAID1C34);
5061}
5062
5063/*
5064 * Structure used internally for btrfs_create_chunk() function.
5065 * Wraps needed parameters.
5066 */
5067struct alloc_chunk_ctl {
5068	u64 start;
5069	u64 type;
5070	/* Total number of stripes to allocate */
5071	int num_stripes;
5072	/* sub_stripes info for map */
5073	int sub_stripes;
5074	/* Stripes per device */
5075	int dev_stripes;
5076	/* Maximum number of devices to use */
5077	int devs_max;
5078	/* Minimum number of devices to use */
5079	int devs_min;
5080	/* ndevs has to be a multiple of this */
5081	int devs_increment;
5082	/* Number of copies */
5083	int ncopies;
5084	/* Number of stripes worth of bytes to store parity information */
5085	int nparity;
5086	u64 max_stripe_size;
5087	u64 max_chunk_size;
5088	u64 dev_extent_min;
5089	u64 stripe_size;
5090	u64 chunk_size;
5091	int ndevs;
5092};
5093
5094static void init_alloc_chunk_ctl_policy_regular(
5095				struct btrfs_fs_devices *fs_devices,
5096				struct alloc_chunk_ctl *ctl)
5097{
5098	struct btrfs_space_info *space_info;
5099
5100	space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type);
5101	ASSERT(space_info);
5102
5103	ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
5104	ctl->max_stripe_size = ctl->max_chunk_size;
5105
5106	if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
5107		ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
5108
5109	/* We don't want a chunk larger than 10% of writable space */
5110	ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
5111				  ctl->max_chunk_size);
5112	ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
5113}
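
/*
 * Worked example (illustrative, assuming the default 10GiB data chunk
 * size): on a filesystem with 50GiB of writable space, the 10% rule
 * reduces max_chunk_size for a data chunk from 10GiB to 5GiB, while
 * dev_extent_min stays at BTRFS_STRIPE_LEN * dev_stripes.
 */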
5114
5115static void init_alloc_chunk_ctl_policy_zoned(
5116				      struct btrfs_fs_devices *fs_devices,
5117				      struct alloc_chunk_ctl *ctl)
5118{
5119	u64 zone_size = fs_devices->fs_info->zone_size;
5120	u64 limit;
5121	int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5122	int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5123	u64 min_chunk_size = min_data_stripes * zone_size;
5124	u64 type = ctl->type;
5125
5126	ctl->max_stripe_size = zone_size;
5127	if (type & BTRFS_BLOCK_GROUP_DATA) {
5128		ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5129						 zone_size);
5130	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5131		ctl->max_chunk_size = ctl->max_stripe_size;
5132	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5133		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5134		ctl->devs_max = min_t(int, ctl->devs_max,
5135				      BTRFS_MAX_DEVS_SYS_CHUNK);
5136	} else {
5137		BUG();
5138	}
5139
5140	/* We don't want a chunk larger than 10% of writable space */
5141	limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10),
5142			       zone_size),
5143		    min_chunk_size);
5144	ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5145	ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5146}
5147
5148static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5149				 struct alloc_chunk_ctl *ctl)
5150{
5151	int index = btrfs_bg_flags_to_raid_index(ctl->type);
5152
5153	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5154	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5155	ctl->devs_max = btrfs_raid_array[index].devs_max;
5156	if (!ctl->devs_max)
5157		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5158	ctl->devs_min = btrfs_raid_array[index].devs_min;
5159	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5160	ctl->ncopies = btrfs_raid_array[index].ncopies;
5161	ctl->nparity = btrfs_raid_array[index].nparity;
5162	ctl->ndevs = 0;
5163
5164	switch (fs_devices->chunk_alloc_policy) {
5165	case BTRFS_CHUNK_ALLOC_REGULAR:
5166		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5167		break;
5168	case BTRFS_CHUNK_ALLOC_ZONED:
5169		init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5170		break;
5171	default:
5172		BUG();
5173	}
5174}
5175
5176static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5177			      struct alloc_chunk_ctl *ctl,
5178			      struct btrfs_device_info *devices_info)
5179{
5180	struct btrfs_fs_info *info = fs_devices->fs_info;
5181	struct btrfs_device *device;
5182	u64 total_avail;
5183	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5184	int ret;
5185	int ndevs = 0;
5186	u64 max_avail;
5187	u64 dev_offset;
5188
5189	/*
5190	 * in the first pass through the devices list, we gather information
5191	 * about the available holes on each device.
5192	 */
5193	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5194		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5195			WARN(1, KERN_ERR
5196			       "BTRFS: read-only device in alloc_list\n");
5197			continue;
5198		}
5199
5200		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5201					&device->dev_state) ||
5202		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5203			continue;
5204
5205		if (device->total_bytes > device->bytes_used)
5206			total_avail = device->total_bytes - device->bytes_used;
5207		else
5208			total_avail = 0;
5209
5210		/* If there is no space on this device, skip it. */
5211		if (total_avail < ctl->dev_extent_min)
5212			continue;
5213
5214		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5215					   &max_avail);
5216		if (ret && ret != -ENOSPC)
5217			return ret;
5218
5219		if (ret == 0)
5220			max_avail = dev_extent_want;
5221
5222		if (max_avail < ctl->dev_extent_min) {
5223			if (btrfs_test_opt(info, ENOSPC_DEBUG))
5224				btrfs_debug(info,
5225			"%s: devid %llu has no free space, have=%llu want=%llu",
5226					    __func__, device->devid, max_avail,
5227					    ctl->dev_extent_min);
5228			continue;
5229		}
5230
5231		if (ndevs == fs_devices->rw_devices) {
5232			WARN(1, "%s: found more than %llu devices\n",
5233			     __func__, fs_devices->rw_devices);
5234			break;
5235		}
5236		devices_info[ndevs].dev_offset = dev_offset;
5237		devices_info[ndevs].max_avail = max_avail;
5238		devices_info[ndevs].total_avail = total_avail;
5239		devices_info[ndevs].dev = device;
5240		++ndevs;
5241	}
5242	ctl->ndevs = ndevs;
5243
5244	/*
5245	 * now sort the devices by hole size / available space
5246	 */
5247	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5248	     btrfs_cmp_device_info, NULL);
5249
5250	return 0;
5251}
5252
5253static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5254				      struct btrfs_device_info *devices_info)
5255{
5256	/* Number of stripes that count for block group size */
5257	int data_stripes;
5258
5259	/*
5260	 * The primary goal is to maximize the number of stripes, so use as
5261	 * many devices as possible, even if the stripes are not maximum sized.
5262	 *
5263	 * The DUP profile stores more than one stripe per device, the
5264	 * max_avail is the total size so we have to adjust.
5265	 */
5266	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5267				   ctl->dev_stripes);
5268	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5269
5270	/* This will have to be fixed for RAID1 and RAID10 over more drives */
5271	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5272
5273	/*
5274	 * Use the number of data stripes to figure out how big this chunk is
5275	 * really going to be in terms of logical address space, and compare
5276	 * that answer with the max chunk size. If it's higher, we try to
5277	 * reduce stripe_size.
5278	 */
5279	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5280		/*
5281		 * Reduce stripe_size, round it up to a 16MB boundary again and
5282		 * then use it, unless it ends up being even bigger than the
5283		 * previous value we had already.
5284		 */
5285		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5286							data_stripes), SZ_16M),
5287				       ctl->stripe_size);
5288	}
5289
5290	/* Stripe size should not go beyond 1G. */
5291	ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
5292
5293	/* Align to BTRFS_STRIPE_LEN */
5294	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5295	ctl->chunk_size = ctl->stripe_size * data_stripes;
5296
5297	return 0;
5298}
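
/*
 * Worked example (values illustrative): for a RAID0 data chunk over 3
 * devices with 1TiB holes and max_chunk_size of 10GiB, stripe_size
 * starts at 1TiB; 1TiB * 3 data stripes exceeds the cap, so it is cut
 * to round_up(10GiB / 3, 16MiB) ~= 3.34GiB, then clamped to the 1GiB
 * ceiling, and the result is a 3GiB chunk of three 1GiB stripes.
 */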
5299
5300static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5301				    struct btrfs_device_info *devices_info)
5302{
5303	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5304	/* Number of stripes that count for block group size */
5305	int data_stripes;
5306
5307	/*
5308	 * It should hold because:
5309	 *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
5310	 */
5311	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5312
5313	ctl->stripe_size = zone_size;
5314	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5315	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5316
5317	/* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
5318	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5319		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5320					     ctl->stripe_size) + ctl->nparity,
5321				     ctl->dev_stripes);
5322		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5323		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5324		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5325	}
5326
5327	ctl->chunk_size = ctl->stripe_size * data_stripes;
5328
5329	return 0;
5330}
5331
5332static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5333			      struct alloc_chunk_ctl *ctl,
5334			      struct btrfs_device_info *devices_info)
5335{
5336	struct btrfs_fs_info *info = fs_devices->fs_info;
5337
5338	/*
5339	 * Round down to the number of usable stripes; devs_increment can be
5340	 * any number, so we can't use round_down(), which requires a power of
5341	 * 2, while rounddown() is safe.
5342	 */
5343	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5344
5345	if (ctl->ndevs < ctl->devs_min) {
5346		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5347			btrfs_debug(info,
5348	"%s: not enough devices with free space: have=%d minimum required=%d",
5349				    __func__, ctl->ndevs, ctl->devs_min);
5350		}
5351		return -ENOSPC;
5352	}
5353
5354	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5355
5356	switch (fs_devices->chunk_alloc_policy) {
5357	case BTRFS_CHUNK_ALLOC_REGULAR:
5358		return decide_stripe_size_regular(ctl, devices_info);
5359	case BTRFS_CHUNK_ALLOC_ZONED:
5360		return decide_stripe_size_zoned(ctl, devices_info);
5361	default:
5362		BUG();
5363	}
5364}
5365
5366static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5367			struct alloc_chunk_ctl *ctl,
5368			struct btrfs_device_info *devices_info)
5369{
5370	struct btrfs_fs_info *info = trans->fs_info;
5371	struct map_lookup *map = NULL;
5372	struct extent_map_tree *em_tree;
5373	struct btrfs_block_group *block_group;
5374	struct extent_map *em;
5375	u64 start = ctl->start;
5376	u64 type = ctl->type;
5377	int ret;
5378	int i;
5379	int j;
5380
5381	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5382	if (!map)
5383		return ERR_PTR(-ENOMEM);
5384	map->num_stripes = ctl->num_stripes;
5385
5386	for (i = 0; i < ctl->ndevs; ++i) {
5387		for (j = 0; j < ctl->dev_stripes; ++j) {
5388			int s = i * ctl->dev_stripes + j;
5389			map->stripes[s].dev = devices_info[i].dev;
5390			map->stripes[s].physical = devices_info[i].dev_offset +
5391						   j * ctl->stripe_size;
5392		}
5393	}
5394	map->stripe_len = BTRFS_STRIPE_LEN;
5395	map->io_align = BTRFS_STRIPE_LEN;
5396	map->io_width = BTRFS_STRIPE_LEN;
5397	map->type = type;
5398	map->sub_stripes = ctl->sub_stripes;
5399
5400	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5401
5402	em = alloc_extent_map();
5403	if (!em) {
5404		kfree(map);
5405		return ERR_PTR(-ENOMEM);
5406	}
5407	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5408	em->map_lookup = map;
5409	em->start = start;
5410	em->len = ctl->chunk_size;
5411	em->block_start = 0;
5412	em->block_len = em->len;
5413	em->orig_block_len = ctl->stripe_size;
5414
5415	em_tree = &info->mapping_tree;
5416	write_lock(&em_tree->lock);
5417	ret = add_extent_mapping(em_tree, em, 0);
5418	if (ret) {
5419		write_unlock(&em_tree->lock);
5420		free_extent_map(em);
5421		return ERR_PTR(ret);
5422	}
5423	write_unlock(&em_tree->lock);
5424
5425	block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5426	if (IS_ERR(block_group))
5427		goto error_del_extent;
5428
5429	for (i = 0; i < map->num_stripes; i++) {
5430		struct btrfs_device *dev = map->stripes[i].dev;
5431
5432		btrfs_device_set_bytes_used(dev,
5433					    dev->bytes_used + ctl->stripe_size);
5434		if (list_empty(&dev->post_commit_list))
5435			list_add_tail(&dev->post_commit_list,
5436				      &trans->transaction->dev_update_list);
5437	}
5438
5439	atomic64_sub(ctl->stripe_size * map->num_stripes,
5440		     &info->free_chunk_space);
5441
5442	free_extent_map(em);
5443	check_raid56_incompat_flag(info, type);
5444	check_raid1c34_incompat_flag(info, type);
5445
5446	return block_group;
5447
5448error_del_extent:
5449	write_lock(&em_tree->lock);
5450	remove_extent_mapping(em_tree, em);
5451	write_unlock(&em_tree->lock);
5452
5453	/* One for our allocation */
5454	free_extent_map(em);
5455	/* One for the tree reference */
5456	free_extent_map(em);
5457
5458	return block_group;
5459}
5460
5461struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
5462					    u64 type)
5463{
5464	struct btrfs_fs_info *info = trans->fs_info;
5465	struct btrfs_fs_devices *fs_devices = info->fs_devices;
5466	struct btrfs_device_info *devices_info = NULL;
5467	struct alloc_chunk_ctl ctl;
5468	struct btrfs_block_group *block_group;
5469	int ret;
5470
5471	lockdep_assert_held(&info->chunk_mutex);
5472
5473	if (!alloc_profile_is_valid(type, 0)) {
5474		ASSERT(0);
5475		return ERR_PTR(-EINVAL);
5476	}
5477
5478	if (list_empty(&fs_devices->alloc_list)) {
5479		if (btrfs_test_opt(info, ENOSPC_DEBUG))
5480			btrfs_debug(info, "%s: no writable device", __func__);
5481		return ERR_PTR(-ENOSPC);
5482	}
5483
5484	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5485		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5486		ASSERT(0);
5487		return ERR_PTR(-EINVAL);
5488	}
5489
5490	ctl.start = find_next_chunk(info);
5491	ctl.type = type;
5492	init_alloc_chunk_ctl(fs_devices, &ctl);
5493
5494	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5495			       GFP_NOFS);
5496	if (!devices_info)
5497		return ERR_PTR(-ENOMEM);
5498
5499	ret = gather_device_info(fs_devices, &ctl, devices_info);
5500	if (ret < 0) {
5501		block_group = ERR_PTR(ret);
5502		goto out;
5503	}
5504
5505	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5506	if (ret < 0) {
5507		block_group = ERR_PTR(ret);
5508		goto out;
5509	}
5510
5511	block_group = create_chunk(trans, &ctl, devices_info);
5512
5513out:
5514	kfree(devices_info);
5515	return block_group;
5516}
5517
5518/*
5519 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5520 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
5521 * system chunks.
5522 *
5523 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5524 * phases.
5525 */
5526int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5527				     struct btrfs_block_group *bg)
5528{
5529	struct btrfs_fs_info *fs_info = trans->fs_info;
5530	struct btrfs_root *chunk_root = fs_info->chunk_root;
5531	struct btrfs_key key;
5532	struct btrfs_chunk *chunk;
5533	struct btrfs_stripe *stripe;
5534	struct extent_map *em;
5535	struct map_lookup *map;
5536	size_t item_size;
5537	int i;
5538	int ret;
5539
5540	/*
5541	 * We take the chunk_mutex for 2 reasons:
5542	 *
5543	 * 1) Updates and insertions in the chunk btree must be done while holding
5544	 *    the chunk_mutex, as well as updating the system chunk array in the
5545	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
5546	 *    details;
5547	 *
5548	 * 2) To prevent races with the final phase of a device replace operation
5549	 *    that replaces the device object associated with the map's stripes,
5550	 *    because the device object's id can change at any time during that
5551	 *    final phase of the device replace operation
5552	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5553	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5554	 *    which would cause a failure when updating the device item, which does
5555	 *    not exist, or persisting a stripe of the chunk item with such ID.
5556	 *    Here we can't use the device_list_mutex because our caller already
5557	 *    has locked the chunk_mutex, and the final phase of device replace
5558	 *    acquires both mutexes - first the device_list_mutex and then the
5559	 *    chunk_mutex. Using any of those two mutexes protects us from a
5560	 *    concurrent device replace.
5561	 */
5562	lockdep_assert_held(&fs_info->chunk_mutex);
5563
5564	em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5565	if (IS_ERR(em)) {
5566		ret = PTR_ERR(em);
5567		btrfs_abort_transaction(trans, ret);
5568		return ret;
5569	}
5570
5571	map = em->map_lookup;
5572	item_size = btrfs_chunk_item_size(map->num_stripes);
5573
5574	chunk = kzalloc(item_size, GFP_NOFS);
5575	if (!chunk) {
5576		ret = -ENOMEM;
5577		btrfs_abort_transaction(trans, ret);
5578		goto out;
5579	}
5580
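	/*
	 * Persist the in-memory state of each device holding a stripe of this
	 * chunk (e.g. bytes_used, bumped when the chunk was created) into its
	 * device item in the chunk tree.
	 */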
5581	for (i = 0; i < map->num_stripes; i++) {
5582		struct btrfs_device *device = map->stripes[i].dev;
5583
5584		ret = btrfs_update_device(trans, device);
5585		if (ret)
5586			goto out;
5587	}
5588
5589	stripe = &chunk->stripe;
5590	for (i = 0; i < map->num_stripes; i++) {
5591		struct btrfs_device *device = map->stripes[i].dev;
5592		const u64 dev_offset = map->stripes[i].physical;
5593
5594		btrfs_set_stack_stripe_devid(stripe, device->devid);
5595		btrfs_set_stack_stripe_offset(stripe, dev_offset);
5596		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5597		stripe++;
5598	}
5599
5600	btrfs_set_stack_chunk_length(chunk, bg->length);
5601	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
5602	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5603	btrfs_set_stack_chunk_type(chunk, map->type);
5604	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5605	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5606	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5607	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5608	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5609
5610	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5611	key.type = BTRFS_CHUNK_ITEM_KEY;
5612	key.offset = bg->start;
5613
5614	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5615	if (ret)
5616		goto out;
5617
5618	set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);
5619
5620	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5621		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5622		if (ret)
5623			goto out;
5624	}
5625
5626out:
5627	kfree(chunk);
5628	free_extent_map(em);
5629	return ret;
5630}
5631
5632static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5633{
5634	struct btrfs_fs_info *fs_info = trans->fs_info;
5635	u64 alloc_profile;
5636	struct btrfs_block_group *meta_bg;
5637	struct btrfs_block_group *sys_bg;
5638
5639	/*
5640	 * When adding a new device for sprouting, the seed device is read-only
5641	 * so we must first allocate a metadata and a system chunk. But before
5642	 * adding the block group items to the extent, device and chunk btrees,
5643	 * we must first:
5644	 *
5645	 * 1) Create both chunks without doing any changes to the btrees, as
5646	 *    otherwise we would get -ENOSPC since the block groups from the
5647	 *    seed device are read-only;
5648	 *
5649	 * 2) Add the device item for the new sprout device - finishing the setup
5650	 *    of a new block group requires updating the device item in the chunk
5651	 *    btree, so it must exist when we attempt to do it. The previous step
5652	 *    ensures this does not fail with -ENOSPC.
5653	 *
5654	 * After that we can add the block group items to their btrees:
5655	 * update existing device item in the chunk btree, add a new block group
5656	 * item to the extent btree, add a new chunk item to the chunk btree and
5657	 * finally add the new device extent items to the devices btree.
5658	 */
5659
5660	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5661	meta_bg = btrfs_create_chunk(trans, alloc_profile);
5662	if (IS_ERR(meta_bg))
5663		return PTR_ERR(meta_bg);
5664
5665	alloc_profile = btrfs_system_alloc_profile(fs_info);
5666	sys_bg = btrfs_create_chunk(trans, alloc_profile);
5667	if (IS_ERR(sys_bg))
5668		return PTR_ERR(sys_bg);
5669
5670	return 0;
5671}
5672
5673static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5674{
5675	const int index = btrfs_bg_flags_to_raid_index(map->type);
5676
5677	return btrfs_raid_array[index].tolerated_failures;
5678}
5679
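/*
 * Check whether the chunk at @chunk_offset is still writeable: every stripe
 * must sit on a writeable device, and the number of missing devices must not
 * exceed what the chunk's RAID profile tolerates.
 */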
5680bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5681{
5682	struct extent_map *em;
5683	struct map_lookup *map;
5684	int miss_ndevs = 0;
5685	int i;
5686	bool ret = true;
5687
5688	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5689	if (IS_ERR(em))
5690		return false;
5691
5692	map = em->map_lookup;
5693	for (i = 0; i < map->num_stripes; i++) {
5694		if (test_bit(BTRFS_DEV_STATE_MISSING,
5695					&map->stripes[i].dev->dev_state)) {
5696			miss_ndevs++;
5697			continue;
5698		}
5699		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5700					&map->stripes[i].dev->dev_state)) {
5701			ret = false;
5702			goto end;
5703		}
5704	}
5705
5706	/*
5707	 * If the number of missing devices is larger than max errors, we
5708	 * cannot write data into that chunk successfully.
5709	 */
5710	if (miss_ndevs > btrfs_chunk_max_errors(map))
5711		ret = false;
5712end:
5713	free_extent_map(em);
5714	return ret;
5715}
5716
5717void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5718{
5719	struct extent_map *em;
5720
5721	while (1) {
5722		write_lock(&tree->lock);
5723		em = lookup_extent_mapping(tree, 0, (u64)-1);
5724		if (em)
5725			remove_extent_mapping(tree, em);
5726		write_unlock(&tree->lock);
5727		if (!em)
5728			break;
5729		/* once for us */
5730		free_extent_map(em);
5731		/* once for the tree */
5732		free_extent_map(em);
5733	}
5734}
5735
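/*
 * Return how many independent ways the range [@logical, @logical + @len)
 * can be read. One extra copy is counted while a device replace is running,
 * as the target device mirrors the source.
 */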
5736int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5737{
5738	struct extent_map *em;
5739	struct map_lookup *map;
5740	enum btrfs_raid_types index;
5741	int ret = 1;
5742
5743	em = btrfs_get_chunk_map(fs_info, logical, len);
5744	if (IS_ERR(em))
5745		/*
5746		 * We could return errors for these cases, but that could get ugly
5747		 * and we'd probably end up doing the same thing anyway: nothing
5748		 * else but exit. So return 1 so the callers don't try to use
5749		 * other copies.
5750		 */
5751		return 1;
5752
5753	map = em->map_lookup;
5754	index = btrfs_bg_flags_to_raid_index(map->type);
5755
5756	/* Non-RAID56, use their ncopies from btrfs_raid_array. */
5757	if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5758		ret = btrfs_raid_array[index].ncopies;
5759	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5760		ret = 2;
5761	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5762		/*
5763		 * There could be two corrupted data stripes, we need
5764		 * to loop retry in order to rebuild the correct data.
5765		 *
5766		 * Fail a stripe at a time on every retry except the
5767		 * stripe under reconstruction.
5768		 */
5769		ret = map->num_stripes;
5770	free_extent_map(em);
5771
5772	down_read(&fs_info->dev_replace.rwsem);
5773	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5774	    fs_info->dev_replace.tgtdev)
5775		ret++;
5776	up_read(&fs_info->dev_replace.rwsem);
5777
5778	return ret;
5779}
5780
5781unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5782				    u64 logical)
5783{
5784	struct extent_map *em;
5785	struct map_lookup *map;
5786	unsigned long len = fs_info->sectorsize;
5787
5788	if (!btrfs_fs_incompat(fs_info, RAID56))
5789		return len;
5790
5791	em = btrfs_get_chunk_map(fs_info, logical, len);
5792
5793	if (!WARN_ON(IS_ERR(em))) {
5794		map = em->map_lookup;
5795		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5796			len = map->stripe_len * nr_data_stripes(map);
5797		free_extent_map(em);
5798	}
5799	return len;
5800}
5801
5802int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5803{
5804	struct extent_map *em;
5805	struct map_lookup *map;
5806	int ret = 0;
5807
5808	if (!btrfs_fs_incompat(fs_info, RAID56))
5809		return 0;
5810
5811	em = btrfs_get_chunk_map(fs_info, logical, len);
5812
5813	if (!WARN_ON(IS_ERR(em))) {
5814		map = em->map_lookup;
5815		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5816			ret = 1;
5817		free_extent_map(em);
5818	}
5819	return ret;
5820}
5821
5822static int find_live_mirror(struct btrfs_fs_info *fs_info,
5823			    struct map_lookup *map, int first,
5824			    int dev_replace_is_ongoing)
5825{
5826	int i;
5827	int num_stripes;
5828	int preferred_mirror;
5829	int tolerance;
5830	struct btrfs_device *srcdev;
5831
5832	ASSERT((map->type &
5833		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5834
5835	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5836		num_stripes = map->sub_stripes;
5837	else
5838		num_stripes = map->num_stripes;
5839
5840	switch (fs_info->fs_devices->read_policy) {
5841	default:
5842		/* Shouldn't happen, just warn and use pid instead of failing */
5843		btrfs_warn_rl(fs_info,
5844			      "unknown read_policy type %u, reset to pid",
5845			      fs_info->fs_devices->read_policy);
5846		fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5847		fallthrough;
5848	case BTRFS_READ_POLICY_PID:
5849		preferred_mirror = first + (current->pid % num_stripes);
5850		break;
5851	}
5852
5853	if (dev_replace_is_ongoing &&
5854	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5855	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5856		srcdev = fs_info->dev_replace.srcdev;
5857	else
5858		srcdev = NULL;
5859
5860	/*
5861	 * Try to avoid the drive that is the source drive for a dev-replace
5862	 * procedure; only choose it if no other non-missing mirror is
5863	 * available.
5864	 */
5865	for (tolerance = 0; tolerance < 2; tolerance++) {
5866		if (map->stripes[preferred_mirror].dev->bdev &&
5867		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5868			return preferred_mirror;
5869		for (i = first; i < first + num_stripes; i++) {
5870			if (map->stripes[i].dev->bdev &&
5871			    (tolerance || map->stripes[i].dev != srcdev))
5872				return i;
5873		}
5874	}
5875
5876	/* We couldn't find one that doesn't fail. Just return something
5877	 * and the IO error handling code will clean up eventually.
5878	 */
5879	return preferred_mirror;
5880}
5881
5882/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5883static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
5884{
5885	int i;
5886	int again = 1;
5887
5888	while (again) {
5889		again = 0;
5890		for (i = 0; i < num_stripes - 1; i++) {
5891			/* Swap if parity is on a smaller index */
5892			if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
5893				swap(bioc->stripes[i], bioc->stripes[i + 1]);
5894				swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
5895				again = 1;
5896			}
5897		}
5898	}
5899}
5900
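/*
 * Everything is allocated in one go, with the variable-length arrays laid
 * out right after the struct:
 *
 *   [ struct btrfs_io_context        ]
 *   [ io_stripe  * total_stripes     ]  <- bioc->stripes
 *   [ int        * real_stripes      ]  <- bioc->tgtdev_map
 *   [ u64        * total_stripes     ]  <- bioc->raid_map
 */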
5901static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
5902						       int total_stripes,
5903						       int real_stripes)
5904{
5905	struct btrfs_io_context *bioc = kzalloc(
5906		 /* The size of btrfs_io_context */
5907		sizeof(struct btrfs_io_context) +
5908		/* Plus the variable array for the stripes */
5909		sizeof(struct btrfs_io_stripe) * (total_stripes) +
5910		/* Plus the variable array for the tgt dev */
5911		sizeof(int) * (real_stripes) +
5912		/*
5913		 * Plus the raid_map, which includes both the tgt dev
5914		 * and the stripes.
5915		 */
5916		sizeof(u64) * (total_stripes),
5917		GFP_NOFS);
5918
5919	if (!bioc)
5920		return NULL;
5921
5922	refcount_set(&bioc->refs, 1);
5923
5924	bioc->fs_info = fs_info;
5925	bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
5926	bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);
5927
5928	return bioc;
5929}
5930
5931void btrfs_get_bioc(struct btrfs_io_context *bioc)
5932{
5933	WARN_ON(!refcount_read(&bioc->refs));
5934	refcount_inc(&bioc->refs);
5935}
5936
5937void btrfs_put_bioc(struct btrfs_io_context *bioc)
5938{
5939	if (!bioc)
5940		return;
5941	if (refcount_dec_and_test(&bioc->refs))
5942		kfree(bioc);
5943}
5944
5945/*
5946 * Note that discard won't be sent to the target device of a device
5947 * replace.
5948 */
5949struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
5950					       u64 logical, u64 *length_ret,
5951					       u32 *num_stripes)
5952{
5953	struct extent_map *em;
5954	struct map_lookup *map;
5955	struct btrfs_discard_stripe *stripes;
5956	u64 length = *length_ret;
5957	u64 offset;
5958	u64 stripe_nr;
5959	u64 stripe_nr_end;
5960	u64 stripe_end_offset;
5961	u64 stripe_cnt;
5962	u64 stripe_len;
5963	u64 stripe_offset;
5964	u32 stripe_index;
5965	u32 factor = 0;
5966	u32 sub_stripes = 0;
5967	u64 stripes_per_dev = 0;
5968	u32 remaining_stripes = 0;
5969	u32 last_stripe = 0;
5970	int ret;
5971	int i;
5972
5973	em = btrfs_get_chunk_map(fs_info, logical, length);
5974	if (IS_ERR(em))
5975		return ERR_CAST(em);
5976
5977	map = em->map_lookup;
5978
5979	/* we don't discard raid56 yet */
5980	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5981		ret = -EOPNOTSUPP;
5982		goto out_free_map;
5983	}
5984
5985	offset = logical - em->start;
5986	length = min_t(u64, em->start + em->len - logical, length);
5987	*length_ret = length;
5988
5989	stripe_len = map->stripe_len;
5990	/*
5991	 * stripe_nr counts the total number of stripes we have to stride
5992	 * to get to this block
5993	 */
5994	stripe_nr = div64_u64(offset, stripe_len);
5995
5996	/* stripe_offset is the offset of this block in its stripe */
5997	stripe_offset = offset - stripe_nr * stripe_len;
5998
5999	stripe_nr_end = round_up(offset + length, map->stripe_len);
6000	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
6001	stripe_cnt = stripe_nr_end - stripe_nr;
6002	stripe_end_offset = stripe_nr_end * map->stripe_len -
6003			    (offset + length);
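	/*
	 * For example, with a 64K stripe_len, offset == 200K and
	 * length == 100K: stripe_nr == 3, stripe_offset == 8K,
	 * stripe_nr_end == 5, stripe_cnt == 2 and stripe_end_offset == 20K.
	 */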
6004	/*
6005	 * after this, stripe_nr is the number of stripes on this
6006	 * device we have to walk to find the data, and stripe_index is
6007	 * the number of our device in the stripe array
6008	 */
6009	*num_stripes = 1;
6010	stripe_index = 0;
6011	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6012			 BTRFS_BLOCK_GROUP_RAID10)) {
6013		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6014			sub_stripes = 1;
6015		else
6016			sub_stripes = map->sub_stripes;
6017
6018		factor = map->num_stripes / sub_stripes;
6019		*num_stripes = min_t(u64, map->num_stripes,
6020				    sub_stripes * stripe_cnt);
6021		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6022		stripe_index *= sub_stripes;
6023		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
6024					      &remaining_stripes);
6025		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
6026		last_stripe *= sub_stripes;
6027	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6028				BTRFS_BLOCK_GROUP_DUP)) {
6029		*num_stripes = map->num_stripes;
6030	} else {
6031		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6032					&stripe_index);
6033	}
6034
6035	stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS);
6036	if (!stripes) {
6037		ret = -ENOMEM;
6038		goto out_free_map;
6039	}
6040
6041	for (i = 0; i < *num_stripes; i++) {
6042		stripes[i].physical =
6043			map->stripes[stripe_index].physical +
6044			stripe_offset + stripe_nr * map->stripe_len;
6045		stripes[i].dev = map->stripes[stripe_index].dev;
6046
6047		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6048				 BTRFS_BLOCK_GROUP_RAID10)) {
6049			stripes[i].length = stripes_per_dev * map->stripe_len;
6050
6051			if (i / sub_stripes < remaining_stripes)
6052				stripes[i].length += map->stripe_len;
6053
6054			/*
6055			 * Special for the first stripe and
6056			 * the last stripe:
6057			 *
6058			 * |-------|...|-------|
6059			 *     |----------|
6060			 *    off     end_off
6061			 */
6062			if (i < sub_stripes)
6063				stripes[i].length -= stripe_offset;
6064
6065			if (stripe_index >= last_stripe &&
6066			    stripe_index <= (last_stripe +
6067					     sub_stripes - 1))
6068				stripes[i].length -= stripe_end_offset;
6069
6070			if (i == sub_stripes - 1)
6071				stripe_offset = 0;
6072		} else {
6073			stripes[i].length = length;
6074		}
6075
6076		stripe_index++;
6077		if (stripe_index == map->num_stripes) {
6078			stripe_index = 0;
6079			stripe_nr++;
6080		}
6081	}
6082
6083	free_extent_map(em);
6084	return stripes;
6085out_free_map:
6086	free_extent_map(em);
6087	return ERR_PTR(ret);
6088}
6089
6090/*
6091 * In dev-replace case, for repair case (that's the only case where the mirror
6092 * is selected explicitly when calling btrfs_map_block), blocks left of the
6093 * left cursor can also be read from the target drive.
6094 *
6095 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
6096 * array of stripes.
6097 * For READ, it also needs to be supported using the same mirror number.
6098 *
6099 * If the requested block is not left of the left cursor, EIO is returned. This
6100 * can happen because btrfs_num_copies() returns one more in the dev-replace
6101 * case.
6102 */
6103static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
6104					 u64 logical, u64 length,
6105					 u64 srcdev_devid, int *mirror_num,
6106					 u64 *physical)
6107{
6108	struct btrfs_io_context *bioc = NULL;
6109	int num_stripes;
6110	int index_srcdev = 0;
6111	int found = 0;
6112	u64 physical_of_found = 0;
6113	int i;
6114	int ret = 0;
6115
6116	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6117				logical, &length, &bioc, NULL, NULL, 0);
6118	if (ret) {
6119		ASSERT(bioc == NULL);
6120		return ret;
6121	}
6122
6123	num_stripes = bioc->num_stripes;
6124	if (*mirror_num > num_stripes) {
6125		/*
6126		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6127		 * that means that the requested area is not left of the left
6128		 * cursor
6129		 */
6130		btrfs_put_bioc(bioc);
6131		return -EIO;
6132	}
6133
6134	/*
6135	 * Process the rest of the function using the mirror_num of the source
6136	 * drive. Therefore look it up first. At the end, patch the device
6137	 * pointer to that of the target drive.
6138	 */
6139	for (i = 0; i < num_stripes; i++) {
6140		if (bioc->stripes[i].dev->devid != srcdev_devid)
6141			continue;
6142
6143		/*
6144		 * In case of DUP, in order to keep it simple, only add the
6145		 * mirror with the lowest physical address
6146		 */
6147		if (found &&
6148		    physical_of_found <= bioc->stripes[i].physical)
6149			continue;
6150
6151		index_srcdev = i;
6152		found = 1;
6153		physical_of_found = bioc->stripes[i].physical;
6154	}
6155
6156	btrfs_put_bioc(bioc);
6157
6158	ASSERT(found);
6159	if (!found)
6160		return -EIO;
6161
6162	*mirror_num = index_srcdev + 1;
6163	*physical = physical_of_found;
6164	return ret;
6165}
6166
6167static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6168{
6169	struct btrfs_block_group *cache;
6170	bool ret;
6171
6172	/* A non-zoned filesystem does not use the "to_copy" flag */
6173	if (!btrfs_is_zoned(fs_info))
6174		return false;
6175
6176	cache = btrfs_lookup_block_group(fs_info, logical);
6177
6178	ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
6179
6180	btrfs_put_block_group(cache);
6181	return ret;
6182}
6183
6184static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6185				      struct btrfs_io_context **bioc_ret,
6186				      struct btrfs_dev_replace *dev_replace,
6187				      u64 logical,
6188				      int *num_stripes_ret, int *max_errors_ret)
6189{
6190	struct btrfs_io_context *bioc = *bioc_ret;
6191	u64 srcdev_devid = dev_replace->srcdev->devid;
6192	int tgtdev_indexes = 0;
6193	int num_stripes = *num_stripes_ret;
6194	int max_errors = *max_errors_ret;
6195	int i;
6196
6197	if (op == BTRFS_MAP_WRITE) {
6198		int index_where_to_add;
6199
6200		/*
6201		 * A block group which has "to_copy" set will eventually be
6202		 * copied by the dev-replace process. We can avoid cloning IO here.
6203		 */
6204		if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6205			return;
6206
6207		/*
6208		 * duplicate the write operations while the dev replace
6209		 * procedure is running. Since the copying of the old disk to
6210		 * the new disk takes place at run time while the filesystem is
6211		 * mounted writable, the regular write operations to the old
6212		 * disk have to be duplicated to go to the new disk as well.
6213		 *
6214		 * Note that device->missing is handled by the caller, and that
6215		 * the write to the old disk is already set up in the stripes
6216		 * array.
6217		 */
6218		index_where_to_add = num_stripes;
6219		for (i = 0; i < num_stripes; i++) {
6220			if (bioc->stripes[i].dev->devid == srcdev_devid) {
6221				/* write to new disk, too */
6222				struct btrfs_io_stripe *new =
6223					bioc->stripes + index_where_to_add;
6224				struct btrfs_io_stripe *old =
6225					bioc->stripes + i;
6226
6227				new->physical = old->physical;
6228				new->dev = dev_replace->tgtdev;
6229				bioc->tgtdev_map[i] = index_where_to_add;
6230				index_where_to_add++;
6231				max_errors++;
6232				tgtdev_indexes++;
6233			}
6234		}
6235		num_stripes = index_where_to_add;
6236	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
6237		int index_srcdev = 0;
6238		int found = 0;
6239		u64 physical_of_found = 0;
6240
6241		/*
6242		 * During the dev-replace procedure, the target drive can also
6243		 * be used to read data in case it is needed to repair a corrupt
6244		 * block elsewhere. This is possible if the requested area is
6245		 * left of the left cursor. In this area, the target drive is a
6246		 * full copy of the source drive.
6247		 */
6248		for (i = 0; i < num_stripes; i++) {
6249			if (bioc->stripes[i].dev->devid == srcdev_devid) {
6250				/*
6251				 * In case of DUP, in order to keep it simple,
6252				 * only add the mirror with the lowest physical
6253				 * address
6254				 */
6255				if (found &&
6256				    physical_of_found <= bioc->stripes[i].physical)
6257					continue;
6258				index_srcdev = i;
6259				found = 1;
6260				physical_of_found = bioc->stripes[i].physical;
6261			}
6262		}
6263		if (found) {
6264			struct btrfs_io_stripe *tgtdev_stripe =
6265				bioc->stripes + num_stripes;
6266
6267			tgtdev_stripe->physical = physical_of_found;
6268			tgtdev_stripe->dev = dev_replace->tgtdev;
6269			bioc->tgtdev_map[index_srcdev] = num_stripes;
6270
6271			tgtdev_indexes++;
6272			num_stripes++;
6273		}
6274	}
6275
6276	*num_stripes_ret = num_stripes;
6277	*max_errors_ret = max_errors;
6278	bioc->num_tgtdevs = tgtdev_indexes;
6279	*bioc_ret = bioc;
6280}
6281
6282static bool need_full_stripe(enum btrfs_map_op op)
6283{
6284	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6285}
6286
6287/*
6288 * Calculate the geometry of a particular (address, len) tuple. This
6289 * information is used to calculate how big a particular bio can get before it
6290 * straddles a stripe.
6291 *
6292 * @fs_info: the filesystem
6293 * @em:      mapping containing the logical extent
6294 * @op:      type of operation - write or read
6295 * @logical: address that we want to figure out the geometry of
6296 * @io_geom: pointer used to return values
6297 *
6298 * Returns < 0 in case a chunk for the given logical address cannot be found
6299 * (usually shouldn't happen unless @logical is corrupted), 0 otherwise.
6300 */
6301int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6302			  enum btrfs_map_op op, u64 logical,
6303			  struct btrfs_io_geometry *io_geom)
6304{
6305	struct map_lookup *map;
6306	u64 len;
6307	u64 offset;
6308	u64 stripe_offset;
6309	u64 stripe_nr;
6310	u32 stripe_len;
6311	u64 raid56_full_stripe_start = (u64)-1;
6312	int data_stripes;
6313
6314	ASSERT(op != BTRFS_MAP_DISCARD);
6315
6316	map = em->map_lookup;
6317	/* Offset of this logical address in the chunk */
6318	offset = logical - em->start;
6319	/* Len of a stripe in a chunk */
6320	stripe_len = map->stripe_len;
6321	/*
6322	 * stripe_nr is the stripe this block falls in, and stripe_offset is
6323	 * the offset of this block within that stripe.
6324	 */
6325	stripe_nr = div64_u64_rem(offset, stripe_len, &stripe_offset);
6326	ASSERT(stripe_offset < U32_MAX);
6327
6328	data_stripes = nr_data_stripes(map);
6329
6330	/* Only stripe-based profiles need to check against stripe length. */
6331	if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) {
6332		u64 max_len = stripe_len - stripe_offset;
6333
6334		/*
6335		 * In case of raid56, we need to know the stripe-aligned start.
6336		 */
6337		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6338			unsigned long full_stripe_len = stripe_len * data_stripes;
6339			raid56_full_stripe_start = offset;
6340
6341			/*
6342			 * Allow a write of a full stripe, but make sure we
6343			 * don't allow straddling of stripes
6344			 */
6345			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6346					full_stripe_len);
6347			raid56_full_stripe_start *= full_stripe_len;
6348
6349			/*
6350			 * For writes to RAID[56], allow a full stripeset across
6351			 * all disks. For other RAID types and for RAID[56]
6352			 * reads, just allow a single stripe (on a single disk).
6353			 */
6354			if (op == BTRFS_MAP_WRITE) {
6355				max_len = stripe_len * data_stripes -
6356					  (offset - raid56_full_stripe_start);
6357			}
6358		}
6359		len = min_t(u64, em->len - offset, max_len);
6360	} else {
6361		len = em->len - offset;
6362	}
6363
6364	io_geom->len = len;
6365	io_geom->offset = offset;
6366	io_geom->stripe_len = stripe_len;
6367	io_geom->stripe_nr = stripe_nr;
6368	io_geom->stripe_offset = stripe_offset;
6369	io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6370
6371	return 0;
6372}
6373
6374static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map,
6375		          u32 stripe_index, u64 stripe_offset, u64 stripe_nr)
6376{
6377	dst->dev = map->stripes[stripe_index].dev;
6378	dst->physical = map->stripes[stripe_index].physical +
6379			stripe_offset + stripe_nr * map->stripe_len;
6380}
6381
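/*
 * Map a logical range to the physical stripes backing it. On success either
 * fill @smap for the single-stripe fast path (leaving *bioc_ret NULL), or
 * return a btrfs_io_context describing all stripes involved, including the
 * extra target-device stripes while a device replace is running.
 */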
6382int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6383		      u64 logical, u64 *length,
6384		      struct btrfs_io_context **bioc_ret,
6385		      struct btrfs_io_stripe *smap, int *mirror_num_ret,
6386		      int need_raid_map)
6387{
6388	struct extent_map *em;
6389	struct map_lookup *map;
6390	u64 stripe_offset;
6391	u64 stripe_nr;
6392	u64 stripe_len;
6393	u32 stripe_index;
6394	int data_stripes;
6395	int i;
6396	int ret = 0;
6397	int mirror_num = (mirror_num_ret ? *mirror_num_ret : 0);
6398	int num_stripes;
6399	int max_errors = 0;
6400	int tgtdev_indexes = 0;
6401	struct btrfs_io_context *bioc = NULL;
6402	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6403	int dev_replace_is_ongoing = 0;
6404	int num_alloc_stripes;
6405	int patch_the_first_stripe_for_dev_replace = 0;
6406	u64 physical_to_patch_in_first_stripe = 0;
6407	u64 raid56_full_stripe_start = (u64)-1;
6408	struct btrfs_io_geometry geom;
6409
6410	ASSERT(bioc_ret);
6411	ASSERT(op != BTRFS_MAP_DISCARD);
6412
6413	em = btrfs_get_chunk_map(fs_info, logical, *length);
6414	ASSERT(!IS_ERR(em));
6415
6416	ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
6417	if (ret < 0)
6418		return ret;
6419
6420	map = em->map_lookup;
6421
6422	*length = geom.len;
6423	stripe_len = geom.stripe_len;
6424	stripe_nr = geom.stripe_nr;
6425	stripe_offset = geom.stripe_offset;
6426	raid56_full_stripe_start = geom.raid56_stripe_offset;
6427	data_stripes = nr_data_stripes(map);
6428
6429	down_read(&dev_replace->rwsem);
6430	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6431	/*
6432	 * Hold the semaphore for read during the whole operation, write is
6433	 * requested at commit time but must wait.
6434	 */
6435	if (!dev_replace_is_ongoing)
6436		up_read(&dev_replace->rwsem);
6437
6438	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6439	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6440		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6441						    dev_replace->srcdev->devid,
6442						    &mirror_num,
6443					    &physical_to_patch_in_first_stripe);
6444		if (ret)
6445			goto out;
6446		else
6447			patch_the_first_stripe_for_dev_replace = 1;
6448	} else if (mirror_num > map->num_stripes) {
6449		mirror_num = 0;
6450	}
6451
6452	num_stripes = 1;
6453	stripe_index = 0;
6454	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6455		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6456				&stripe_index);
6457		if (!need_full_stripe(op))
6458			mirror_num = 1;
6459	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6460		if (need_full_stripe(op))
6461			num_stripes = map->num_stripes;
6462		else if (mirror_num)
6463			stripe_index = mirror_num - 1;
6464		else {
6465			stripe_index = find_live_mirror(fs_info, map, 0,
6466					    dev_replace_is_ongoing);
6467			mirror_num = stripe_index + 1;
6468		}
6469
6470	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6471		if (need_full_stripe(op)) {
6472			num_stripes = map->num_stripes;
6473		} else if (mirror_num) {
6474			stripe_index = mirror_num - 1;
6475		} else {
6476			mirror_num = 1;
6477		}
6478
6479	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6480		u32 factor = map->num_stripes / map->sub_stripes;
6481
6482		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6483		stripe_index *= map->sub_stripes;
6484
6485		if (need_full_stripe(op))
6486			num_stripes = map->sub_stripes;
6487		else if (mirror_num)
6488			stripe_index += mirror_num - 1;
6489		else {
6490			int old_stripe_index = stripe_index;
6491			stripe_index = find_live_mirror(fs_info, map,
6492					      stripe_index,
6493					      dev_replace_is_ongoing);
6494			mirror_num = stripe_index - old_stripe_index + 1;
6495		}
6496
6497	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6498		ASSERT(map->stripe_len == BTRFS_STRIPE_LEN);
6499		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6500			/* push stripe_nr back to the start of the full stripe */
6501			stripe_nr = div64_u64(raid56_full_stripe_start,
6502					stripe_len * data_stripes);
6503
6504			/* RAID[56] write or recovery. Return all stripes */
6505			num_stripes = map->num_stripes;
6506			max_errors = btrfs_chunk_max_errors(map);
6507
6508			/* Return the length to the full stripe end */
6509			*length = min(logical + *length,
6510				      raid56_full_stripe_start + em->start +
6511				      data_stripes * stripe_len) - logical;
6512			stripe_index = 0;
6513			stripe_offset = 0;
6514		} else {
6515			/*
6516			 * Mirror #0 or #1 means the original data block.
6517			 * Mirror #2 is RAID5 parity block.
6518			 * Mirror #3 is RAID6 Q block.
6519			 */
6520			stripe_nr = div_u64_rem(stripe_nr,
6521					data_stripes, &stripe_index);
6522			if (mirror_num > 1)
6523				stripe_index = data_stripes + mirror_num - 2;
6524
6525			/* We distribute the parity blocks across stripes */
6526			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6527					&stripe_index);
6528			if (!need_full_stripe(op) && mirror_num <= 1)
6529				mirror_num = 1;
6530		}
6531	} else {
6532		/*
6533		 * after this, stripe_nr is the number of stripes on this
6534		 * device we have to walk to find the data, and stripe_index is
6535		 * the number of our device in the stripe array
6536		 */
6537		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6538				&stripe_index);
6539		mirror_num = stripe_index + 1;
6540	}
6541	if (stripe_index >= map->num_stripes) {
6542		btrfs_crit(fs_info,
6543			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6544			   stripe_index, map->num_stripes);
6545		ret = -EINVAL;
6546		goto out;
6547	}
6548
6549	num_alloc_stripes = num_stripes;
6550	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6551		if (op == BTRFS_MAP_WRITE)
6552			num_alloc_stripes <<= 1;
6553		if (op == BTRFS_MAP_GET_READ_MIRRORS)
6554			num_alloc_stripes++;
6555		tgtdev_indexes = num_stripes;
6556	}
6557
6558	/*
6559	 * If this I/O maps to a single device, try to return the device and
6560	 * physical block information on the stack instead of allocating an
6561	 * I/O context structure.
6562	 */
6563	if (smap && num_alloc_stripes == 1 &&
6564	    !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1) &&
6565	    (!need_full_stripe(op) || !dev_replace_is_ongoing ||
6566	     !dev_replace->tgtdev)) {
6567		if (patch_the_first_stripe_for_dev_replace) {
6568			smap->dev = dev_replace->tgtdev;
6569			smap->physical = physical_to_patch_in_first_stripe;
6570			*mirror_num_ret = map->num_stripes + 1;
6571		} else {
6572			set_io_stripe(smap, map, stripe_index, stripe_offset,
6573				      stripe_nr);
6574			*mirror_num_ret = mirror_num;
6575		}
6576		*bioc_ret = NULL;
6577		ret = 0;
6578		goto out;
6579	}
6580
6581	bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes);
6582	if (!bioc) {
6583		ret = -ENOMEM;
6584		goto out;
6585	}
6586
6587	for (i = 0; i < num_stripes; i++) {
6588		set_io_stripe(&bioc->stripes[i], map, stripe_index, stripe_offset,
6589			      stripe_nr);
6590		stripe_index++;
6591	}
6592
6593	/* Build raid_map */
6594	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6595	    (need_full_stripe(op) || mirror_num > 1)) {
6596		u64 tmp;
6597		unsigned rot;
6598
6599		/* Work out the disk rotation on this stripe-set */
6600		div_u64_rem(stripe_nr, num_stripes, &rot);
6601
6602		/* Fill in the logical address of each stripe */
6603		tmp = stripe_nr * data_stripes;
6604		for (i = 0; i < data_stripes; i++)
6605			bioc->raid_map[(i + rot) % num_stripes] =
6606				em->start + (tmp + i) * map->stripe_len;
6607
6608		bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE;
6609		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6610			bioc->raid_map[(i + rot + 1) % num_stripes] =
6611				RAID6_Q_STRIPE;
6612
6613		sort_parity_stripes(bioc, num_stripes);
6614	}
6615
6616	if (need_full_stripe(op))
6617		max_errors = btrfs_chunk_max_errors(map);
6618
6619	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6620	    need_full_stripe(op)) {
6621		handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
6622					  &num_stripes, &max_errors);
6623	}
6624
6625	*bioc_ret = bioc;
6626	bioc->map_type = map->type;
6627	bioc->num_stripes = num_stripes;
6628	bioc->max_errors = max_errors;
6629	bioc->mirror_num = mirror_num;
6630
6631	/*
6632	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
6633	 * mirror_num == num_stripes + 1 && dev_replace target drive is
6634	 * available as a mirror
6635	 */
6636	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6637		WARN_ON(num_stripes > 1);
6638		bioc->stripes[0].dev = dev_replace->tgtdev;
6639		bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
6640		bioc->mirror_num = map->num_stripes + 1;
6641	}
6642out:
6643	if (dev_replace_is_ongoing) {
6644		lockdep_assert_held(&dev_replace->rwsem);
6645		/* Unlock and let waiting writers proceed */
6646		up_read(&dev_replace->rwsem);
6647	}
6648	free_extent_map(em);
6649	return ret;
6650}
6651
6652int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6653		      u64 logical, u64 *length,
6654		      struct btrfs_io_context **bioc_ret, int mirror_num)
6655{
6656	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6657				 NULL, &mirror_num, 0);
6658}
6659
6660/* For Scrub/replace */
6661int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6662		     u64 logical, u64 *length,
6663		     struct btrfs_io_context **bioc_ret)
6664{
6665	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6666				 NULL, NULL, 1);
6667}
6668
6669static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
6670				      const struct btrfs_fs_devices *fs_devices)
6671{
6672	if (args->fsid == NULL)
6673		return true;
6674	if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
6675		return true;
6676	return false;
6677}
6678
6679static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
6680				  const struct btrfs_device *device)
6681{
6682	if (args->missing) {
6683		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
6684		    !device->bdev)
6685			return true;
6686		return false;
6687	}
6688
6689	if (device->devid != args->devid)
6690		return false;
6691	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
6692		return false;
6693	return true;
6694}
6695
6696/*
6697 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6698 * return NULL.
6699 *
6700 * If devid and uuid are both specified, the match must be exact, otherwise
6701 * only devid is used.
6702 */
6703struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
6704				       const struct btrfs_dev_lookup_args *args)
6705{
6706	struct btrfs_device *device;
6707	struct btrfs_fs_devices *seed_devs;
6708
6709	if (dev_args_match_fs_devices(args, fs_devices)) {
6710		list_for_each_entry(device, &fs_devices->devices, dev_list) {
6711			if (dev_args_match_device(args, device))
6712				return device;
6713		}
6714	}
6715
6716	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6717		if (!dev_args_match_fs_devices(args, seed_devs))
6718			continue;
6719		list_for_each_entry(device, &seed_devs->devices, dev_list) {
6720			if (dev_args_match_device(args, device))
6721				return device;
6722		}
6723	}
6724
6725	return NULL;
6726}
6727
6728static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6729					    u64 devid, u8 *dev_uuid)
6730{
6731	struct btrfs_device *device;
6732	unsigned int nofs_flag;
6733
6734	/*
6735	 * We call this under the chunk_mutex, so we want to use NOFS for this
6736	 * allocation, however we don't want to change btrfs_alloc_device() to
6737	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6738	 * places.
6739	 */
6740
6741	nofs_flag = memalloc_nofs_save();
6742	device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
6743	memalloc_nofs_restore(nofs_flag);
6744	if (IS_ERR(device))
6745		return device;
6746
6747	list_add(&device->dev_list, &fs_devices->devices);
6748	device->fs_devices = fs_devices;
6749	fs_devices->num_devices++;
6750
6751	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6752	fs_devices->missing_devices++;
6753
6754	return device;
6755}
6756
6757/*
6758 * Allocate new device struct, set up devid and UUID.
6759 *
6760 * @fs_info:	used only for generating a new devid, can be NULL if
6761 *		devid is provided (i.e. @devid != NULL).
6762 * @devid:	a pointer to devid for this device.  If NULL a new devid
6763 *		is generated.
6764 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6765 *		is generated.
6766 * @path:	a pointer to device path if available, NULL otherwise.
6767 *
6768 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6769 * on error.  Returned struct is not linked onto any lists and must be
6770 * destroyed with btrfs_free_device.
6771 */
6772struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6773					const u64 *devid, const u8 *uuid,
6774					const char *path)
6775{
6776	struct btrfs_device *dev;
6777	u64 tmp;
6778
6779	if (WARN_ON(!devid && !fs_info))
6780		return ERR_PTR(-EINVAL);
6781
6782	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6783	if (!dev)
6784		return ERR_PTR(-ENOMEM);
6785
6786	INIT_LIST_HEAD(&dev->dev_list);
6787	INIT_LIST_HEAD(&dev->dev_alloc_list);
6788	INIT_LIST_HEAD(&dev->post_commit_list);
6789
6790	atomic_set(&dev->dev_stats_ccnt, 0);
6791	btrfs_device_data_ordered_init(dev);
6792	extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
6793
6794	if (devid)
6795		tmp = *devid;
6796	else {
6797		int ret;
6798
6799		ret = find_next_devid(fs_info, &tmp);
6800		if (ret) {
6801			btrfs_free_device(dev);
6802			return ERR_PTR(ret);
6803		}
6804	}
6805	dev->devid = tmp;
6806
6807	if (uuid)
6808		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6809	else
6810		generate_random_uuid(dev->uuid);
6811
6812	if (path) {
6813		struct rcu_string *name;
6814
6815		name = rcu_string_strdup(path, GFP_KERNEL);
6816		if (!name) {
6817			btrfs_free_device(dev);
6818			return ERR_PTR(-ENOMEM);
6819		}
6820		rcu_assign_pointer(dev->name, name);
6821	}
6822
6823	return dev;
6824}
6825
6826static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6827					u64 devid, u8 *uuid, bool error)
6828{
6829	if (error)
6830		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6831			      devid, uuid);
6832	else
6833		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6834			      devid, uuid);
6835}
6836
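/*
 * Length of a single stripe on one device: the chunk length divided by the
 * number of data stripes. E.g. a 2GiB RAID0 chunk across two devices uses a
 * 1GiB device extent on each of them.
 */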
6837u64 btrfs_calc_stripe_length(const struct extent_map *em)
6838{
6839	const struct map_lookup *map = em->map_lookup;
6840	const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
6841
6842	return div_u64(em->len, data_stripes);
6843}
6844
6845#if BITS_PER_LONG == 32
6846/*
6847 * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
6848 * can't be accessed on 32bit systems.
6849 *
6850 * This function does a mount-time check to reject the fs if it already
6851 * has a metadata chunk beyond that limit.
6852 */
6853static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6854				  u64 logical, u64 length, u64 type)
6855{
6856	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6857		return 0;
6858
6859	if (logical + length < MAX_LFS_FILESIZE)
6860		return 0;
6861
6862	btrfs_err_32bit_limit(fs_info);
6863	return -EOVERFLOW;
6864}
6865
6866/*
6867 * This is to give an early warning for any metadata chunk reaching
6868 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
6869 * Although we can still access the metadata, it's not going to be possible
6870 * once the limit is reached.
6871 */
6872static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6873				  u64 logical, u64 length, u64 type)
6874{
6875	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6876		return;
6877
6878	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
6879		return;
6880
6881	btrfs_warn_32bit_limit(fs_info);
6882}
6883#endif
6884
6885static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
6886						  u64 devid, u8 *uuid)
6887{
6888	struct btrfs_device *dev;
6889
6890	if (!btrfs_test_opt(fs_info, DEGRADED)) {
6891		btrfs_report_missing_device(fs_info, devid, uuid, true);
6892		return ERR_PTR(-ENOENT);
6893	}
6894
6895	dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
6896	if (IS_ERR(dev)) {
6897		btrfs_err(fs_info, "failed to init missing device %llu: %ld",
6898			  devid, PTR_ERR(dev));
6899		return dev;
6900	}
6901	btrfs_report_missing_device(fs_info, devid, uuid, false);
6902
6903	return dev;
6904}
6905
6906static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6907			  struct btrfs_chunk *chunk)
6908{
6909	BTRFS_DEV_LOOKUP_ARGS(args);
6910	struct btrfs_fs_info *fs_info = leaf->fs_info;
6911	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6912	struct map_lookup *map;
6913	struct extent_map *em;
6914	u64 logical;
6915	u64 length;
6916	u64 devid;
6917	u64 type;
6918	u8 uuid[BTRFS_UUID_SIZE];
6919	int index;
6920	int num_stripes;
6921	int ret;
6922	int i;
6923
6924	logical = key->offset;
6925	length = btrfs_chunk_length(leaf, chunk);
6926	type = btrfs_chunk_type(leaf, chunk);
6927	index = btrfs_bg_flags_to_raid_index(type);
6928	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6929
6930#if BITS_PER_LONG == 32
6931	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
6932	if (ret < 0)
6933		return ret;
6934	warn_32bit_meta_chunk(fs_info, logical, length, type);
6935#endif
6936
6937	/*
6938	 * Only need to verify the chunk item if we're reading from the sys chunk
6939	 * array, as chunk items in tree blocks are already verified by the tree-checker.
6940	 */
6941	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6942		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6943		if (ret)
6944			return ret;
6945	}
6946
6947	read_lock(&map_tree->lock);
6948	em = lookup_extent_mapping(map_tree, logical, 1);
6949	read_unlock(&map_tree->lock);
6950
6951	/* already mapped? */
6952	if (em && em->start <= logical && em->start + em->len > logical) {
6953		free_extent_map(em);
6954		return 0;
6955	} else if (em) {
6956		free_extent_map(em);
6957	}
6958
6959	em = alloc_extent_map();
6960	if (!em)
6961		return -ENOMEM;
6962	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6963	if (!map) {
6964		free_extent_map(em);
6965		return -ENOMEM;
6966	}
6967
6968	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6969	em->map_lookup = map;
6970	em->start = logical;
6971	em->len = length;
6972	em->orig_start = 0;
6973	em->block_start = 0;
6974	em->block_len = em->len;
6975
6976	map->num_stripes = num_stripes;
6977	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6978	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6979	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6980	map->type = type;
6981	/*
6982	 * We can't use the sub_stripes value, as for profiles other than
6983	 * RAID10, they may have 0 as sub_stripes for filesystems created by
6984	 * older mkfs (<v5.4).
6985	 * In that case, it can cause divide-by-zero errors later.
6986	 * Since currently sub_stripes is fixed for each profile, let's
6987	 * use the trusted value instead.
6988	 */
6989	map->sub_stripes = btrfs_raid_array[index].sub_stripes;
6990	map->verified_stripes = 0;
6991	em->orig_block_len = btrfs_calc_stripe_length(em);
6992	for (i = 0; i < num_stripes; i++) {
6993		map->stripes[i].physical =
6994			btrfs_stripe_offset_nr(leaf, chunk, i);
6995		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6996		args.devid = devid;
6997		read_extent_buffer(leaf, uuid, (unsigned long)
6998				   btrfs_stripe_dev_uuid_nr(chunk, i),
6999				   BTRFS_UUID_SIZE);
7000		args.uuid = uuid;
7001		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
7002		if (!map->stripes[i].dev) {
7003			map->stripes[i].dev = handle_missing_device(fs_info,
7004								    devid, uuid);
7005			if (IS_ERR(map->stripes[i].dev)) {
7006				ret = PTR_ERR(map->stripes[i].dev);
7007				free_extent_map(em);
7008				return ret;
7009			}
7010		}
7011
7012		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7013				&(map->stripes[i].dev->dev_state));
7014	}
7015
7016	write_lock(&map_tree->lock);
7017	ret = add_extent_mapping(map_tree, em, 0);
7018	write_unlock(&map_tree->lock);
7019	if (ret < 0) {
7020		btrfs_err(fs_info,
7021			  "failed to add chunk map, start=%llu len=%llu: %d",
7022			  em->start, em->len, ret);
7023	}
7024	free_extent_map(em);
7025
7026	return ret;
7027}
7028
7029static void fill_device_from_item(struct extent_buffer *leaf,
7030				 struct btrfs_dev_item *dev_item,
7031				 struct btrfs_device *device)
7032{
7033	unsigned long ptr;
7034
7035	device->devid = btrfs_device_id(leaf, dev_item);
7036	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7037	device->total_bytes = device->disk_total_bytes;
7038	device->commit_total_bytes = device->disk_total_bytes;
7039	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7040	device->commit_bytes_used = device->bytes_used;
7041	device->type = btrfs_device_type(leaf, dev_item);
7042	device->io_align = btrfs_device_io_align(leaf, dev_item);
7043	device->io_width = btrfs_device_io_width(leaf, dev_item);
7044	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7045	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7046	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7047
7048	ptr = btrfs_device_uuid(dev_item);
7049	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7050}
7051
7052static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7053						  u8 *fsid)
7054{
7055	struct btrfs_fs_devices *fs_devices;
7056	int ret;
7057
7058	lockdep_assert_held(&uuid_mutex);
7059	ASSERT(fsid);
7060
7061	/* This will match only for multi-device seed fs */
7062	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7063		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7064			return fs_devices;
7065
7066
7067	fs_devices = find_fsid(fsid, NULL);
7068	if (!fs_devices) {
7069		if (!btrfs_test_opt(fs_info, DEGRADED))
7070			return ERR_PTR(-ENOENT);
7071
7072		fs_devices = alloc_fs_devices(fsid, NULL);
7073		if (IS_ERR(fs_devices))
7074			return fs_devices;
7075
7076		fs_devices->seeding = true;
7077		fs_devices->opened = 1;
7078		return fs_devices;
7079	}
7080
7081	/*
7082	 * Upon first call for a seed fs fsid, just create a private copy of the
7083	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
7084	 */
7085	fs_devices = clone_fs_devices(fs_devices);
7086	if (IS_ERR(fs_devices))
7087		return fs_devices;
7088
7089	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
7090	if (ret) {
7091		free_fs_devices(fs_devices);
7092		return ERR_PTR(ret);
7093	}
7094
7095	if (!fs_devices->seeding) {
7096		close_fs_devices(fs_devices);
7097		free_fs_devices(fs_devices);
7098		return ERR_PTR(-EINVAL);
7099	}
7100
7101	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7102
7103	return fs_devices;
7104}
7105
7106static int read_one_dev(struct extent_buffer *leaf,
7107			struct btrfs_dev_item *dev_item)
7108{
7109	BTRFS_DEV_LOOKUP_ARGS(args);
7110	struct btrfs_fs_info *fs_info = leaf->fs_info;
7111	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7112	struct btrfs_device *device;
7113	u64 devid;
7114	int ret;
7115	u8 fs_uuid[BTRFS_FSID_SIZE];
7116	u8 dev_uuid[BTRFS_UUID_SIZE];
7117
7118	devid = btrfs_device_id(leaf, dev_item);
7119	args.devid = devid;
7120	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7121			   BTRFS_UUID_SIZE);
7122	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7123			   BTRFS_FSID_SIZE);
7124	args.uuid = dev_uuid;
7125	args.fsid = fs_uuid;
7126
7127	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7128		fs_devices = open_seed_devices(fs_info, fs_uuid);
7129		if (IS_ERR(fs_devices))
7130			return PTR_ERR(fs_devices);
7131	}
7132
7133	device = btrfs_find_device(fs_info->fs_devices, &args);
7134	if (!device) {
7135		if (!btrfs_test_opt(fs_info, DEGRADED)) {
7136			btrfs_report_missing_device(fs_info, devid,
7137							dev_uuid, true);
7138			return -ENOENT;
7139		}
7140
7141		device = add_missing_dev(fs_devices, devid, dev_uuid);
7142		if (IS_ERR(device)) {
7143			btrfs_err(fs_info,
7144				"failed to add missing dev %llu: %ld",
7145				devid, PTR_ERR(device));
7146			return PTR_ERR(device);
7147		}
7148		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7149	} else {
7150		if (!device->bdev) {
7151			if (!btrfs_test_opt(fs_info, DEGRADED)) {
7152				btrfs_report_missing_device(fs_info,
7153						devid, dev_uuid, true);
7154				return -ENOENT;
7155			}
7156			btrfs_report_missing_device(fs_info, devid,
7157							dev_uuid, false);
7158		}
7159
7160		if (!device->bdev &&
7161		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7162			/*
7163			 * This happens when a device that was properly set up
7164			 * in the device info lists suddenly goes bad.
7165			 * device->bdev is NULL, so we have to set the
7166			 * MISSING state bit here.
7167			 */
7168			device->fs_devices->missing_devices++;
7169			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7170		}
7171
7172		/* Move the device to its own fs_devices */
7173		if (device->fs_devices != fs_devices) {
7174			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7175							&device->dev_state));
7176
7177			list_move(&device->dev_list, &fs_devices->devices);
7178			device->fs_devices->num_devices--;
7179			fs_devices->num_devices++;
7180
7181			device->fs_devices->missing_devices--;
7182			fs_devices->missing_devices++;
7183
7184			device->fs_devices = fs_devices;
7185		}
7186	}
7187
7188	if (device->fs_devices != fs_info->fs_devices) {
7189		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7190		if (device->generation !=
7191		    btrfs_device_generation(leaf, dev_item))
7192			return -EINVAL;
7193	}
7194
7195	fill_device_from_item(leaf, dev_item, device);
7196	if (device->bdev) {
7197		u64 max_total_bytes = bdev_nr_bytes(device->bdev);
7198
7199		if (device->total_bytes > max_total_bytes) {
7200			btrfs_err(fs_info,
7201			"device total_bytes should be at most %llu but found %llu",
7202				  max_total_bytes, device->total_bytes);
7203			return -EINVAL;
7204		}
7205	}
7206	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7207	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7208	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7209		device->fs_devices->total_rw_bytes += device->total_bytes;
7210		atomic64_add(device->total_bytes - device->bytes_used,
7211				&fs_info->free_chunk_space);
7212	}
7213	ret = 0;
7214	return ret;
7215}
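
/*
 * Illustrative sketch (kept out of the build): the lookup above only
 * accepts a device when devid, the device UUID and the filesystem UUID
 * all agree.  The struct below is a hypothetical stand-in, not the real
 * struct btrfs_device.
 */
#if 0
#include <string.h>

struct dev_id {
	unsigned long long devid;
	unsigned char uuid[16];		/* BTRFS_UUID_SIZE */
	unsigned char fsid[16];		/* BTRFS_FSID_SIZE */
};

/* Return 1 when every provided identifier matches, 0 otherwise. */
static int dev_id_match(const struct dev_id *dev, unsigned long long devid,
			const unsigned char *uuid, const unsigned char *fsid)
{
	if (dev->devid != devid)
		return 0;
	if (uuid && memcmp(dev->uuid, uuid, 16) != 0)
		return 0;
	if (fsid && memcmp(dev->fsid, fsid, 16) != 0)
		return 0;
	return 1;
}
#endif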
7216
7217int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7218{
7219	struct btrfs_super_block *super_copy = fs_info->super_copy;
7220	struct extent_buffer *sb;
7221	struct btrfs_disk_key *disk_key;
7222	struct btrfs_chunk *chunk;
7223	u8 *array_ptr;
7224	unsigned long sb_array_offset;
7225	int ret = 0;
7226	u32 num_stripes;
7227	u32 array_size;
7228	u32 len = 0;
7229	u32 cur_offset;
7230	u64 type;
7231	struct btrfs_key key;
7232
7233	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7234
7235	/*
7236	 * We allocated a dummy extent buffer, just to use the extent buffer
7237	 * accessors. There will be unused space after BTRFS_SUPER_INFO_SIZE,
7238	 * but that's fine, we will not go beyond the system chunk array anyway.
7239	 */
7240	sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET);
7241	if (!sb)
7242		return -ENOMEM;
7243	set_extent_buffer_uptodate(sb);
7244
7245	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7246	array_size = btrfs_super_sys_array_size(super_copy);
7247
7248	array_ptr = super_copy->sys_chunk_array;
7249	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7250	cur_offset = 0;
7251
7252	while (cur_offset < array_size) {
7253		disk_key = (struct btrfs_disk_key *)array_ptr;
7254		len = sizeof(*disk_key);
7255		if (cur_offset + len > array_size)
7256			goto out_short_read;
7257
7258		btrfs_disk_key_to_cpu(&key, disk_key);
7259
7260		array_ptr += len;
7261		sb_array_offset += len;
7262		cur_offset += len;
7263
7264		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7265			btrfs_err(fs_info,
7266			    "unexpected item type %u in sys_array at offset %u",
7267				  (u32)key.type, cur_offset);
7268			ret = -EIO;
7269			break;
7270		}
7271
7272		chunk = (struct btrfs_chunk *)sb_array_offset;
7273		/*
7274		 * At least one btrfs_chunk with one stripe must be present;
7275		 * the exact stripe count check comes afterwards.
7276		 */
7277		len = btrfs_chunk_item_size(1);
7278		if (cur_offset + len > array_size)
7279			goto out_short_read;
7280
7281		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7282		if (!num_stripes) {
7283			btrfs_err(fs_info,
7284			"invalid number of stripes %u in sys_array at offset %u",
7285				  num_stripes, cur_offset);
7286			ret = -EIO;
7287			break;
7288		}
7289
7290		type = btrfs_chunk_type(sb, chunk);
7291		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7292			btrfs_err(fs_info,
7293			"invalid chunk type %llu in sys_array at offset %u",
7294				  type, cur_offset);
7295			ret = -EIO;
7296			break;
7297		}
7298
7299		len = btrfs_chunk_item_size(num_stripes);
7300		if (cur_offset + len > array_size)
7301			goto out_short_read;
7302
7303		ret = read_one_chunk(&key, sb, chunk);
7304		if (ret)
7305			break;
7306
7307		array_ptr += len;
7308		sb_array_offset += len;
7309		cur_offset += len;
7310	}
7311	clear_extent_buffer_uptodate(sb);
7312	free_extent_buffer_stale(sb);
7313	return ret;
7314
7315out_short_read:
7316	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7317			len, cur_offset);
7318	clear_extent_buffer_uptodate(sb);
7319	free_extent_buffer_stale(sb);
7320	return -EIO;
7321}
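
/*
 * Illustrative sketch of the layout walked above (kept out of the
 * build): sys_chunk_array is a packed sequence of (disk key, chunk item)
 * pairs, and a chunk item's size depends on its stripe count, so each
 * length is bounds-checked before it is trusted.  The record types are
 * hypothetical stand-ins.
 */
#if 0
#include <stdint.h>
#include <string.h>

struct rec_hdr  { uint8_t key[17]; };	/* stands in for btrfs_disk_key */
struct rec_body { uint16_t nitems; };	/* stands in for btrfs_chunk */

static int walk_packed(const uint8_t *buf, size_t size)
{
	size_t cur = 0;

	while (cur < size) {
		uint16_t nitems;
		size_t len = sizeof(struct rec_hdr);

		if (cur + len > size)
			return -1;			/* short read */
		cur += len;

		/* validate the minimal body before trusting its count */
		if (cur + sizeof(struct rec_body) > size)
			return -1;
		memcpy(&nitems, buf + cur, sizeof(nitems));
		len = sizeof(struct rec_body) +
		      (size_t)nitems * sizeof(uint64_t);
		if (cur + len > size)
			return -1;			/* short read */
		cur += len;
	}
	return 0;
}
#endif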
7322
7323/*
7324 * Check if all chunks in the fs are OK for read-write degraded mount
7325 *
7326 * If the @failing_dev is specified, it's accounted as missing.
7327 *
7328 * Return true if all chunks meet the minimal RW mount requirements.
7329 * Return false if any chunk doesn't meet the minimal RW mount requirements.
7330 */
7331bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7332					struct btrfs_device *failing_dev)
7333{
7334	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7335	struct extent_map *em;
7336	u64 next_start = 0;
7337	bool ret = true;
7338
7339	read_lock(&map_tree->lock);
7340	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7341	read_unlock(&map_tree->lock);
7342	/* No chunk at all? Return false anyway */
7343	if (!em) {
7344		ret = false;
7345		goto out;
7346	}
7347	while (em) {
7348		struct map_lookup *map;
7349		int missing = 0;
7350		int max_tolerated;
7351		int i;
7352
7353		map = em->map_lookup;
7354		max_tolerated =
7355			btrfs_get_num_tolerated_disk_barrier_failures(
7356					map->type);
7357		for (i = 0; i < map->num_stripes; i++) {
7358			struct btrfs_device *dev = map->stripes[i].dev;
7359
7360			if (!dev || !dev->bdev ||
7361			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7362			    dev->last_flush_error)
7363				missing++;
7364			else if (failing_dev && failing_dev == dev)
7365				missing++;
7366		}
7367		if (missing > max_tolerated) {
7368			if (!failing_dev)
7369				btrfs_warn(fs_info,
7370	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
7371				   em->start, missing, max_tolerated);
7372			free_extent_map(em);
7373			ret = false;
7374			goto out;
7375		}
7376		next_start = extent_map_end(em);
7377		free_extent_map(em);
7378
7379		read_lock(&map_tree->lock);
7380		em = lookup_extent_mapping(map_tree, next_start,
7381					   (u64)(-1) - next_start);
7382		read_unlock(&map_tree->lock);
7383	}
7384out:
7385	return ret;
7386}
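
/*
 * The per-chunk rule applied above, reduced to a stand-alone sketch
 * (kept out of the build): a chunk blocks a writable mount once its
 * missing stripes exceed the tolerance of its profile (1 for
 * RAID1/RAID10/RAID5, 2 for RAID1C3/RAID6, 3 for RAID1C4, 0 otherwise;
 * see btrfs_raid_array).  The chunk view below is hypothetical.
 */
#if 0
struct chunk_view {
	int missing;	/* stripes whose device is absent or failed */
	int tolerated;	/* tolerated_failures of the chunk's profile */
};

static int fs_is_rw_degradable(const struct chunk_view *chunks, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (chunks[i].missing > chunks[i].tolerated)
			return 0;	/* this chunk blocks a writable mount */
	return nr > 0;			/* no chunks at all: not mountable */
}
#endif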
7387
7388static void readahead_tree_node_children(struct extent_buffer *node)
7389{
7390	int i;
7391	const int nr_items = btrfs_header_nritems(node);
7392
7393	for (i = 0; i < nr_items; i++)
7394		btrfs_readahead_node_child(node, i);
7395}
7396
7397int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7398{
7399	struct btrfs_root *root = fs_info->chunk_root;
7400	struct btrfs_path *path;
7401	struct extent_buffer *leaf;
7402	struct btrfs_key key;
7403	struct btrfs_key found_key;
7404	int ret;
7405	int slot;
7406	int iter_ret = 0;
7407	u64 total_dev = 0;
7408	u64 last_ra_node = 0;
7409
7410	path = btrfs_alloc_path();
7411	if (!path)
7412		return -ENOMEM;
7413
7414	/*
7415	 * uuid_mutex is needed only if we are mounting a sprout FS,
7416	 * otherwise it is not required.
7417	 */
7418	mutex_lock(&uuid_mutex);
7419
7420	/*
7421	 * It is possible for mount and umount to race in such a way that
7422	 * we execute this code path, but open_fs_devices failed to clear
7423	 * total_rw_bytes. We certainly want it cleared before reading the
7424	 * device items, so clear it here.
7425	 */
7426	fs_info->fs_devices->total_rw_bytes = 0;
7427
7428	/*
7429	 * Lockdep complains about possible circular locking dependency between
7430	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7431	 * used for freeze protection of a fs (struct super_block.s_writers),
7432	 * which we take when starting a transaction, and extent buffers of the
7433	 * chunk tree if we call read_one_dev() while holding a lock on an
7434	 * extent buffer of the chunk tree. Since we are mounting the filesystem
7435	 * and at this point there can't be any concurrent task modifying the
7436	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7437	 */
7438	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7439	path->skip_locking = 1;
7440
7441	/*
7442	 * Read all device items, and then all the chunk items. All
7443	 * device items are found before any chunk item (their object id
7444	 * is smaller than the lowest possible object id for a chunk
7445	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7446	 */
7447	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7448	key.offset = 0;
7449	key.type = 0;
7450	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
7451		struct extent_buffer *node = path->nodes[1];
7452
7453		leaf = path->nodes[0];
7454		slot = path->slots[0];
7455
7456		if (node) {
7457			if (last_ra_node != node->start) {
7458				readahead_tree_node_children(node);
7459				last_ra_node = node->start;
7460			}
7461		}
7462		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7463			struct btrfs_dev_item *dev_item;
7464			dev_item = btrfs_item_ptr(leaf, slot,
7465						  struct btrfs_dev_item);
7466			ret = read_one_dev(leaf, dev_item);
7467			if (ret)
7468				goto error;
7469			total_dev++;
7470		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7471			struct btrfs_chunk *chunk;
7472
7473			/*
7474			 * We are only called at mount time, so no need to take
7475			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7476			 * we always lock first fs_info->chunk_mutex before
7477			 * acquiring any locks on the chunk tree. This is a
7478			 * requirement for chunk allocation, see the comment on
7479			 * top of btrfs_chunk_alloc() for details.
7480			 */
7481			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7482			ret = read_one_chunk(&found_key, leaf, chunk);
7483			if (ret)
7484				goto error;
7485		}
7486	}
7487	/* Catch error found during iteration */
7488	if (iter_ret < 0) {
7489		ret = iter_ret;
7490		goto error;
7491	}
7492
7493	/*
7494	 * After loading chunk tree, we've got all device information,
7495	 * do another round of validation checks.
7496	 */
7497	if (total_dev != fs_info->fs_devices->total_devices) {
7498		btrfs_warn(fs_info,
7499"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7500			  btrfs_super_num_devices(fs_info->super_copy),
7501			  total_dev);
7502		fs_info->fs_devices->total_devices = total_dev;
7503		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7504	}
7505	if (btrfs_super_total_bytes(fs_info->super_copy) <
7506	    fs_info->fs_devices->total_rw_bytes) {
7507		btrfs_err(fs_info,
7508	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7509			  btrfs_super_total_bytes(fs_info->super_copy),
7510			  fs_info->fs_devices->total_rw_bytes);
7511		ret = -EINVAL;
7512		goto error;
7513	}
7514	ret = 0;
7515error:
7516	mutex_unlock(&uuid_mutex);
7517
7518	btrfs_free_path(path);
7519	return ret;
7520}
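
/*
 * Why the single pass above meets every DEV_ITEM before any CHUNK_ITEM:
 * tree keys sort as the tuple (objectid, type, offset), and
 * BTRFS_DEV_ITEMS_OBJECTID (1) is below BTRFS_FIRST_CHUNK_TREE_OBJECTID
 * (256).  Stand-alone sketch of that comparison, kept out of the build.
 */
#if 0
#include <stdint.h>

struct key3 {
	uint64_t objectid;
	uint8_t type;
	uint64_t offset;
};

static int key3_cmp(const struct key3 *a, const struct key3 *b)
{
	if (a->objectid != b->objectid)
		return a->objectid < b->objectid ? -1 : 1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	return 0;
}
#endif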
7521
7522int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7523{
7524	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7525	struct btrfs_device *device;
7526	int ret = 0;
7527
7528	fs_devices->fs_info = fs_info;
7529
7530	mutex_lock(&fs_devices->device_list_mutex);
7531	list_for_each_entry(device, &fs_devices->devices, dev_list)
7532		device->fs_info = fs_info;
7533
7534	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7535		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7536			device->fs_info = fs_info;
7537			ret = btrfs_get_dev_zone_info(device, false);
7538			if (ret)
7539				break;
7540		}
7541
7542		seed_devs->fs_info = fs_info;
7543	}
7544	mutex_unlock(&fs_devices->device_list_mutex);
7545
7546	return ret;
7547}
7548
7549static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7550				 const struct btrfs_dev_stats_item *ptr,
7551				 int index)
7552{
7553	u64 val;
7554
7555	read_extent_buffer(eb, &val,
7556			   offsetof(struct btrfs_dev_stats_item, values) +
7557			    ((unsigned long)ptr) + (index * sizeof(u64)),
7558			   sizeof(val));
7559	return val;
7560}
7561
7562static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7563				      struct btrfs_dev_stats_item *ptr,
7564				      int index, u64 val)
7565{
7566	write_extent_buffer(eb, &val,
7567			    offsetof(struct btrfs_dev_stats_item, values) +
7568			     ((unsigned long)ptr) + (index * sizeof(u64)),
7569			    sizeof(val));
7570}
7571
7572static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7573				       struct btrfs_path *path)
7574{
7575	struct btrfs_dev_stats_item *ptr;
7576	struct extent_buffer *eb;
7577	struct btrfs_key key;
7578	int item_size;
7579	int i, ret, slot;
7580
7581	if (!device->fs_info->dev_root)
7582		return 0;
7583
7584	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7585	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7586	key.offset = device->devid;
7587	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7588	if (ret) {
7589		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7590			btrfs_dev_stat_set(device, i, 0);
7591		device->dev_stats_valid = 1;
7592		btrfs_release_path(path);
7593		return ret < 0 ? ret : 0;
7594	}
7595	slot = path->slots[0];
7596	eb = path->nodes[0];
7597	item_size = btrfs_item_size(eb, slot);
7598
7599	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7600
7601	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7602		if (item_size >= (1 + i) * sizeof(__le64))
7603			btrfs_dev_stat_set(device, i,
7604					   btrfs_dev_stats_value(eb, ptr, i));
7605		else
7606			btrfs_dev_stat_set(device, i, 0);
7607	}
7608
7609	device->dev_stats_valid = 1;
7610	btrfs_dev_stat_print_on_load(device);
7611	btrfs_release_path(path);
7612
7613	return 0;
7614}
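
/*
 * The size-compatibility rule above, isolated (kept out of the build):
 * a stats item written by an older kernel may carry fewer than
 * BTRFS_DEV_STAT_VALUES_MAX counters, so counter i is read only when
 * the on-disk item spans at least (i + 1) * 8 bytes and defaults to 0
 * otherwise.  Endianness handling is omitted in this sketch.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define NR_COUNTERS 5	/* current BTRFS_DEV_STAT_VALUES_MAX */

static void load_counters(const void *item, size_t item_size,
			  uint64_t out[NR_COUNTERS])
{
	int i;

	for (i = 0; i < NR_COUNTERS; i++) {
		out[i] = 0;
		if (item_size >= (size_t)(i + 1) * sizeof(uint64_t))
			memcpy(&out[i], (const uint8_t *)item +
			       i * sizeof(uint64_t), sizeof(uint64_t));
	}
}
#endif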
7615
7616int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7617{
7618	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7619	struct btrfs_device *device;
7620	struct btrfs_path *path = NULL;
7621	int ret = 0;
7622
7623	path = btrfs_alloc_path();
7624	if (!path)
7625		return -ENOMEM;
7626
7627	mutex_lock(&fs_devices->device_list_mutex);
7628	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7629		ret = btrfs_device_init_dev_stats(device, path);
7630		if (ret)
7631			goto out;
7632	}
7633	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7634		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7635			ret = btrfs_device_init_dev_stats(device, path);
7636			if (ret)
7637				goto out;
7638		}
7639	}
7640out:
7641	mutex_unlock(&fs_devices->device_list_mutex);
7642
7643	btrfs_free_path(path);
7644	return ret;
7645}
7646
7647static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7648				struct btrfs_device *device)
7649{
7650	struct btrfs_fs_info *fs_info = trans->fs_info;
7651	struct btrfs_root *dev_root = fs_info->dev_root;
7652	struct btrfs_path *path;
7653	struct btrfs_key key;
7654	struct extent_buffer *eb;
7655	struct btrfs_dev_stats_item *ptr;
7656	int ret;
7657	int i;
7658
7659	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7660	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7661	key.offset = device->devid;
7662
7663	path = btrfs_alloc_path();
7664	if (!path)
7665		return -ENOMEM;
7666	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7667	if (ret < 0) {
7668		btrfs_warn_in_rcu(fs_info,
7669			"error %d while searching for dev_stats item for device %s",
7670				  ret, btrfs_dev_name(device));
7671		goto out;
7672	}
7673
7674	if (ret == 0 &&
7675	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7676		/* need to delete old one and insert a new one */
7677		ret = btrfs_del_item(trans, dev_root, path);
7678		if (ret != 0) {
7679			btrfs_warn_in_rcu(fs_info,
7680				"delete too small dev_stats item for device %s failed %d",
7681					  btrfs_dev_name(device), ret);
7682			goto out;
7683		}
7684		ret = 1;
7685	}
7686
7687	if (ret == 1) {
7688		/* need to insert a new item */
7689		btrfs_release_path(path);
7690		ret = btrfs_insert_empty_item(trans, dev_root, path,
7691					      &key, sizeof(*ptr));
7692		if (ret < 0) {
7693			btrfs_warn_in_rcu(fs_info,
7694				"insert dev_stats item for device %s failed %d",
7695				btrfs_dev_name(device), ret);
7696			goto out;
7697		}
7698	}
7699
7700	eb = path->nodes[0];
7701	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7702	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7703		btrfs_set_dev_stats_value(eb, ptr, i,
7704					  btrfs_dev_stat_read(device, i));
7705	btrfs_mark_buffer_dirty(eb);
7706
7707out:
7708	btrfs_free_path(path);
7709	return ret;
7710}
7711
7712/*
7713 * Called from commit_transaction(). Writes all changed device stats to disk.
7714 */
7715int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7716{
7717	struct btrfs_fs_info *fs_info = trans->fs_info;
7718	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7719	struct btrfs_device *device;
7720	int stats_cnt;
7721	int ret = 0;
7722
7723	mutex_lock(&fs_devices->device_list_mutex);
7724	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7725		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7726		if (!device->dev_stats_valid || stats_cnt == 0)
7727			continue;
7728
7729
7730		/*
7731		 * There is a LOAD-LOAD control dependency between the value of
7732		 * dev_stats_ccnt and updating the on-disk values which requires
7733		 * reading the in-memory counters. Such control dependencies
7734		 * require explicit read memory barriers.
7735		 *
7736		 * This memory barrier pairs with smp_mb__before_atomic in
7737		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7738		 * barrier implied by atomic_xchg in
7739		 * btrfs_dev_stats_read_and_reset.
7740		 */
7741		smp_rmb();
7742
7743		ret = update_dev_stat_item(trans, device);
7744		if (!ret)
7745			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7746	}
7747	mutex_unlock(&fs_devices->device_list_mutex);
7748
7749	return ret;
7750}
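
/*
 * The dirty-counter scheme above, restated with C11 atomics (kept out
 * of the build): snapshot the change counter, order the loads, persist
 * the values, then subtract only the snapshot, so an increment racing
 * in after the snapshot stays dirty and is flushed on the next commit.
 * A single counter stands in for the whole stats array.
 */
#if 0
#include <stdatomic.h>

struct stats {
	atomic_int dirty;	/* plays the role of dev_stats_ccnt */
	_Atomic long value;	/* one counter stands in for the array */
};

static void writer_inc(struct stats *s)
{
	atomic_fetch_add_explicit(&s->value, 1, memory_order_relaxed);
	/* publish the new value before bumping the dirty counter */
	atomic_fetch_add_explicit(&s->dirty, 1, memory_order_release);
}

static void commit_flush(struct stats *s, void (*persist)(long))
{
	int snap = atomic_load_explicit(&s->dirty, memory_order_acquire);

	if (snap == 0)
		return;
	persist(atomic_load_explicit(&s->value, memory_order_relaxed));
	/* increments that raced in after 'snap' remain dirty */
	atomic_fetch_sub_explicit(&s->dirty, snap, memory_order_relaxed);
}
#endif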
7751
7752void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7753{
7754	btrfs_dev_stat_inc(dev, index);
7755
7756	if (!dev->dev_stats_valid)
7757		return;
7758	btrfs_err_rl_in_rcu(dev->fs_info,
7759		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7760			   btrfs_dev_name(dev),
7761			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7762			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7763			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7764			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7765			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7766}
7767
7768static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7769{
7770	int i;
7771
7772	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7773		if (btrfs_dev_stat_read(dev, i) != 0)
7774			break;
7775	if (i == BTRFS_DEV_STAT_VALUES_MAX)
7776		return; /* all values == 0, suppress message */
7777
7778	btrfs_info_in_rcu(dev->fs_info,
7779		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7780	       btrfs_dev_name(dev),
7781	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7782	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7783	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7784	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7785	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7786}
7787
7788int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7789			struct btrfs_ioctl_get_dev_stats *stats)
7790{
7791	BTRFS_DEV_LOOKUP_ARGS(args);
7792	struct btrfs_device *dev;
7793	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7794	int i;
7795
7796	mutex_lock(&fs_devices->device_list_mutex);
7797	args.devid = stats->devid;
7798	dev = btrfs_find_device(fs_info->fs_devices, &args);
7799	mutex_unlock(&fs_devices->device_list_mutex);
7800
7801	if (!dev) {
7802		btrfs_warn(fs_info, "get dev_stats failed, device not found");
7803		return -ENODEV;
7804	} else if (!dev->dev_stats_valid) {
7805		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7806		return -ENODEV;
7807	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7808		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7809			if (stats->nr_items > i)
7810				stats->values[i] =
7811					btrfs_dev_stat_read_and_reset(dev, i);
7812			else
7813				btrfs_dev_stat_set(dev, i, 0);
7814		}
7815		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7816			   current->comm, task_pid_nr(current));
7817	} else {
7818		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7819			if (stats->nr_items > i)
7820				stats->values[i] = btrfs_dev_stat_read(dev, i);
7821	}
7822	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7823		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7824	return 0;
7825}
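
/*
 * Userspace view of the handler above, sketched (kept out of the
 * build): it backs the BTRFS_IOC_GET_DEV_STATS ioctl from
 * <linux/btrfs.h>; setting BTRFS_DEV_STATS_RESET in .flags zeroes the
 * counters after they are read.  Error reporting is trimmed.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int dump_dev_stats(const char *mnt_path, unsigned long long devid)
{
	struct btrfs_ioctl_get_dev_stats gds;
	unsigned long long i;
	int fd = open(mnt_path, O_RDONLY);

	if (fd < 0)
		return -1;
	memset(&gds, 0, sizeof(gds));
	gds.devid = devid;
	gds.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &gds) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	for (i = 0; i < gds.nr_items; i++)
		printf("stat[%llu] = %llu\n", i,
		       (unsigned long long)gds.values[i]);
	return 0;
}
#endif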
7826
7827/*
7828 * Update the size and bytes used for each device where it changed.  This is
7829 * delayed since we would otherwise get errors while writing out the
7830 * superblocks.
7831 *
7832 * Must be invoked during transaction commit.
7833 */
7834void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7835{
7836	struct btrfs_device *curr, *next;
7837
7838	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7839
7840	if (list_empty(&trans->dev_update_list))
7841		return;
7842
7843	/*
7844	 * We don't need the device_list_mutex here.  This list is owned by the
7845	 * transaction and the transaction must complete before the device is
7846	 * released.
7847	 */
7848	mutex_lock(&trans->fs_info->chunk_mutex);
7849	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7850				 post_commit_list) {
7851		list_del_init(&curr->post_commit_list);
7852		curr->commit_total_bytes = curr->disk_total_bytes;
7853		curr->commit_bytes_used = curr->bytes_used;
7854	}
7855	mutex_unlock(&trans->fs_info->chunk_mutex);
7856}
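
/*
 * The commit-time drain above, isolated (kept out of the build): size
 * updates are queued on a list owned by the transaction and applied in
 * one pass, using the _safe iterator because every entry unlinks
 * itself.  The entry type is a hypothetical stand-in around the
 * kernel's <linux/list.h> helpers.
 */
#if 0
#include <linux/list.h>

struct pending_resize {
	struct list_head node;
	u64 new_total;
	u64 *committed_total;		/* field updated at commit */
};

static void drain_pending_resizes(struct list_head *pending)
{
	struct pending_resize *cur, *next;

	list_for_each_entry_safe(cur, next, pending, node) {
		list_del_init(&cur->node);
		*cur->committed_total = cur->new_total;
	}
}
#endif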
7857
7858/*
7859 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7860 */
7861int btrfs_bg_type_to_factor(u64 flags)
7862{
7863	const int index = btrfs_bg_flags_to_raid_index(flags);
7864
7865	return btrfs_raid_array[index].ncopies;
7866}
7867
7868
7869
7870static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7871				 u64 chunk_offset, u64 devid,
7872				 u64 physical_offset, u64 physical_len)
7873{
7874	struct btrfs_dev_lookup_args args = { .devid = devid };
7875	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7876	struct extent_map *em;
7877	struct map_lookup *map;
7878	struct btrfs_device *dev;
7879	u64 stripe_len;
7880	bool found = false;
7881	int ret = 0;
7882	int i;
7883
7884	read_lock(&em_tree->lock);
7885	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7886	read_unlock(&em_tree->lock);
7887
7888	if (!em) {
7889		btrfs_err(fs_info,
7890"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7891			  physical_offset, devid);
7892		ret = -EUCLEAN;
7893		goto out;
7894	}
7895
7896	map = em->map_lookup;
7897	stripe_len = btrfs_calc_stripe_length(em);
7898	if (physical_len != stripe_len) {
7899		btrfs_err(fs_info,
7900"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7901			  physical_offset, devid, em->start, physical_len,
7902			  stripe_len);
7903		ret = -EUCLEAN;
7904		goto out;
7905	}
7906
7907	/*
7908	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
7909	 * space. Although the kernel can handle it without problems, it is
7910	 * better to warn the user.
7911	 */
7912	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
7913		btrfs_warn(fs_info,
7914		"devid %llu physical %llu len %llu inside the reserved space",
7915			   devid, physical_offset, physical_len);
7916
7917	for (i = 0; i < map->num_stripes; i++) {
7918		if (map->stripes[i].dev->devid == devid &&
7919		    map->stripes[i].physical == physical_offset) {
7920			found = true;
7921			if (map->verified_stripes >= map->num_stripes) {
7922				btrfs_err(fs_info,
7923				"too many dev extents for chunk %llu found",
7924					  em->start);
7925				ret = -EUCLEAN;
7926				goto out;
7927			}
7928			map->verified_stripes++;
7929			break;
7930		}
7931	}
7932	if (!found) {
7933		btrfs_err(fs_info,
7934	"dev extent physical offset %llu devid %llu has no corresponding chunk",
7935			physical_offset, devid);
7936		ret = -EUCLEAN;
7937	}
7938
7939	/* Make sure no dev extent is beyond device boundary */
7940	dev = btrfs_find_device(fs_info->fs_devices, &args);
7941	if (!dev) {
7942		btrfs_err(fs_info, "failed to find devid %llu", devid);
7943		ret = -EUCLEAN;
7944		goto out;
7945	}
7946
7947	if (physical_offset + physical_len > dev->disk_total_bytes) {
7948		btrfs_err(fs_info,
7949"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7950			  devid, physical_offset, physical_len,
7951			  dev->disk_total_bytes);
7952		ret = -EUCLEAN;
7953		goto out;
7954	}
7955
7956	if (dev->zone_info) {
7957		u64 zone_size = dev->zone_info->zone_size;
7958
7959		if (!IS_ALIGNED(physical_offset, zone_size) ||
7960		    !IS_ALIGNED(physical_len, zone_size)) {
7961			btrfs_err(fs_info,
7962"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
7963				  devid, physical_offset, physical_len);
7964			ret = -EUCLEAN;
7965			goto out;
7966		}
7967	}
7968
7969out:
7970	free_extent_map(em);
7971	return ret;
7972}
7973
7974static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7975{
7976	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7977	struct extent_map *em;
7978	struct rb_node *node;
7979	int ret = 0;
7980
7981	read_lock(&em_tree->lock);
7982	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7983		em = rb_entry(node, struct extent_map, rb_node);
7984		if (em->map_lookup->num_stripes !=
7985		    em->map_lookup->verified_stripes) {
7986			btrfs_err(fs_info,
7987			"chunk %llu has missing dev extent, have %d expect %d",
7988				  em->start, em->map_lookup->verified_stripes,
7989				  em->map_lookup->num_stripes);
7990			ret = -EUCLEAN;
7991			goto out;
7992		}
7993	}
7994out:
7995	read_unlock(&em_tree->lock);
7996	return ret;
7997}
7998
7999/*
8000 * Ensure that all dev extents are mapped to the correct chunk, otherwise
8001 * later chunk allocation/free would cause unexpected behavior.
8002 *
8003 * NOTE: This will iterate through the whole device tree, which should be
8004 * roughly the same size as the chunk tree.  This slightly increases mount time.
8005 */
8006int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8007{
8008	struct btrfs_path *path;
8009	struct btrfs_root *root = fs_info->dev_root;
8010	struct btrfs_key key;
8011	u64 prev_devid = 0;
8012	u64 prev_dev_ext_end = 0;
8013	int ret = 0;
8014
8015	/*
8016	 * We don't have a dev_root because we mounted with ignorebadroots and
8017	 * failed to load the root, so we want to skip the verification in this
8018	 * case for sure.
8019	 *
8020	 * However if the dev root is fine, but the tree itself is corrupted
8021	 * we'd still fail to mount.  This verification is only to make sure
8022	 * writes can happen safely, so instead just bypass this check
8023	 * completely in the case of IGNOREBADROOTS.
8024	 */
8025	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8026		return 0;
8027
8028	key.objectid = 1;
8029	key.type = BTRFS_DEV_EXTENT_KEY;
8030	key.offset = 0;
8031
8032	path = btrfs_alloc_path();
8033	if (!path)
8034		return -ENOMEM;
8035
8036	path->reada = READA_FORWARD;
8037	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8038	if (ret < 0)
8039		goto out;
8040
8041	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8042		ret = btrfs_next_leaf(root, path);
8043		if (ret < 0)
8044			goto out;
8045		/* No dev extents at all? Not good */
8046		if (ret > 0) {
8047			ret = -EUCLEAN;
8048			goto out;
8049		}
8050	}
8051	while (1) {
8052		struct extent_buffer *leaf = path->nodes[0];
8053		struct btrfs_dev_extent *dext;
8054		int slot = path->slots[0];
8055		u64 chunk_offset;
8056		u64 physical_offset;
8057		u64 physical_len;
8058		u64 devid;
8059
8060		btrfs_item_key_to_cpu(leaf, &key, slot);
8061		if (key.type != BTRFS_DEV_EXTENT_KEY)
8062			break;
8063		devid = key.objectid;
8064		physical_offset = key.offset;
8065
8066		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8067		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8068		physical_len = btrfs_dev_extent_length(leaf, dext);
8069
8070		/* Check if this dev extent overlaps with the previous one */
8071		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8072			btrfs_err(fs_info,
8073"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8074				  devid, physical_offset, prev_dev_ext_end);
8075			ret = -EUCLEAN;
8076			goto out;
8077		}
8078
8079		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8080					    physical_offset, physical_len);
8081		if (ret < 0)
8082			goto out;
8083		prev_devid = devid;
8084		prev_dev_ext_end = physical_offset + physical_len;
8085
8086		ret = btrfs_next_item(root, path);
8087		if (ret < 0)
8088			goto out;
8089		if (ret > 0) {
8090			ret = 0;
8091			break;
8092		}
8093	}
8094
8095	/* Ensure all chunks have corresponding dev extents */
8096	ret = verify_chunk_dev_extent_mapping(fs_info);
8097out:
8098	btrfs_free_path(path);
8099	return ret;
8100}
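
/*
 * The overlap rule used above, isolated (kept out of the build): dev
 * extents arrive sorted by (devid, physical offset), so comparing each
 * start against the previous extent's end on the same device is
 * sufficient.
 */
#if 0
#include <stdint.h>

struct dext { uint64_t devid, start, len; };

/* @exts must be sorted by (devid, start); returns the index of the
 * first overlapping extent, or -1 when the layout is sane. */
static int find_overlap(const struct dext *exts, int nr)
{
	uint64_t prev_devid = 0, prev_end = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (exts[i].devid == prev_devid && exts[i].start < prev_end)
			return i;
		prev_devid = exts[i].devid;
		prev_end = exts[i].start + exts[i].len;
	}
	return -1;
}
#endif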
8101
8102/*
8103 * Check whether the given block group or device is pinned by any inode being
8104 * used as a swapfile.
8105 */
8106bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8107{
8108	struct btrfs_swapfile_pin *sp;
8109	struct rb_node *node;
8110
8111	spin_lock(&fs_info->swapfile_pins_lock);
8112	node = fs_info->swapfile_pins.rb_node;
8113	while (node) {
8114		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8115		if (ptr < sp->ptr)
8116			node = node->rb_left;
8117		else if (ptr > sp->ptr)
8118			node = node->rb_right;
8119		else
8120			break;
8121	}
8122	spin_unlock(&fs_info->swapfile_pins_lock);
8123	return node != NULL;
8124}
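
/*
 * The lookup above, reduced to its shape (kept out of the build): a
 * binary search tree keyed by the raw pointer value, so a membership
 * test is a plain descent on pointer comparisons.  The node type is a
 * hypothetical stand-in for the rb-tree node.
 */
#if 0
#include <stdbool.h>

struct pin_node {
	struct pin_node *left, *right;
	void *ptr;		/* the pinned block group or device */
};

static bool pin_tree_contains(const struct pin_node *node, const void *ptr)
{
	while (node) {
		if (ptr < node->ptr)
			node = node->left;
		else if (ptr > node->ptr)
			node = node->right;
		else
			return true;
	}
	return false;
}
#endif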
8125
8126static int relocating_repair_kthread(void *data)
8127{
8128	struct btrfs_block_group *cache = data;
8129	struct btrfs_fs_info *fs_info = cache->fs_info;
8130	u64 target;
8131	int ret = 0;
8132
8133	target = cache->start;
8134	btrfs_put_block_group(cache);
8135
8136	sb_start_write(fs_info->sb);
8137	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8138		btrfs_info(fs_info,
8139			   "zoned: skip relocating block group %llu to repair: EBUSY",
8140			   target);
8141		sb_end_write(fs_info->sb);
8142		return -EBUSY;
8143	}
8144
8145	mutex_lock(&fs_info->reclaim_bgs_lock);
8146
8147	/* Ensure block group still exists */
8148	cache = btrfs_lookup_block_group(fs_info, target);
8149	if (!cache)
8150		goto out;
8151
8152	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
8153		goto out;
8154
8155	ret = btrfs_may_alloc_data_chunk(fs_info, target);
8156	if (ret < 0)
8157		goto out;
8158
8159	btrfs_info(fs_info,
8160		   "zoned: relocating block group %llu to repair IO failure",
8161		   target);
8162	ret = btrfs_relocate_chunk(fs_info, target);
8163
8164out:
8165	if (cache)
8166		btrfs_put_block_group(cache);
8167	mutex_unlock(&fs_info->reclaim_bgs_lock);
8168	btrfs_exclop_finish(fs_info);
8169	sb_end_write(fs_info->sb);
8170
8171	return ret;
8172}
8173
8174bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8175{
8176	struct btrfs_block_group *cache;
8177
8178	if (!btrfs_is_zoned(fs_info))
8179		return false;
8180
8181	/* Do not attempt to repair in degraded state */
8182	if (btrfs_test_opt(fs_info, DEGRADED))
8183		return true;
8184
8185	cache = btrfs_lookup_block_group(fs_info, logical);
8186	if (!cache)
8187		return true;
8188
8189	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
8190		btrfs_put_block_group(cache);
8191		return true;
8192	}
8193
8194	kthread_run(relocating_repair_kthread, cache,
8195		    "btrfs-relocating-repair");
8196
8197	return true;
8198}
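
/*
 * The single-worker gate above, isolated (kept out of the build): an
 * atomic test-and-set on a per-block-group flag lets only the first
 * caller spawn the repair worker, mirroring test_and_set_bit() plus the
 * fire-and-forget kthread_run().  C11 stand-in; the flag must start as
 * ATOMIC_FLAG_INIT.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

struct repair_target {
	atomic_flag repairing;	/* like BLOCK_GROUP_FLAG_RELOCATING_REPAIR */
};

static bool try_start_repair(struct repair_target *t,
			     void (*spawn_worker)(struct repair_target *))
{
	if (atomic_flag_test_and_set(&t->repairing))
		return false;		/* a repair is already in flight */
	spawn_worker(t);		/* fire and forget */
	return true;
}
#endif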
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/sched/mm.h>
   8#include <linux/bio.h>
   9#include <linux/slab.h>
  10#include <linux/blkdev.h>
  11#include <linux/ratelimit.h>
  12#include <linux/kthread.h>
  13#include <linux/raid/pq.h>
  14#include <linux/semaphore.h>
  15#include <linux/uuid.h>
  16#include <linux/list_sort.h>
 
  17#include "misc.h"
  18#include "ctree.h"
  19#include "extent_map.h"
  20#include "disk-io.h"
  21#include "transaction.h"
  22#include "print-tree.h"
  23#include "volumes.h"
  24#include "raid56.h"
  25#include "async-thread.h"
  26#include "check-integrity.h"
  27#include "rcu-string.h"
  28#include "dev-replace.h"
  29#include "sysfs.h"
  30#include "tree-checker.h"
  31#include "space-info.h"
  32#include "block-group.h"
  33#include "discard.h"
 
 
 
 
 
 
 
 
 
 
 
 
  34
  35const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
  36	[BTRFS_RAID_RAID10] = {
  37		.sub_stripes	= 2,
  38		.dev_stripes	= 1,
  39		.devs_max	= 0,	/* 0 == as many as possible */
  40		.devs_min	= 4,
  41		.tolerated_failures = 1,
  42		.devs_increment	= 2,
  43		.ncopies	= 2,
  44		.nparity        = 0,
  45		.raid_name	= "raid10",
  46		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
  47		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
  48	},
  49	[BTRFS_RAID_RAID1] = {
  50		.sub_stripes	= 1,
  51		.dev_stripes	= 1,
  52		.devs_max	= 2,
  53		.devs_min	= 2,
  54		.tolerated_failures = 1,
  55		.devs_increment	= 2,
  56		.ncopies	= 2,
  57		.nparity        = 0,
  58		.raid_name	= "raid1",
  59		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
  60		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
  61	},
  62	[BTRFS_RAID_RAID1C3] = {
  63		.sub_stripes	= 1,
  64		.dev_stripes	= 1,
  65		.devs_max	= 3,
  66		.devs_min	= 3,
  67		.tolerated_failures = 2,
  68		.devs_increment	= 3,
  69		.ncopies	= 3,
  70		.nparity        = 0,
  71		.raid_name	= "raid1c3",
  72		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
  73		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
  74	},
  75	[BTRFS_RAID_RAID1C4] = {
  76		.sub_stripes	= 1,
  77		.dev_stripes	= 1,
  78		.devs_max	= 4,
  79		.devs_min	= 4,
  80		.tolerated_failures = 3,
  81		.devs_increment	= 4,
  82		.ncopies	= 4,
  83		.nparity        = 0,
  84		.raid_name	= "raid1c4",
  85		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
  86		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
  87	},
  88	[BTRFS_RAID_DUP] = {
  89		.sub_stripes	= 1,
  90		.dev_stripes	= 2,
  91		.devs_max	= 1,
  92		.devs_min	= 1,
  93		.tolerated_failures = 0,
  94		.devs_increment	= 1,
  95		.ncopies	= 2,
  96		.nparity        = 0,
  97		.raid_name	= "dup",
  98		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
  99		.mindev_error	= 0,
 100	},
 101	[BTRFS_RAID_RAID0] = {
 102		.sub_stripes	= 1,
 103		.dev_stripes	= 1,
 104		.devs_max	= 0,
 105		.devs_min	= 2,
 106		.tolerated_failures = 0,
 107		.devs_increment	= 1,
 108		.ncopies	= 1,
 109		.nparity        = 0,
 110		.raid_name	= "raid0",
 111		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
 112		.mindev_error	= 0,
 113	},
 114	[BTRFS_RAID_SINGLE] = {
 115		.sub_stripes	= 1,
 116		.dev_stripes	= 1,
 117		.devs_max	= 1,
 118		.devs_min	= 1,
 119		.tolerated_failures = 0,
 120		.devs_increment	= 1,
 121		.ncopies	= 1,
 122		.nparity        = 0,
 123		.raid_name	= "single",
 124		.bg_flag	= 0,
 125		.mindev_error	= 0,
 126	},
 127	[BTRFS_RAID_RAID5] = {
 128		.sub_stripes	= 1,
 129		.dev_stripes	= 1,
 130		.devs_max	= 0,
 131		.devs_min	= 2,
 132		.tolerated_failures = 1,
 133		.devs_increment	= 1,
 134		.ncopies	= 1,
 135		.nparity        = 1,
 136		.raid_name	= "raid5",
 137		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
 138		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
 139	},
 140	[BTRFS_RAID_RAID6] = {
 141		.sub_stripes	= 1,
 142		.dev_stripes	= 1,
 143		.devs_max	= 0,
 144		.devs_min	= 3,
 145		.tolerated_failures = 2,
 146		.devs_increment	= 1,
 147		.ncopies	= 1,
 148		.nparity        = 2,
 149		.raid_name	= "raid6",
 150		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
 151		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
 152	},
 153};
 154
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 155const char *btrfs_bg_type_to_raid_name(u64 flags)
 156{
 157	const int index = btrfs_bg_flags_to_raid_index(flags);
 158
 159	if (index >= BTRFS_NR_RAID_TYPES)
 160		return NULL;
 161
 162	return btrfs_raid_array[index].raid_name;
 163}
 164
 
 
 
 
 
 
 
 165/*
 166 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 167 * bytes including terminating null byte.
 168 */
 169void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
 170{
 171	int i;
 172	int ret;
 173	char *bp = buf;
 174	u64 flags = bg_flags;
 175	u32 size_bp = size_buf;
 176
 177	if (!flags) {
 178		strcpy(bp, "NONE");
 179		return;
 180	}
 181
 182#define DESCRIBE_FLAG(flag, desc)						\
 183	do {								\
 184		if (flags & (flag)) {					\
 185			ret = snprintf(bp, size_bp, "%s|", (desc));	\
 186			if (ret < 0 || ret >= size_bp)			\
 187				goto out_overflow;			\
 188			size_bp -= ret;					\
 189			bp += ret;					\
 190			flags &= ~(flag);				\
 191		}							\
 192	} while (0)
 193
 194	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
 195	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
 196	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
 197
 198	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
 199	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
 200		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
 201			      btrfs_raid_array[i].raid_name);
 202#undef DESCRIBE_FLAG
 203
 204	if (flags) {
 205		ret = snprintf(bp, size_bp, "0x%llx|", flags);
 206		size_bp -= ret;
 207	}
 208
 209	if (size_bp < size_buf)
 210		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
 211
 212	/*
 213	 * The text is trimmed, it's up to the caller to provide sufficiently
 214	 * large buffer
 215	 */
 216out_overflow:;
 217}
 218
 219static int init_first_rw_device(struct btrfs_trans_handle *trans);
 220static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
 221static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
 222static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
 223static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
 224			     enum btrfs_map_op op,
 225			     u64 logical, u64 *length,
 226			     struct btrfs_bio **bbio_ret,
 227			     int mirror_num, int need_raid_map);
 228
 229/*
 230 * Device locking
 231 * ==============
 232 *
 233 * There are several mutexes that protect manipulation of devices and low-level
 234 * structures like chunks but not block groups, extents or files
 235 *
 236 * uuid_mutex (global lock)
 237 * ------------------------
 238 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 239 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 240 * device) or requested by the device= mount option
 241 *
 242 * the mutex can be very coarse and can cover long-running operations
 243 *
 244 * protects: updates to fs_devices counters like missing devices, rw devices,
 245 * seeding, structure cloning, opening/closing devices at mount/umount time
 246 *
 247 * global::fs_devs - add, remove, updates to the global list
 248 *
 249 * does not protect: manipulation of the fs_devices::devices list in general
 250 * but in mount context it could be used to exclude list modifications by eg.
 251 * scan ioctl
 252 *
 253 * btrfs_device::name - renames (write side), read is RCU
 254 *
 255 * fs_devices::device_list_mutex (per-fs, with RCU)
 256 * ------------------------------------------------
 257 * protects updates to fs_devices::devices, ie. adding and deleting
 258 *
 259 * simple list traversal with read-only actions can be done with RCU protection
 260 *
 261 * may be used to exclude some operations from running concurrently without any
 262 * modifications to the list (see write_all_supers)
 263 *
 264 * Is not required at mount and close times, because our device list is
 265 * protected by the uuid_mutex at that point.
 266 *
 267 * balance_mutex
 268 * -------------
 269 * protects balance structures (status, state) and context accessed from
 270 * several places (internally, ioctl)
 271 *
 272 * chunk_mutex
 273 * -----------
 274 * protects chunks, adding or removing during allocation, trim or when a new
 275 * device is added/removed. Additionally it also protects post_commit_list of
 276 * individual devices, since they can be added to the transaction's
 277 * post_commit_list only with chunk_mutex held.
 278 *
 279 * cleaner_mutex
 280 * -------------
 281 * a big lock that is held by the cleaner thread and prevents running subvolume
 282 * cleaning together with relocation or delayed iputs
 283 *
 284 *
 285 * Lock nesting
 286 * ============
 287 *
 288 * uuid_mutex
 289 *   device_list_mutex
 290 *     chunk_mutex
 291 *   balance_mutex
 292 *
 293 *
 294 * Exclusive operations, BTRFS_FS_EXCL_OP
 295 * ======================================
 296 *
 297 * Maintains the exclusivity of the following operations that apply to the
 298 * whole filesystem and cannot run in parallel.
 299 *
 300 * - Balance (*)
 301 * - Device add
 302 * - Device remove
 303 * - Device replace (*)
 304 * - Resize
 305 *
 306 * The device operations (as above) can be in one of the following states:
 307 *
 308 * - Running state
 309 * - Paused state
 310 * - Completed state
 311 *
 312 * Only device operations marked with (*) can go into the Paused state for the
 313 * following reasons:
 314 *
 315 * - ioctl (only Balance can be Paused through ioctl)
 316 * - filesystem remounted as read-only
 317 * - filesystem unmounted and mounted as read-only
 318 * - system power-cycle and filesystem mounted as read-only
 319 * - filesystem or device errors leading to forced read-only
 320 *
 321 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 322 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 323 * A device operation in Paused or Running state can be canceled or resumed
 324 * either by ioctl (Balance only) or when remounted as read-write.
 325 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 326 * completed.
 327 */
 328
 329DEFINE_MUTEX(uuid_mutex);
 330static LIST_HEAD(fs_uuids);
 331struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
 332{
 333	return &fs_uuids;
 334}
 335
 336/*
 337 * alloc_fs_devices - allocate struct btrfs_fs_devices
 338 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 339 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 340 *
 341 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 342 * The returned struct is not linked onto any lists and can be destroyed with
 343 * kfree() right away.
 344 */
 345static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
 346						 const u8 *metadata_fsid)
 347{
 348	struct btrfs_fs_devices *fs_devs;
 349
 350	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
 351	if (!fs_devs)
 352		return ERR_PTR(-ENOMEM);
 353
 354	mutex_init(&fs_devs->device_list_mutex);
 355
 356	INIT_LIST_HEAD(&fs_devs->devices);
 357	INIT_LIST_HEAD(&fs_devs->alloc_list);
 358	INIT_LIST_HEAD(&fs_devs->fs_list);
 
 359	if (fsid)
 360		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
 361
 362	if (metadata_fsid)
 363		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
 364	else if (fsid)
 365		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
 366
 367	return fs_devs;
 368}
 369
 370void btrfs_free_device(struct btrfs_device *device)
 371{
 372	WARN_ON(!list_empty(&device->post_commit_list));
 373	rcu_string_free(device->name);
 374	extent_io_tree_release(&device->alloc_state);
 375	bio_put(device->flush_bio);
 376	kfree(device);
 377}
 378
 379static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
 380{
 381	struct btrfs_device *device;
 
 382	WARN_ON(fs_devices->opened);
 383	while (!list_empty(&fs_devices->devices)) {
 384		device = list_entry(fs_devices->devices.next,
 385				    struct btrfs_device, dev_list);
 386		list_del(&device->dev_list);
 387		btrfs_free_device(device);
 388	}
 389	kfree(fs_devices);
 390}
 391
 392void __exit btrfs_cleanup_fs_uuids(void)
 393{
 394	struct btrfs_fs_devices *fs_devices;
 395
 396	while (!list_empty(&fs_uuids)) {
 397		fs_devices = list_entry(fs_uuids.next,
 398					struct btrfs_fs_devices, fs_list);
 399		list_del(&fs_devices->fs_list);
 400		free_fs_devices(fs_devices);
 401	}
 402}
 403
 404/*
 405 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 406 * Returned struct is not linked onto any lists and must be destroyed using
 407 * btrfs_free_device.
 408 */
 409static struct btrfs_device *__alloc_device(void)
 410{
 411	struct btrfs_device *dev;
 412
 413	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 414	if (!dev)
 415		return ERR_PTR(-ENOMEM);
 416
 417	/*
 418	 * Preallocate a bio that's always going to be used for flushing device
 419	 * barriers and matches the device lifespan
 420	 */
 421	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
 422	if (!dev->flush_bio) {
 423		kfree(dev);
 424		return ERR_PTR(-ENOMEM);
 425	}
 426
 427	INIT_LIST_HEAD(&dev->dev_list);
 428	INIT_LIST_HEAD(&dev->dev_alloc_list);
 429	INIT_LIST_HEAD(&dev->post_commit_list);
 430
 431	atomic_set(&dev->reada_in_flight, 0);
 432	atomic_set(&dev->dev_stats_ccnt, 0);
 433	btrfs_device_data_ordered_init(dev);
 434	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
 435	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
 436	extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);
 437
 438	return dev;
 439}
 440
 441static noinline struct btrfs_fs_devices *find_fsid(
 442		const u8 *fsid, const u8 *metadata_fsid)
 443{
 444	struct btrfs_fs_devices *fs_devices;
 445
 446	ASSERT(fsid);
 447
 448	/* Handle non-split brain cases */
 449	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 450		if (metadata_fsid) {
 451			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
 452			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
 453				      BTRFS_FSID_SIZE) == 0)
 454				return fs_devices;
 455		} else {
 456			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
 457				return fs_devices;
 458		}
 459	}
 460	return NULL;
 461}
 462
 463static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
 464				struct btrfs_super_block *disk_super)
 465{
 466
 467	struct btrfs_fs_devices *fs_devices;
 468
 469	/*
 470	 * Handle scanned device having completed its fsid change but
 471	 * belonging to a fs_devices that was created by first scanning
 472	 * a device which didn't have its fsid/metadata_uuid changed
 473	 * at all and the CHANGING_FSID_V2 flag set.
 474	 */
 475	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 476		if (fs_devices->fsid_change &&
 477		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
 478			   BTRFS_FSID_SIZE) == 0 &&
 479		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
 480			   BTRFS_FSID_SIZE) == 0) {
 481			return fs_devices;
 482		}
 483	}
 484	/*
 485	 * Handle scanned device having completed its fsid change but
 486	 * belonging to a fs_devices that was created by a device that
 487	 * has an outdated pair of fsid/metadata_uuid and
 488	 * CHANGING_FSID_V2 flag set.
 489	 */
 490	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 491		if (fs_devices->fsid_change &&
 492		    memcmp(fs_devices->metadata_uuid,
 493			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
 494		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
 495			   BTRFS_FSID_SIZE) == 0) {
 496			return fs_devices;
 497		}
 498	}
 499
 500	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
 501}
 502
 503
 504static int
 505btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
 506		      int flush, struct block_device **bdev,
 507		      struct btrfs_super_block **disk_super)
 508{
 509	int ret;
 510
 511	*bdev = blkdev_get_by_path(device_path, flags, holder);
 512
 513	if (IS_ERR(*bdev)) {
 514		ret = PTR_ERR(*bdev);
 515		goto error;
 516	}
 517
 518	if (flush)
 519		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
 520	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
 521	if (ret) {
 522		blkdev_put(*bdev, flags);
 523		goto error;
 524	}
 525	invalidate_bdev(*bdev);
 526	*disk_super = btrfs_read_dev_super(*bdev);
 527	if (IS_ERR(*disk_super)) {
 528		ret = PTR_ERR(*disk_super);
 529		blkdev_put(*bdev, flags);
 530		goto error;
 531	}
 532
 533	return 0;
 534
 535error:
 536	*bdev = NULL;
 537	return ret;
 538}
 539
 540static bool device_path_matched(const char *path, struct btrfs_device *device)
 541{
 542	int found;
 543
 544	rcu_read_lock();
 545	found = strcmp(rcu_str_deref(device->name), path);
 546	rcu_read_unlock();
 547
 548	return found == 0;
 549}
 550
 551/*
 552 *  Search and remove all stale (devices which are not mounted) devices.
 553 *  When both inputs are NULL, it will search and release all stale devices.
 554 *  path:	Optional. When provided will it release all unmounted devices
 555 *		matching this path only.
 556 *  skip_dev:	Optional. Will skip this device when searching for the stale
 557 *		devices.
 558 *  Return:	0 for success or if @path is NULL.
 559 * 		-EBUSY if @path is a mounted device.
 560 * 		-ENOENT if @path does not match any device in the list.
 
 
 561 */
 562static int btrfs_free_stale_devices(const char *path,
 563				     struct btrfs_device *skip_device)
 564{
 565	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
 566	struct btrfs_device *device, *tmp_device;
 567	int ret = 0;
 568
 569	if (path)
 
 
 570		ret = -ENOENT;
 571
 572	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
 573
 574		mutex_lock(&fs_devices->device_list_mutex);
 575		list_for_each_entry_safe(device, tmp_device,
 576					 &fs_devices->devices, dev_list) {
 577			if (skip_device && skip_device == device)
 578				continue;
 579			if (path && !device->name)
 580				continue;
 581			if (path && !device_path_matched(path, device))
 582				continue;
 583			if (fs_devices->opened) {
 584				/* for an already deleted device return 0 */
 585				if (path && ret != 0)
 586					ret = -EBUSY;
 587				break;
 588			}
 589
 590			/* delete the stale device */
 591			fs_devices->num_devices--;
 592			list_del(&device->dev_list);
 593			btrfs_free_device(device);
 594
 595			ret = 0;
 596			if (fs_devices->num_devices == 0)
 597				break;
 598		}
 599		mutex_unlock(&fs_devices->device_list_mutex);
 600
 601		if (fs_devices->num_devices == 0) {
 602			btrfs_sysfs_remove_fsid(fs_devices);
 603			list_del(&fs_devices->fs_list);
 604			free_fs_devices(fs_devices);
 605		}
 606	}
 607
 608	return ret;
 609}
 610
 611/*
 612 * This is only used on mount, and we are protected from competing things
 613 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 614 * fs_devices->device_list_mutex here.
 615 */
 616static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
 617			struct btrfs_device *device, fmode_t flags,
 618			void *holder)
 619{
 620	struct request_queue *q;
 621	struct block_device *bdev;
 622	struct btrfs_super_block *disk_super;
 623	u64 devid;
 624	int ret;
 625
 626	if (device->bdev)
 627		return -EINVAL;
 628	if (!device->name)
 629		return -EINVAL;
 630
 631	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
 632				    &bdev, &disk_super);
 633	if (ret)
 634		return ret;
 635
 636	devid = btrfs_stack_device_id(&disk_super->dev_item);
 637	if (devid != device->devid)
 638		goto error_free_page;
 639
 640	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
 641		goto error_free_page;
 642
 643	device->generation = btrfs_super_generation(disk_super);
 644
 645	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
 646		if (btrfs_super_incompat_flags(disk_super) &
 647		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
 648			pr_err(
 649		"BTRFS: Invalid seeding and uuid-changed device detected\n");
 650			goto error_free_page;
 651		}
 652
 653		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
 654		fs_devices->seeding = true;
 655	} else {
 656		if (bdev_read_only(bdev))
 657			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
 658		else
 659			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
 660	}
 661
 662	q = bdev_get_queue(bdev);
 663	if (!blk_queue_nonrot(q))
 664		fs_devices->rotating = true;
 665
 
 
 
 666	device->bdev = bdev;
 667	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
 668	device->mode = flags;
 669
 670	fs_devices->open_devices++;
 671	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
 672	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
 673		fs_devices->rw_devices++;
 674		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
 675	}
 676	btrfs_release_disk_super(disk_super);
 677
 678	return 0;
 679
 680error_free_page:
 681	btrfs_release_disk_super(disk_super);
 682	blkdev_put(bdev, flags);
 683
 684	return -EINVAL;
 685}
 686
 687/*
 688 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 689 * being created with a disk that has already completed its fsid change. Such
 690 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 691 * Handle both cases here.
 692 */
 693static struct btrfs_fs_devices *find_fsid_inprogress(
 694					struct btrfs_super_block *disk_super)
 695{
 696	struct btrfs_fs_devices *fs_devices;
 697
 698	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 699		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
 700			   BTRFS_FSID_SIZE) != 0 &&
 701		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
 702			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
 703			return fs_devices;
 704		}
 705	}
 706
 707	return find_fsid(disk_super->fsid, NULL);
 708}
 709
 710
 711static struct btrfs_fs_devices *find_fsid_changed(
 712					struct btrfs_super_block *disk_super)
 713{
 714	struct btrfs_fs_devices *fs_devices;
 715
 716	/*
 717	 * Handles the case where scanned device is part of an fs that had
 718	 * multiple successful changes of FSID but curently device didn't
 719	 * observe it. Meaning our fsid will be different than theirs. We need
 720	 * to handle two subcases :
 721	 *  1 - The fs still continues to have different METADATA/FSID uuids.
 722	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
 723	 *  are equal).
 724	 */
 725	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 726		/* Changed UUIDs */
 727		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
 728			   BTRFS_FSID_SIZE) != 0 &&
 729		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
 730			   BTRFS_FSID_SIZE) == 0 &&
 731		    memcmp(fs_devices->fsid, disk_super->fsid,
 732			   BTRFS_FSID_SIZE) != 0)
 733			return fs_devices;
 734
 735		/* Unchanged UUIDs */
 736		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
 737			   BTRFS_FSID_SIZE) == 0 &&
 738		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
 739			   BTRFS_FSID_SIZE) == 0)
 740			return fs_devices;
 741	}
 742
 743	return NULL;
 744}
 745
 746static struct btrfs_fs_devices *find_fsid_reverted_metadata(
 747				struct btrfs_super_block *disk_super)
 748{
 749	struct btrfs_fs_devices *fs_devices;
 750
 751	/*
 752	 * Handle the case where the scanned device is part of an fs whose last
 753	 * metadata UUID change reverted it to the original FSID. At the same
 754	 * time * fs_devices was first created by another constitutent device
 755	 * which didn't fully observe the operation. This results in an
 756	 * btrfs_fs_devices created with metadata/fsid different AND
 757	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
 758	 * fs_devices equal to the FSID of the disk.
 759	 */
 760	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 761		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
 762			   BTRFS_FSID_SIZE) != 0 &&
 763		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
 764			   BTRFS_FSID_SIZE) == 0 &&
 765		    fs_devices->fsid_change)
 766			return fs_devices;
 767	}
 768
 769	return NULL;
 770}
 771/*
 772 * Add new device to list of registered devices
 773 *
 774 * Returns:
 775 * device pointer which was just added or updated when successful
 776 * error pointer when failed
 777 */
 778static noinline struct btrfs_device *device_list_add(const char *path,
 779			   struct btrfs_super_block *disk_super,
 780			   bool *new_device_added)
 781{
 782	struct btrfs_device *device;
 783	struct btrfs_fs_devices *fs_devices = NULL;
 784	struct rcu_string *name;
 785	u64 found_transid = btrfs_super_generation(disk_super);
 786	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
 
 
 787	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
 788		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
 789	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
 790					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
 791
 
 
 
 
 
 
 
 792	if (fsid_change_in_progress) {
 793		if (!has_metadata_uuid)
 794			fs_devices = find_fsid_inprogress(disk_super);
 795		else
 796			fs_devices = find_fsid_changed(disk_super);
 797	} else if (has_metadata_uuid) {
 798		fs_devices = find_fsid_with_metadata_uuid(disk_super);
 799	} else {
 800		fs_devices = find_fsid_reverted_metadata(disk_super);
 801		if (!fs_devices)
 802			fs_devices = find_fsid(disk_super->fsid, NULL);
 803	}
 804
 805
 806	if (!fs_devices) {
 807		if (has_metadata_uuid)
 808			fs_devices = alloc_fs_devices(disk_super->fsid,
 809						      disk_super->metadata_uuid);
 810		else
 811			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
 812
 813		if (IS_ERR(fs_devices))
 814			return ERR_CAST(fs_devices);
 815
 816		fs_devices->fsid_change = fsid_change_in_progress;
 817
 818		mutex_lock(&fs_devices->device_list_mutex);
 819		list_add(&fs_devices->fs_list, &fs_uuids);
 820
 821		device = NULL;
 822	} else {
 823		mutex_lock(&fs_devices->device_list_mutex);
 824		device = btrfs_find_device(fs_devices, devid,
 825				disk_super->dev_item.uuid, NULL, false);
 826
 827		/*
 828		 * If this disk has been pulled into an fs devices created by
 829		 * a device which had the CHANGING_FSID_V2 flag then replace the
 830		 * metadata_uuid/fsid values of the fs_devices.
 831		 */
 832		if (fs_devices->fsid_change &&
 833		    found_transid > fs_devices->latest_generation) {
 834			memcpy(fs_devices->fsid, disk_super->fsid,
 835					BTRFS_FSID_SIZE);
 836
 837			if (has_metadata_uuid)
 838				memcpy(fs_devices->metadata_uuid,
 839				       disk_super->metadata_uuid,
 840				       BTRFS_FSID_SIZE);
 841			else
 842				memcpy(fs_devices->metadata_uuid,
 843				       disk_super->fsid, BTRFS_FSID_SIZE);
 844
 845			fs_devices->fsid_change = false;
 846		}
 847	}
 848
 849	if (!device) {
 850		if (fs_devices->opened) {
 851			mutex_unlock(&fs_devices->device_list_mutex);
 852			return ERR_PTR(-EBUSY);
 853		}
 854
 855		device = btrfs_alloc_device(NULL, &devid,
 856					    disk_super->dev_item.uuid);
 857		if (IS_ERR(device)) {
 858			mutex_unlock(&fs_devices->device_list_mutex);
 859			/* we can safely leave the fs_devices entry around */
 860			return device;
 861		}
 862
 863		name = rcu_string_strdup(path, GFP_NOFS);
 864		if (!name) {
 865			btrfs_free_device(device);
 866			mutex_unlock(&fs_devices->device_list_mutex);
 867			return ERR_PTR(-ENOMEM);
 868		}
 869		rcu_assign_pointer(device->name, name);
 870
 871		list_add_rcu(&device->dev_list, &fs_devices->devices);
 872		fs_devices->num_devices++;
 873
 874		device->fs_devices = fs_devices;
 875		*new_device_added = true;
 876
 877		if (disk_super->label[0])
 878			pr_info(
 879	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
 880				disk_super->label, devid, found_transid, path,
 881				current->comm, task_pid_nr(current));
 882		else
 883			pr_info(
 884	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
 885				disk_super->fsid, devid, found_transid, path,
 886				current->comm, task_pid_nr(current));
 887
 888	} else if (!device->name || strcmp(device->name->str, path)) {
 889		/*
 890		 * When the FS is already mounted:
 891		 * 1. If you are here and if the device->name is NULL that
 892		 *    means this device was missing at time of FS mount.
 893		 * 2. If you are here and if the device->name is different
 894		 *    from 'path' that means either
 895		 *      a. The same device disappeared and reappeared with
 896		 *         different name. or
 897		 *      b. The missing-disk-which-was-replaced, has
 898		 *         reappeared now.
 899		 *
 900		 * We must allow 1 and 2a above. But 2b would be spurious
 901		 * and unintentional.
 902		 *
 903		 * Further in case of 1 and 2a above, the disk at 'path'
 904		 * would have missed some transaction when it was away and
 905		 * in case of 2a the stale bdev has to be updated as well.
 906		 * 2b must not be allowed at any time.
 907		 */
 908
 909		/*
 910		 * For now, we do allow updates to btrfs_fs_device through the
 911		 * btrfs dev scan cli after the FS has been mounted.  We're still
 912		 * tracking a problem where systems fail mount by subvolume id
 913		 * when we reject replacement on a mounted FS.
 914		 */
 915		if (!fs_devices->opened && found_transid < device->generation) {
 916			/*
 917			 * That is if the FS is _not_ mounted and if you
 918			 * are here, that means there is more than one
 919			 * disk with same uuid and devid.We keep the one
 920			 * with larger generation number or the last-in if
 921			 * generation are equal.
 922			 */
 923			mutex_unlock(&fs_devices->device_list_mutex);
 924			return ERR_PTR(-EEXIST);
 925		}
 926
 927		/*
 928		 * We are going to replace the device path for a given devid,
 929		 * make sure it's the same device if the device is mounted
 930		 */
 931		if (device->bdev) {
 932			struct block_device *path_bdev;
 933
 934			path_bdev = lookup_bdev(path);
 935			if (IS_ERR(path_bdev)) {
 936				mutex_unlock(&fs_devices->device_list_mutex);
 937				return ERR_CAST(path_bdev);
 938			}
 939
 940			if (device->bdev != path_bdev) {
 941				bdput(path_bdev);
 942				mutex_unlock(&fs_devices->device_list_mutex);
 943				btrfs_warn_in_rcu(device->fs_info,
 944			"duplicate device fsid:devid for %pU:%llu old:%s new:%s",
 945					disk_super->fsid, devid,
 946					rcu_str_deref(device->name), path);
 947				return ERR_PTR(-EEXIST);
 948			}
 949			bdput(path_bdev);
 950			btrfs_info_in_rcu(device->fs_info,
 951				"device fsid %pU devid %llu moved old:%s new:%s",
 952				disk_super->fsid, devid,
 953				rcu_str_deref(device->name), path);
 954		}
 955
 956		name = rcu_string_strdup(path, GFP_NOFS);
 957		if (!name) {
 958			mutex_unlock(&fs_devices->device_list_mutex);
 959			return ERR_PTR(-ENOMEM);
 960		}
 961		rcu_string_free(device->name);
 962		rcu_assign_pointer(device->name, name);
 963		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
 964			fs_devices->missing_devices--;
 965			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
 966		}
 967	}
 968
 969	/*
 970	 * Unmount does not free the btrfs_device struct but would zero
 971	 * generation along with most of the other members. So just update
 972	 * it back. We need it to pick the disk with the largest generation
 973	 * (as above).
 974	 */
 975	if (!fs_devices->opened) {
 976		device->generation = found_transid;
 977		fs_devices->latest_generation = max_t(u64, found_transid,
 978						fs_devices->latest_generation);
 979	}
 980
 981	fs_devices->total_devices = btrfs_super_num_devices(disk_super);
 982
 983	mutex_unlock(&fs_devices->device_list_mutex);
 984	return device;
 985}
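
/*
 * Hypothetical usage sketch, not part of the original file: how a caller is
 * expected to consume the return convention documented above (a device
 * pointer on success, an ERR_PTR() on failure).  As in btrfs_scan_one_device()
 * below, uuid_mutex must be held because device_list_add() walks the global
 * fs_uuids list.
 */
static inline int example_register_scanned_device(const char *path,
					struct btrfs_super_block *disk_super)
{
	bool new_device_added = false;
	struct btrfs_device *device;

	lockdep_assert_held(&uuid_mutex);

	device = device_list_add(path, disk_super, &new_device_added);
	if (IS_ERR(device))
		return PTR_ERR(device);	/* e.g. -EBUSY, -EEXIST or -ENOMEM */

	return 0;
}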
 986
 987static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 988{
 989	struct btrfs_fs_devices *fs_devices;
 990	struct btrfs_device *device;
 991	struct btrfs_device *orig_dev;
 992	int ret = 0;
 993
 994	fs_devices = alloc_fs_devices(orig->fsid, NULL);
 995	if (IS_ERR(fs_devices))
 996		return fs_devices;
 997
 998	mutex_lock(&orig->device_list_mutex);
 999	fs_devices->total_devices = orig->total_devices;
1000
1001	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
1002		struct rcu_string *name;
1003
1004		device = btrfs_alloc_device(NULL, &orig_dev->devid,
1005					    orig_dev->uuid);
1006		if (IS_ERR(device)) {
1007			ret = PTR_ERR(device);
1008			goto error;
1009		}
1010
1011		/*
1012		 * This is ok to do without rcu read locked because we hold the
1013		 * uuid mutex so nothing we touch in here is going to disappear.
1014		 */
1015		if (orig_dev->name) {
1016			name = rcu_string_strdup(orig_dev->name->str,
1017					GFP_KERNEL);
1018			if (!name) {
1019				btrfs_free_device(device);
1020				ret = -ENOMEM;
1021				goto error;
1022			}
1023			rcu_assign_pointer(device->name, name);
1024		}
1025
1026		list_add(&device->dev_list, &fs_devices->devices);
1027		device->fs_devices = fs_devices;
1028		fs_devices->num_devices++;
1029	}
1030	mutex_unlock(&orig->device_list_mutex);
1031	return fs_devices;
1032error:
1033	mutex_unlock(&orig->device_list_mutex);
1034	free_fs_devices(fs_devices);
1035	return ERR_PTR(ret);
1036}
1037
1038/*
1039 * After we have read the system tree and know devids belonging to
1040 * this filesystem, remove the device which does not belong there.
1041 */
1042void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
1043{
1044	struct btrfs_device *device, *next;
1045	struct btrfs_device *latest_dev = NULL;
1046
1047	mutex_lock(&uuid_mutex);
1048again:
1049	/* This is the initialized path, it is safe to release the devices. */
1050	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
1051		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
1052							&device->dev_state)) {
1053			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1054			     &device->dev_state) &&
1055			    !test_bit(BTRFS_DEV_STATE_MISSING,
1056				      &device->dev_state) &&
1057			     (!latest_dev ||
1058			      device->generation > latest_dev->generation)) {
1059				latest_dev = device;
1060			}
1061			continue;
1062		}
1063
1064		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
1065			/*
1066			 * In the first step, keep the device which has
1067			 * the correct fsid and the devid that is used
1068			 * for the dev_replace procedure.
1069			 * In the second step, the dev_replace state is
1070			 * read from the device tree and it is known
1071			 * whether the procedure is really active or
1072			 * not, which means whether this device is
1073			 * used or whether it should be removed.
1074			 */
1075			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1076						  &device->dev_state)) {
1077				continue;
1078			}
1079		}
1080		if (device->bdev) {
1081			blkdev_put(device->bdev, device->mode);
1082			device->bdev = NULL;
1083			fs_devices->open_devices--;
1084		}
1085		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1086			list_del_init(&device->dev_alloc_list);
1087			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1088			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1089				      &device->dev_state))
1090				fs_devices->rw_devices--;
1091		}
1092		list_del_init(&device->dev_list);
1093		fs_devices->num_devices--;
1094		btrfs_free_device(device);
1095	}
1096
1097	if (fs_devices->seed) {
1098		fs_devices = fs_devices->seed;
1099		goto again;
1100	}
1101
1102	fs_devices->latest_bdev = latest_dev->bdev;
1103
1104	mutex_unlock(&uuid_mutex);
1105}
1106
1107static void btrfs_close_bdev(struct btrfs_device *device)
1108{
1109	if (!device->bdev)
1110		return;
1111
1112	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1113		sync_blockdev(device->bdev);
1114		invalidate_bdev(device->bdev);
1115	}
1116
1117	blkdev_put(device->bdev, device->mode);
1118}
1119
1120static void btrfs_close_one_device(struct btrfs_device *device)
1121{
1122	struct btrfs_fs_devices *fs_devices = device->fs_devices;
1123
1124	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1125	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
1126		list_del_init(&device->dev_alloc_list);
1127		fs_devices->rw_devices--;
1128	}
1129
1130	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
1131		fs_devices->missing_devices--;
1132
1133	btrfs_close_bdev(device);
1134	if (device->bdev) {
1135		fs_devices->open_devices--;
1136		device->bdev = NULL;
1137	}
1138	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1139
1140	device->fs_info = NULL;
1141	atomic_set(&device->dev_stats_ccnt, 0);
1142	extent_io_tree_release(&device->alloc_state);
1143
1144	/* Verify the device is back in a pristine state */
1145	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
1146	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1147	ASSERT(list_empty(&device->dev_alloc_list));
1148	ASSERT(list_empty(&device->post_commit_list));
1149	ASSERT(atomic_read(&device->reada_in_flight) == 0);
1150}
1151
1152static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
1153{
1154	struct btrfs_device *device, *tmp;
1155
1156	if (--fs_devices->opened > 0)
1157		return 0;
1158
1159	mutex_lock(&fs_devices->device_list_mutex);
1160	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
1161		btrfs_close_one_device(device);
1162	}
1163	mutex_unlock(&fs_devices->device_list_mutex);
1164
1165	WARN_ON(fs_devices->open_devices);
1166	WARN_ON(fs_devices->rw_devices);
1167	fs_devices->opened = 0;
1168	fs_devices->seeding = false;
1169
1170	return 0;
1171}
1172
1173int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1174{
1175	struct btrfs_fs_devices *seed_devices = NULL;
1176	int ret;
1177
1178	mutex_lock(&uuid_mutex);
1179	ret = close_fs_devices(fs_devices);
1180	if (!fs_devices->opened) {
1181		seed_devices = fs_devices->seed;
1182		fs_devices->seed = NULL;
1183	}
1184	mutex_unlock(&uuid_mutex);
1185
1186	while (seed_devices) {
1187		fs_devices = seed_devices;
1188		seed_devices = fs_devices->seed;
1189		close_fs_devices(fs_devices);
1190		free_fs_devices(fs_devices);
1191	}
1192	return ret;
1193}
1194
1195static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1196				fmode_t flags, void *holder)
1197{
1198	struct btrfs_device *device;
1199	struct btrfs_device *latest_dev = NULL;
1200
1201	flags |= FMODE_EXCL;
1202
1203	list_for_each_entry(device, &fs_devices->devices, dev_list) {
1204		/* Just open everything we can; ignore failures here */
1205		if (btrfs_open_one_device(fs_devices, device, flags, holder))
1206			continue;
1207
1208		if (!latest_dev ||
1209		    device->generation > latest_dev->generation)
1210			latest_dev = device;
1211	}
1212	if (fs_devices->open_devices == 0)
1213		return -EINVAL;
1214
1215	fs_devices->opened = 1;
1216	fs_devices->latest_bdev = latest_dev->bdev;
1217	fs_devices->total_rw_bytes = 0;
1218	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
1219
1220	return 0;
1221}
1222
1223static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
1224{
1225	struct btrfs_device *dev1, *dev2;
1226
1227	dev1 = list_entry(a, struct btrfs_device, dev_list);
1228	dev2 = list_entry(b, struct btrfs_device, dev_list);
1229
1230	if (dev1->devid < dev2->devid)
1231		return -1;
1232	else if (dev1->devid > dev2->devid)
1233		return 1;
1234	return 0;
1235}
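
/*
 * Illustrative note, not in the original file: btrfs_open_devices() below
 * passes this comparator to list_sort() so that the per-fs device list is
 * walked, and the devices opened, in ascending devid order.
 */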
1236
1237int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1238		       fmode_t flags, void *holder)
1239{
1240	int ret;
1241
1242	lockdep_assert_held(&uuid_mutex);
1243	/*
1244	 * The device_list_mutex cannot be taken here in case opening the
1245	 * underlying device takes further locks like bd_mutex.
1246	 *
1247	 * We also don't need the lock here as this is called during mount and
1248	 * exclusion is provided by uuid_mutex
1249	 */
1250
1251	if (fs_devices->opened) {
1252		fs_devices->opened++;
1253		ret = 0;
1254	} else {
1255		list_sort(NULL, &fs_devices->devices, devid_cmp);
1256		ret = open_fs_devices(fs_devices, flags, holder);
1257	}
1258
1259	return ret;
1260}
1261
1262void btrfs_release_disk_super(struct btrfs_super_block *super)
1263{
1264	struct page *page = virt_to_page(super);
1265
1266	put_page(page);
1267}
1268
1269static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
1270						       u64 bytenr)
1271{
1272	struct btrfs_super_block *disk_super;
1273	struct page *page;
1274	void *p;
1275	pgoff_t index;
1276
1277	/* make sure our super fits in the device */
1278	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1279		return ERR_PTR(-EINVAL);
1280
1281	/* make sure our super fits in the page */
1282	if (sizeof(*disk_super) > PAGE_SIZE)
1283		return ERR_PTR(-EINVAL);
1284
1285	/* make sure our super doesn't straddle pages on disk */
1286	index = bytenr >> PAGE_SHIFT;
1287	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1288		return ERR_PTR(-EINVAL);
1289
1290	/* pull in the page with our super */
1291	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
1292
1293	if (IS_ERR(page))
1294		return ERR_CAST(page);
1295
1296	p = page_address(page);
1297
1298	/* align our pointer to the offset of the super block */
1299	disk_super = p + offset_in_page(bytenr);
1300
1301	if (btrfs_super_bytenr(disk_super) != bytenr ||
1302	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1303		btrfs_release_disk_super(p);
1304		return ERR_PTR(-EINVAL);
1305	}
1306
1307	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
1308		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
1309
1310	return disk_super;
1311}
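
/*
 * Worked example, not in the original file (assumes 4K pages): the primary
 * superblock lives at bytenr 65536, so index = 65536 >> PAGE_SHIFT = 16 and
 * offset_in_page(65536) = 0; the straddle check above then guarantees the
 * whole structure sits inside that single cached page.
 */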
1312
1313int btrfs_forget_devices(const char *path)
1314{
1315	int ret;
1316
1317	mutex_lock(&uuid_mutex);
1318	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1319	mutex_unlock(&uuid_mutex);
1320
1321	return ret;
1322}
1323
1324/*
1325 * Look for a btrfs signature on a device. This may be called out of the mount path
1326 * and we are not allowed to call set_blocksize during the scan. The superblock
1327 * is read via the pagecache.
1328 */
1329struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1330					   void *holder)
1331{
1332	struct btrfs_super_block *disk_super;
1333	bool new_device_added = false;
1334	struct btrfs_device *device = NULL;
1335	struct block_device *bdev;
1336	u64 bytenr;
1337
1338	lockdep_assert_held(&uuid_mutex);
1339
1340	/*
1341	 * we would like to check all the supers, but that would make
1342	 * a btrfs mount succeed after a mkfs from a different FS.
1343	 * So, we need to add a special mount option to scan for
1344	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1345	 */
1346	bytenr = btrfs_sb_offset(0);
1347	flags |= FMODE_EXCL;
1348
1349	bdev = blkdev_get_by_path(path, flags, holder);
1350	if (IS_ERR(bdev))
1351		return ERR_CAST(bdev);
1352
1353	disk_super = btrfs_read_disk_super(bdev, bytenr);
1354	if (IS_ERR(disk_super)) {
1355		device = ERR_CAST(disk_super);
1356		goto error_bdev_put;
1357	}
1358
1359	device = device_list_add(path, disk_super, &new_device_added);
1360	if (!IS_ERR(device)) {
1361		if (new_device_added)
1362			btrfs_free_stale_devices(path, device);
1363	}
1364
1365	btrfs_release_disk_super(disk_super);
1366
1367error_bdev_put:
1368	blkdev_put(bdev, flags);
1369
1370	return device;
1371}
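
/*
 * Hypothetical usage sketch, not part of the original file: registering a
 * device by path, taking uuid_mutex as the lockdep assertion above requires.
 */
static inline int example_scan_device(const char *path, void *holder)
{
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	device = btrfs_scan_one_device(path, FMODE_READ, holder);
	mutex_unlock(&uuid_mutex);

	return IS_ERR(device) ? PTR_ERR(device) : 0;
}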
1372
1373/*
1374 * Try to find a chunk that intersects [start, start + len] range and when one
1375 * such is found, record the end of it in *start
1376 */
1377static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1378				    u64 len)
1379{
1380	u64 physical_start, physical_end;
1381
1382	lockdep_assert_held(&device->fs_info->chunk_mutex);
1383
1384	if (!find_first_extent_bit(&device->alloc_state, *start,
1385				   &physical_start, &physical_end,
1386				   CHUNK_ALLOCATED, NULL)) {
1387
1388		if (in_range(physical_start, *start, len) ||
1389		    in_range(*start, physical_start,
1390			     physical_end - physical_start)) {
1391			*start = physical_end + 1;
1392			return true;
1393		}
1394	}
1395	return false;
1396}
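
/*
 * Worked example, not in the original file: with a pending chunk recorded at
 * [64M, 128M) in device->alloc_state, a call with *start = 96M and len = 1M
 * intersects that range, so *start is advanced just past the chunk and true
 * is returned; a call with *start = 130M finds no intersection, returns
 * false and leaves *start untouched.
 */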
1397
1398static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
1399{
1400	switch (device->fs_devices->chunk_alloc_policy) {
1401	case BTRFS_CHUNK_ALLOC_REGULAR:
1402		/*
1403		 * We don't want to overwrite the superblock on the drive nor
1404		 * any area used by the boot loader (grub for example), so we
1405		 * make sure to start at an offset of at least 1MB.
1406		 */
1407		return max_t(u64, start, SZ_1M);
1408	default:
1409		BUG();
1410	}
1411}
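
/*
 * Illustrative note, not in the original file: for the REGULAR policy this
 * means dev_extent_search_start(device, 0) returns SZ_1M, so the first
 * megabyte of each device (which holds the primary superblock at 64K plus
 * any boot loader area) is never handed out as a device extent.
 */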
1412
1413/**
1414 * dev_extent_hole_check - check if specified hole is suitable for allocation
1415 * @device:	the device which has the hole
1416 * @hole_start: starting position of the hole
1417 * @hole_size:	the size of the hole
1418 * @num_bytes:	the size of the free space that we need
1419 *
1420 * This function may modify @hole_start and @hole_size to reflect the suitable
1421 * position for allocation. Returns true if updated, false otherwise.
1422 */
1423static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1424				  u64 *hole_size, u64 num_bytes)
1425{
1426	bool changed = false;
1427	u64 hole_end = *hole_start + *hole_size;
1428
1429	/*
1430	 * Check before we set max_hole_start, otherwise we could end up
1431	 * sending back this offset anyway.
1432	 */
1433	if (contains_pending_extent(device, hole_start, *hole_size)) {
1434		if (hole_end >= *hole_start)
1435			*hole_size = hole_end - *hole_start;
1436		else
1437			*hole_size = 0;
1438		changed = true;
1439	}
1440
1441	switch (device->fs_devices->chunk_alloc_policy) {
1442	case BTRFS_CHUNK_ALLOC_REGULAR:
1443		/* No extra check */
1444		break;
1445	default:
1446		BUG();
1447	}
1448
1449	return changed;
1450}
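
/*
 * Worked example, not in the original file: for a hole [*hole_start,
 * hole_end) whose head is covered by a pending chunk,
 * contains_pending_extent() advances *hole_start past the chunk and the
 * hole shrinks to hole_end - *hole_start; if the chunk swallows the hole
 * entirely, *hole_size collapses to 0 and the caller must keep searching.
 */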
1451
1452/*
1453 * find_free_dev_extent_start - find free space in the specified device
1454 * @device:	  the device in which we search for free space
1455 * @num_bytes:	  the size of the free space that we need
1456 * @search_start: the position from which to begin the search
1457 * @start:	  store the start of the free space.
1458 * @len:	  the size of the free space that we find, or the size
1459 *		  of the max free space if we don't find suitable free space
1460 *
1461 * this uses a pretty simple search, the expectation is that it is
1462 * called very infrequently and that a given device has a small number
1463 * of extents
1464 *
1465 * @start is used to store the start of the free space if we find it. But if we
1466 * don't find suitable free space, it will be used to store the start position
1467 * of the max free space.
1468 *
1469 * @len is used to store the size of the free space that we find.
1470 * But if we don't find suitable free space, it is used to store the size of
1471 * the max free space.
1472 *
1473 * NOTE: This function will search the *commit* root of the device tree, and
1474 * does an extra check to ensure dev extents are not double allocated.
1475 * This makes the function safe to allocate dev extents but may not report
1476 * correct usable device space, as a device extent freed in the current
1477 * transaction is not reported as available.
1478 */
1479static int find_free_dev_extent_start(struct btrfs_device *device,
1480				u64 num_bytes, u64 search_start, u64 *start,
1481				u64 *len)
1482{
1483	struct btrfs_fs_info *fs_info = device->fs_info;
1484	struct btrfs_root *root = fs_info->dev_root;
1485	struct btrfs_key key;
1486	struct btrfs_dev_extent *dev_extent;
1487	struct btrfs_path *path;
1488	u64 hole_size;
1489	u64 max_hole_start;
1490	u64 max_hole_size;
1491	u64 extent_end;
1492	u64 search_end = device->total_bytes;
1493	int ret;
1494	int slot;
1495	struct extent_buffer *l;
1496
1497	search_start = dev_extent_search_start(device, search_start);
1498
1499	path = btrfs_alloc_path();
1500	if (!path)
1501		return -ENOMEM;
1502
1503	max_hole_start = search_start;
1504	max_hole_size = 0;
1505
1506again:
1507	if (search_start >= search_end ||
1508		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1509		ret = -ENOSPC;
1510		goto out;
1511	}
1512
1513	path->reada = READA_FORWARD;
1514	path->search_commit_root = 1;
1515	path->skip_locking = 1;
1516
1517	key.objectid = device->devid;
1518	key.offset = search_start;
1519	key.type = BTRFS_DEV_EXTENT_KEY;
1520
1521	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1522	if (ret < 0)
1523		goto out;
1524	if (ret > 0) {
1525		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1526		if (ret < 0)
1527			goto out;
1528	}
1529
1530	while (1) {
1531		l = path->nodes[0];
1532		slot = path->slots[0];
1533		if (slot >= btrfs_header_nritems(l)) {
1534			ret = btrfs_next_leaf(root, path);
1535			if (ret == 0)
1536				continue;
1537			if (ret < 0)
1538				goto out;
1539
1540			break;
1541		}
1542		btrfs_item_key_to_cpu(l, &key, slot);
1543
1544		if (key.objectid < device->devid)
1545			goto next;
1546
1547		if (key.objectid > device->devid)
1548			break;
1549
1550		if (key.type != BTRFS_DEV_EXTENT_KEY)
1551			goto next;
1552
1553		if (key.offset > search_start) {
1554			hole_size = key.offset - search_start;
1555			dev_extent_hole_check(device, &search_start, &hole_size,
1556					      num_bytes);
1557
1558			if (hole_size > max_hole_size) {
1559				max_hole_start = search_start;
1560				max_hole_size = hole_size;
1561			}
1562
1563			/*
1564			 * If this free space is greater than what we need,
1565			 * it must be the max free space that we have found
1566			 * until now, so max_hole_start must point to the start
1567			 * of this free space and the length of this free space
1568			 * is stored in max_hole_size. Thus, we return
1569			 * max_hole_start and max_hole_size and go back to the
1570			 * caller.
1571			 */
1572			if (hole_size >= num_bytes) {
1573				ret = 0;
1574				goto out;
1575			}
1576		}
1577
1578		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1579		extent_end = key.offset + btrfs_dev_extent_length(l,
1580								  dev_extent);
1581		if (extent_end > search_start)
1582			search_start = extent_end;
1583next:
1584		path->slots[0]++;
1585		cond_resched();
1586	}
1587
1588	/*
1589	 * At this point, search_start should be the end of
1590	 * allocated dev extents, and when shrinking the device,
1591	 * search_end may be smaller than search_start.
1592	 */
1593	if (search_end > search_start) {
1594		hole_size = search_end - search_start;
1595		if (dev_extent_hole_check(device, &search_start, &hole_size,
1596					  num_bytes)) {
1597			btrfs_release_path(path);
1598			goto again;
1599		}
1600
1601		if (hole_size > max_hole_size) {
1602			max_hole_start = search_start;
1603			max_hole_size = hole_size;
1604		}
1605	}
1606
1607	/* See above. */
1608	if (max_hole_size < num_bytes)
1609		ret = -ENOSPC;
1610	else
1611		ret = 0;
1612
1613out:
1614	btrfs_free_path(path);
1615	*start = max_hole_start;
1616	if (len)
1617		*len = max_hole_size;
1618	return ret;
1619}
1620
1621int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1622			 u64 *start, u64 *len)
1623{
1624	/* FIXME use last free of some kind */
1625	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1626}
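
/*
 * Hypothetical usage sketch, not part of the original file: reserving a 256M
 * device extent.  chunk_mutex must be held, as contains_pending_extent()
 * asserts.  On success @start holds a usable offset; on -ENOSPC @start/@len
 * instead describe the largest hole that was found.
 */
static inline int example_find_256m_extent(struct btrfs_device *device,
					   u64 *start, u64 *len)
{
	lockdep_assert_held(&device->fs_info->chunk_mutex);

	return find_free_dev_extent(device, SZ_256M, start, len);
}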
1627
1628static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1629			  struct btrfs_device *device,
1630			  u64 start, u64 *dev_extent_len)
1631{
1632	struct btrfs_fs_info *fs_info = device->fs_info;
1633	struct btrfs_root *root = fs_info->dev_root;
1634	int ret;
1635	struct btrfs_path *path;
1636	struct btrfs_key key;
1637	struct btrfs_key found_key;
1638	struct extent_buffer *leaf = NULL;
1639	struct btrfs_dev_extent *extent = NULL;
1640
1641	path = btrfs_alloc_path();
1642	if (!path)
1643		return -ENOMEM;
1644
1645	key.objectid = device->devid;
1646	key.offset = start;
1647	key.type = BTRFS_DEV_EXTENT_KEY;
1648again:
1649	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1650	if (ret > 0) {
1651		ret = btrfs_previous_item(root, path, key.objectid,
1652					  BTRFS_DEV_EXTENT_KEY);
1653		if (ret)
1654			goto out;
1655		leaf = path->nodes[0];
1656		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1657		extent = btrfs_item_ptr(leaf, path->slots[0],
1658					struct btrfs_dev_extent);
1659		BUG_ON(found_key.offset > start || found_key.offset +
1660		       btrfs_dev_extent_length(leaf, extent) < start);
1661		key = found_key;
1662		btrfs_release_path(path);
1663		goto again;
1664	} else if (ret == 0) {
1665		leaf = path->nodes[0];
1666		extent = btrfs_item_ptr(leaf, path->slots[0],
1667					struct btrfs_dev_extent);
1668	} else {
1669		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
1670		goto out;
1671	}
1672
1673	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1674
1675	ret = btrfs_del_item(trans, root, path);
1676	if (ret) {
1677		btrfs_handle_fs_error(fs_info, ret,
1678				      "Failed to remove dev extent item");
1679	} else {
1680		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1681	}
1682out:
1683	btrfs_free_path(path);
1684	return ret;
1685}
1686
1687static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1688				  struct btrfs_device *device,
1689				  u64 chunk_offset, u64 start, u64 num_bytes)
1690{
1691	int ret;
1692	struct btrfs_path *path;
1693	struct btrfs_fs_info *fs_info = device->fs_info;
1694	struct btrfs_root *root = fs_info->dev_root;
1695	struct btrfs_dev_extent *extent;
1696	struct extent_buffer *leaf;
1697	struct btrfs_key key;
1698
1699	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
1700	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1701	path = btrfs_alloc_path();
1702	if (!path)
1703		return -ENOMEM;
1704
1705	key.objectid = device->devid;
1706	key.offset = start;
1707	key.type = BTRFS_DEV_EXTENT_KEY;
1708	ret = btrfs_insert_empty_item(trans, root, path, &key,
1709				      sizeof(*extent));
1710	if (ret)
1711		goto out;
1712
1713	leaf = path->nodes[0];
1714	extent = btrfs_item_ptr(leaf, path->slots[0],
1715				struct btrfs_dev_extent);
1716	btrfs_set_dev_extent_chunk_tree(leaf, extent,
1717					BTRFS_CHUNK_TREE_OBJECTID);
1718	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
1719					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1720	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1721
1722	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1723	btrfs_mark_buffer_dirty(leaf);
1724out:
1725	btrfs_free_path(path);
1726	return ret;
1727}
1728
1729static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1730{
1731	struct extent_map_tree *em_tree;
1732	struct extent_map *em;
1733	struct rb_node *n;
1734	u64 ret = 0;
1735
1736	em_tree = &fs_info->mapping_tree;
1737	read_lock(&em_tree->lock);
1738	n = rb_last(&em_tree->map.rb_root);
1739	if (n) {
1740		em = rb_entry(n, struct extent_map, rb_node);
1741		ret = em->start + em->len;
1742	}
1743	read_unlock(&em_tree->lock);
1744
1745	return ret;
1746}
1747
1748static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1749				    u64 *devid_ret)
1750{
1751	int ret;
1752	struct btrfs_key key;
1753	struct btrfs_key found_key;
1754	struct btrfs_path *path;
1755
1756	path = btrfs_alloc_path();
1757	if (!path)
1758		return -ENOMEM;
1759
1760	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1761	key.type = BTRFS_DEV_ITEM_KEY;
1762	key.offset = (u64)-1;
1763
1764	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1765	if (ret < 0)
1766		goto error;
1767
1768	if (ret == 0) {
1769		/* Corruption */
1770		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1771		ret = -EUCLEAN;
1772		goto error;
1773	}
1774
1775	ret = btrfs_previous_item(fs_info->chunk_root, path,
1776				  BTRFS_DEV_ITEMS_OBJECTID,
1777				  BTRFS_DEV_ITEM_KEY);
1778	if (ret) {
1779		*devid_ret = 1;
1780	} else {
1781		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1782				      path->slots[0]);
1783		*devid_ret = found_key.offset + 1;
1784	}
1785	ret = 0;
1786error:
1787	btrfs_free_path(path);
1788	return ret;
1789}
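
/*
 * Worked example, not in the original file: the search key offset of
 * (u64)-1 lands just past the last DEV_ITEM, so stepping back with
 * btrfs_previous_item() yields the highest existing devid.  A filesystem
 * whose devices are devid 1 and 2 therefore gets *devid_ret = 3, while an
 * empty device tree falls back to *devid_ret = 1.
 */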
1790
1791/*
1792 * the device information is stored in the chunk root
1793 * the btrfs_device struct should be fully filled in
1794 */
1795static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1796			    struct btrfs_device *device)
1797{
1798	int ret;
1799	struct btrfs_path *path;
1800	struct btrfs_dev_item *dev_item;
1801	struct extent_buffer *leaf;
1802	struct btrfs_key key;
1803	unsigned long ptr;
1804
1805	path = btrfs_alloc_path();
1806	if (!path)
1807		return -ENOMEM;
1808
1809	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1810	key.type = BTRFS_DEV_ITEM_KEY;
1811	key.offset = device->devid;
1812
1813	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1814				      &key, sizeof(*dev_item));
1815	if (ret)
1816		goto out;
1817
1818	leaf = path->nodes[0];
1819	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1820
1821	btrfs_set_device_id(leaf, dev_item, device->devid);
1822	btrfs_set_device_generation(leaf, dev_item, 0);
1823	btrfs_set_device_type(leaf, dev_item, device->type);
1824	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1825	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1826	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1827	btrfs_set_device_total_bytes(leaf, dev_item,
1828				     btrfs_device_get_disk_total_bytes(device));
1829	btrfs_set_device_bytes_used(leaf, dev_item,
1830				    btrfs_device_get_bytes_used(device));
1831	btrfs_set_device_group(leaf, dev_item, 0);
1832	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1833	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1834	btrfs_set_device_start_offset(leaf, dev_item, 0);
1835
1836	ptr = btrfs_device_uuid(dev_item);
1837	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1838	ptr = btrfs_device_fsid(dev_item);
1839	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1840			    ptr, BTRFS_FSID_SIZE);
1841	btrfs_mark_buffer_dirty(leaf);
1842
1843	ret = 0;
1844out:
1845	btrfs_free_path(path);
1846	return ret;
1847}
1848
1849/*
1850 * Function to update ctime/mtime for a given device path.
1851 * Mainly used for ctime/mtime based probes like libblkid.
1852 */
1853static void update_dev_time(const char *path_name)
1854{
1855	struct file *filp;
1856
1857	filp = filp_open(path_name, O_RDWR, 0);
1858	if (IS_ERR(filp))
1859		return;
1860	file_update_time(filp);
1861	filp_close(filp, NULL);
1862}
1863
1864static int btrfs_rm_dev_item(struct btrfs_device *device)
1865{
1866	struct btrfs_root *root = device->fs_info->chunk_root;
1867	int ret;
1868	struct btrfs_path *path;
1869	struct btrfs_key key;
1870	struct btrfs_trans_handle *trans;
1871
1872	path = btrfs_alloc_path();
1873	if (!path)
1874		return -ENOMEM;
1875
1876	trans = btrfs_start_transaction(root, 0);
1877	if (IS_ERR(trans)) {
1878		btrfs_free_path(path);
1879		return PTR_ERR(trans);
1880	}
1881	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1882	key.type = BTRFS_DEV_ITEM_KEY;
1883	key.offset = device->devid;
1884
1885	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1886	if (ret) {
1887		if (ret > 0)
1888			ret = -ENOENT;
1889		btrfs_abort_transaction(trans, ret);
1890		btrfs_end_transaction(trans);
1891		goto out;
1892	}
1893
1894	ret = btrfs_del_item(trans, root, path);
1895	if (ret) {
1896		btrfs_abort_transaction(trans, ret);
1897		btrfs_end_transaction(trans);
1898	}
1899
1900out:
1901	btrfs_free_path(path);
1902	if (!ret)
1903		ret = btrfs_commit_transaction(trans);
1904	return ret;
1905}
1906
1907/*
1908 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1909 * filesystem. It's up to the caller to adjust that number regarding e.g. device
1910 * replace.
1911 */
1912static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1913		u64 num_devices)
1914{
1915	u64 all_avail;
1916	unsigned seq;
1917	int i;
1918
1919	do {
1920		seq = read_seqbegin(&fs_info->profiles_lock);
1921
1922		all_avail = fs_info->avail_data_alloc_bits |
1923			    fs_info->avail_system_alloc_bits |
1924			    fs_info->avail_metadata_alloc_bits;
1925	} while (read_seqretry(&fs_info->profiles_lock, seq));
1926
1927	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1928		if (!(all_avail & btrfs_raid_array[i].bg_flag))
1929			continue;
1930
1931		if (num_devices < btrfs_raid_array[i].devs_min) {
1932			int ret = btrfs_raid_array[i].mindev_error;
1933
1934			if (ret)
1935				return ret;
1936		}
1937	}
1938
1939	return 0;
1940}
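
/*
 * Worked example, not in the original file: deleting one device from a
 * two-device filesystem that has RAID1 block groups calls this with
 * num_devices = 1, which is below the RAID1 devs_min of 2 in
 * btrfs_raid_array, so BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET is returned and
 * btrfs_rm_device() refuses the removal.
 */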
1941
1942static struct btrfs_device * btrfs_find_next_active_device(
1943		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1944{
1945	struct btrfs_device *next_device;
1946
1947	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1948		if (next_device != device &&
1949		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1950		    && next_device->bdev)
1951			return next_device;
1952	}
1953
1954	return NULL;
1955}
1956
1957/*
1958 * Helper function to check if the given device is part of s_bdev / latest_bdev
1959 * and replace it with the provided or the next active device. In the context
1960 * where this function is called, there should always be another device (or
1961 * this_dev) which is active.
1962 */
1963void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1964				     struct btrfs_device *this_dev)
1965{
1966	struct btrfs_fs_info *fs_info = device->fs_info;
1967	struct btrfs_device *next_device;
1968
1969	if (this_dev)
1970		next_device = this_dev;
1971	else
1972		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1973								device);
1974	ASSERT(next_device);
1975
1976	if (fs_info->sb->s_bdev &&
1977			(fs_info->sb->s_bdev == device->bdev))
1978		fs_info->sb->s_bdev = next_device->bdev;
1979
1980	if (fs_info->fs_devices->latest_bdev == device->bdev)
1981		fs_info->fs_devices->latest_bdev = next_device->bdev;
1982}
1983
1984/*
1985 * Return btrfs_fs_devices::num_devices excluding the device that's being
1986 * currently replaced.
1987 */
1988static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
1989{
1990	u64 num_devices = fs_info->fs_devices->num_devices;
1991
1992	down_read(&fs_info->dev_replace.rwsem);
1993	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
1994		ASSERT(num_devices > 1);
1995		num_devices--;
1996	}
1997	up_read(&fs_info->dev_replace.rwsem);
1998
1999	return num_devices;
2000}
2001
2002void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2003			       struct block_device *bdev,
2004			       const char *device_path)
2005{
2006	struct btrfs_super_block *disk_super;
2007	int copy_num;
2008
2009	if (!bdev)
2010		return;
2011
2012	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2013		struct page *page;
2014		int ret;
2015
2016		disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2017		if (IS_ERR(disk_super))
2018			continue;
2019
2020		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2021
2022		page = virt_to_page(disk_super);
2023		set_page_dirty(page);
2024		lock_page(page);
2025		/* write_one_page() unlocks the page */
2026		ret = write_one_page(page);
2027		if (ret)
2028			btrfs_warn(fs_info,
2029				"error clearing superblock number %d (%d)",
2030				copy_num, ret);
2031		btrfs_release_disk_super(disk_super);
2032
2033	}
2034
2035	/* Notify udev that device has changed */
2036	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2037
2038	/* Update ctime/mtime for device path for libblkid */
2039	update_dev_time(device_path);
2040}
2041
2042int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2043		u64 devid)
2044{
2045	struct btrfs_device *device;
2046	struct btrfs_fs_devices *cur_devices;
2047	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2048	u64 num_devices;
2049	int ret = 0;
2050
2051	mutex_lock(&uuid_mutex);
2052
2053	num_devices = btrfs_num_devices(fs_info);
2054
2055	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2056	if (ret)
2057		goto out;
2058
2059	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2060
2061	if (IS_ERR(device)) {
2062		if (PTR_ERR(device) == -ENOENT &&
2063		    strcmp(device_path, "missing") == 0)
2064			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2065		else
2066			ret = PTR_ERR(device);
2067		goto out;
2068	}
2069
2070	if (btrfs_pinned_by_swapfile(fs_info, device)) {
2071		btrfs_warn_in_rcu(fs_info,
2072		  "cannot remove device %s (devid %llu) due to active swapfile",
2073				  rcu_str_deref(device->name), device->devid);
2074		ret = -ETXTBSY;
2075		goto out;
2076	}
2077
2078	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2079		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2080		goto out;
2081	}
2082
2083	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2084	    fs_info->fs_devices->rw_devices == 1) {
2085		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2086		goto out;
2087	}
2088
2089	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2090		mutex_lock(&fs_info->chunk_mutex);
2091		list_del_init(&device->dev_alloc_list);
2092		device->fs_devices->rw_devices--;
2093		mutex_unlock(&fs_info->chunk_mutex);
2094	}
2095
2096	mutex_unlock(&uuid_mutex);
2097	ret = btrfs_shrink_device(device, 0);
2098	mutex_lock(&uuid_mutex);
2099	if (ret)
2100		goto error_undo;
2101
2102	/*
2103	 * TODO: the superblock still includes this device in its num_devices
2104	 * counter although write_all_supers() is not locked out. This
2105	 * could give a filesystem state which requires a degraded mount.
2106	 */
2107	ret = btrfs_rm_dev_item(device);
2108	if (ret)
2109		goto error_undo;
2110
2111	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2112	btrfs_scrub_cancel_dev(device);
2113
2114	/*
2115	 * the device list mutex makes sure that we don't change
2116	 * the device list while someone else is writing out all
2117	 * the device supers. Whoever is writing all supers, should
2118	 * lock the device list mutex before getting the number of
2119	 * devices in the super block (super_copy). Conversely,
2120	 * whoever updates the number of devices in the super block
2121	 * (super_copy) should hold the device list mutex.
2122	 */
2123
2124	/*
2125	 * In normal cases the cur_devices == fs_devices. But in case
2126	 * of deleting a seed device, the cur_devices should point to
2127	 * its own fs_devices listed under the fs_devices->seed.
2128	 */
2129	cur_devices = device->fs_devices;
2130	mutex_lock(&fs_devices->device_list_mutex);
2131	list_del_rcu(&device->dev_list);
2132
2133	cur_devices->num_devices--;
2134	cur_devices->total_devices--;
2135	/* Update total_devices of the parent fs_devices if it's seed */
2136	if (cur_devices != fs_devices)
2137		fs_devices->total_devices--;
2138
2139	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2140		cur_devices->missing_devices--;
2141
2142	btrfs_assign_next_active_device(device, NULL);
2143
2144	if (device->bdev) {
2145		cur_devices->open_devices--;
2146		/* remove sysfs entry */
2147		btrfs_sysfs_remove_devices_dir(fs_devices, device);
2148	}
2149
2150	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2151	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2152	mutex_unlock(&fs_devices->device_list_mutex);
2153
2154	/*
2155	 * at this point, the device is zero sized and detached from
2156	 * the devices list.  All that's left is to zero out the old
2157	 * supers and free the device.
2158	 */
2159	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2160		btrfs_scratch_superblocks(fs_info, device->bdev,
2161					  device->name->str);
2162
2163	btrfs_close_bdev(device);
2164	synchronize_rcu();
2165	btrfs_free_device(device);
2166
2167	if (cur_devices->open_devices == 0) {
2168		while (fs_devices) {
2169			if (fs_devices->seed == cur_devices) {
2170				fs_devices->seed = cur_devices->seed;
2171				break;
2172			}
2173			fs_devices = fs_devices->seed;
2174		}
2175		cur_devices->seed = NULL;
2176		close_fs_devices(cur_devices);
2177		free_fs_devices(cur_devices);
2178	}
2179
2180out:
2181	mutex_unlock(&uuid_mutex);
2182	return ret;
2183
2184error_undo:
2185	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2186		mutex_lock(&fs_info->chunk_mutex);
2187		list_add(&device->dev_alloc_list,
2188			 &fs_devices->alloc_list);
2189		device->fs_devices->rw_devices++;
2190		mutex_unlock(&fs_info->chunk_mutex);
2191	}
2192	goto out;
2193}
2194
2195void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2196{
2197	struct btrfs_fs_devices *fs_devices;
2198
2199	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2200
2201	/*
2202	 * In the case of an fs with no seed, srcdev->fs_devices will point
2203	 * to the fs_devices of fs_info. However, when the dev being replaced
2204	 * is a seed dev it will point to the seed's local fs_devices. In
2205	 * short, srcdev will have its correct fs_devices in both cases.
2206	 */
2207	fs_devices = srcdev->fs_devices;
2208
2209	list_del_rcu(&srcdev->dev_list);
2210	list_del(&srcdev->dev_alloc_list);
2211	fs_devices->num_devices--;
2212	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2213		fs_devices->missing_devices--;
2214
2215	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2216		fs_devices->rw_devices--;
2217
2218	if (srcdev->bdev)
2219		fs_devices->open_devices--;
2220}
2221
2222void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2223{
2224	struct btrfs_fs_info *fs_info = srcdev->fs_info;
2225	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2226
2227	mutex_lock(&uuid_mutex);
2228
2229	btrfs_close_bdev(srcdev);
2230	synchronize_rcu();
2231	btrfs_free_device(srcdev);
2232
2233	/* If there are no devs left we'd rather delete the fs_devices */
2234	if (!fs_devices->num_devices) {
2235		struct btrfs_fs_devices *tmp_fs_devices;
2236
2237		/*
2238		 * On a mounted FS, num_devices can't be zero unless it's a
2239		 * seed. In case of a seed device being replaced, the replace
2240		 * target is added to the sprout FS, so there will be no more
2241		 * devices left under the seed FS.
2242		 */
2243		ASSERT(fs_devices->seeding);
2244
2245		tmp_fs_devices = fs_info->fs_devices;
2246		while (tmp_fs_devices) {
2247			if (tmp_fs_devices->seed == fs_devices) {
2248				tmp_fs_devices->seed = fs_devices->seed;
2249				break;
2250			}
2251			tmp_fs_devices = tmp_fs_devices->seed;
2252		}
2253		fs_devices->seed = NULL;
2254		close_fs_devices(fs_devices);
2255		free_fs_devices(fs_devices);
2256	}
2257	mutex_unlock(&uuid_mutex);
2258}
2259
2260void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2261{
2262	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2263
2264	mutex_lock(&fs_devices->device_list_mutex);
2265
2266	btrfs_sysfs_remove_devices_dir(fs_devices, tgtdev);
2267
2268	if (tgtdev->bdev)
2269		fs_devices->open_devices--;
2270
2271	fs_devices->num_devices--;
2272
2273	btrfs_assign_next_active_device(tgtdev, NULL);
2274
2275	list_del_rcu(&tgtdev->dev_list);
2276
2277	mutex_unlock(&fs_devices->device_list_mutex);
2278
2279	/*
2280	 * The update_dev_time() within btrfs_scratch_superblocks()
2281	 * may lead to a call to btrfs_show_devname() which will try
2282	 * to hold device_list_mutex. Here this device is already out
2283	 * of the device list, so we don't have to hold the
2284	 * device_list_mutex lock.
2285	 */
2286	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2287				  tgtdev->name->str);
2288
2289	btrfs_close_bdev(tgtdev);
2290	synchronize_rcu();
2291	btrfs_free_device(tgtdev);
2292}
2293
2294static struct btrfs_device *btrfs_find_device_by_path(
2295		struct btrfs_fs_info *fs_info, const char *device_path)
2296{
2297	int ret = 0;
2298	struct btrfs_super_block *disk_super;
2299	u64 devid;
2300	u8 *dev_uuid;
2301	struct block_device *bdev;
2302	struct btrfs_device *device;
2303
2304	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2305				    fs_info->bdev_holder, 0, &bdev, &disk_super);
2306	if (ret)
2307		return ERR_PTR(ret);
2308
2309	devid = btrfs_stack_device_id(&disk_super->dev_item);
2310	dev_uuid = disk_super->dev_item.uuid;
2311	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2312		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2313					   disk_super->metadata_uuid, true);
2314	else
2315		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2316					   disk_super->fsid, true);
2317
2318	btrfs_release_disk_super(disk_super);
2319	if (!device)
2320		device = ERR_PTR(-ENOENT);
2321	blkdev_put(bdev, FMODE_READ);
2322	return device;
2323}
2324
2325/*
2326 * Lookup a device given by device id, or the path if the id is 0.
2327 */
2328struct btrfs_device *btrfs_find_device_by_devspec(
2329		struct btrfs_fs_info *fs_info, u64 devid,
2330		const char *device_path)
2331{
2332	struct btrfs_device *device;
2333
2334	if (devid) {
2335		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2336					   NULL, true);
2337		if (!device)
2338			return ERR_PTR(-ENOENT);
2339		return device;
2340	}
2341
2342	if (!device_path || !device_path[0])
2343		return ERR_PTR(-EINVAL);
2344
2345	if (strcmp(device_path, "missing") == 0) {
2346		/* Find first missing device */
2347		list_for_each_entry(device, &fs_info->fs_devices->devices,
2348				    dev_list) {
2349			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2350				     &device->dev_state) && !device->bdev)
2351				return device;
2352		}
2353		return ERR_PTR(-ENOENT);
2354	}
2355
2356	return btrfs_find_device_by_path(fs_info, device_path);
2357}
2358
2359/*
2360 * Does all the dirty work required for changing the filesystem's UUID.
2361 */
2362static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2363{
2364	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2365	struct btrfs_fs_devices *old_devices;
2366	struct btrfs_fs_devices *seed_devices;
2367	struct btrfs_super_block *disk_super = fs_info->super_copy;
2368	struct btrfs_device *device;
2369	u64 super_flags;
2370
2371	lockdep_assert_held(&uuid_mutex);
2372	if (!fs_devices->seeding)
2373		return -EINVAL;
2374
2375	seed_devices = alloc_fs_devices(NULL, NULL);
2376	if (IS_ERR(seed_devices))
2377		return PTR_ERR(seed_devices);
2378
2379	old_devices = clone_fs_devices(fs_devices);
2380	if (IS_ERR(old_devices)) {
2381		kfree(seed_devices);
2382		return PTR_ERR(old_devices);
2383	}
2384
2385	list_add(&old_devices->fs_list, &fs_uuids);
2386
2387	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2388	seed_devices->opened = 1;
2389	INIT_LIST_HEAD(&seed_devices->devices);
2390	INIT_LIST_HEAD(&seed_devices->alloc_list);
2391	mutex_init(&seed_devices->device_list_mutex);
2392
2393	mutex_lock(&fs_devices->device_list_mutex);
2394	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2395			      synchronize_rcu);
2396	list_for_each_entry(device, &seed_devices->devices, dev_list)
2397		device->fs_devices = seed_devices;
2398
2399	mutex_lock(&fs_info->chunk_mutex);
2400	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2401	mutex_unlock(&fs_info->chunk_mutex);
2402
2403	fs_devices->seeding = false;
2404	fs_devices->num_devices = 0;
2405	fs_devices->open_devices = 0;
2406	fs_devices->missing_devices = 0;
2407	fs_devices->rotating = false;
2408	fs_devices->seed = seed_devices;
2409
2410	generate_random_uuid(fs_devices->fsid);
2411	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2412	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2413	mutex_unlock(&fs_devices->device_list_mutex);
2414
2415	super_flags = btrfs_super_flags(disk_super) &
2416		      ~BTRFS_SUPER_FLAG_SEEDING;
2417	btrfs_set_super_flags(disk_super, super_flags);
2418
2419	return 0;
2420}
2421
2422/*
2423 * Store the expected generation for seed devices in device items.
2424 */
2425static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2426{
2427	struct btrfs_fs_info *fs_info = trans->fs_info;
2428	struct btrfs_root *root = fs_info->chunk_root;
2429	struct btrfs_path *path;
2430	struct extent_buffer *leaf;
2431	struct btrfs_dev_item *dev_item;
2432	struct btrfs_device *device;
2433	struct btrfs_key key;
2434	u8 fs_uuid[BTRFS_FSID_SIZE];
2435	u8 dev_uuid[BTRFS_UUID_SIZE];
2436	u64 devid;
2437	int ret;
2438
2439	path = btrfs_alloc_path();
2440	if (!path)
2441		return -ENOMEM;
2442
2443	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2444	key.offset = 0;
2445	key.type = BTRFS_DEV_ITEM_KEY;
2446
2447	while (1) {
2448		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2449		if (ret < 0)
2450			goto error;
2451
2452		leaf = path->nodes[0];
2453next_slot:
2454		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2455			ret = btrfs_next_leaf(root, path);
2456			if (ret > 0)
2457				break;
2458			if (ret < 0)
2459				goto error;
2460			leaf = path->nodes[0];
2461			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2462			btrfs_release_path(path);
2463			continue;
2464		}
2465
2466		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2467		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2468		    key.type != BTRFS_DEV_ITEM_KEY)
2469			break;
2470
2471		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2472					  struct btrfs_dev_item);
2473		devid = btrfs_device_id(leaf, dev_item);
2474		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2475				   BTRFS_UUID_SIZE);
2476		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2477				   BTRFS_FSID_SIZE);
2478		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2479					   fs_uuid, true);
2480		BUG_ON(!device); /* Logic error */
2481
2482		if (device->fs_devices->seeding) {
2483			btrfs_set_device_generation(leaf, dev_item,
2484						    device->generation);
2485			btrfs_mark_buffer_dirty(leaf);
2486		}
2487
2488		path->slots[0]++;
2489		goto next_slot;
2490	}
2491	ret = 0;
2492error:
2493	btrfs_free_path(path);
2494	return ret;
2495}
2496
2497int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2498{
2499	struct btrfs_root *root = fs_info->dev_root;
2500	struct request_queue *q;
2501	struct btrfs_trans_handle *trans;
2502	struct btrfs_device *device;
2503	struct block_device *bdev;
2504	struct super_block *sb = fs_info->sb;
2505	struct rcu_string *name;
2506	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2507	u64 orig_super_total_bytes;
2508	u64 orig_super_num_devices;
2509	int seeding_dev = 0;
2510	int ret = 0;
2511	bool unlocked = false;
2512
2513	if (sb_rdonly(sb) && !fs_devices->seeding)
2514		return -EROFS;
2515
2516	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2517				  fs_info->bdev_holder);
2518	if (IS_ERR(bdev))
2519		return PTR_ERR(bdev);
2520
2521	if (fs_devices->seeding) {
2522		seeding_dev = 1;
2523		down_write(&sb->s_umount);
2524		mutex_lock(&uuid_mutex);
2525	}
2526
2527	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2528
2529	mutex_lock(&fs_devices->device_list_mutex);
2530	list_for_each_entry(device, &fs_devices->devices, dev_list) {
2531		if (device->bdev == bdev) {
2532			ret = -EEXIST;
2533			mutex_unlock(
2534				&fs_devices->device_list_mutex);
2535			goto error;
2536		}
2537	}
2538	mutex_unlock(&fs_devices->device_list_mutex);
2539
2540	device = btrfs_alloc_device(fs_info, NULL, NULL);
2541	if (IS_ERR(device)) {
2542		/* we can safely leave the fs_devices entry around */
2543		ret = PTR_ERR(device);
2544		goto error;
2545	}
2546
2547	name = rcu_string_strdup(device_path, GFP_KERNEL);
2548	if (!name) {
2549		ret = -ENOMEM;
2550		goto error_free_device;
2551	}
2552	rcu_assign_pointer(device->name, name);
2553
2554	trans = btrfs_start_transaction(root, 0);
2555	if (IS_ERR(trans)) {
2556		ret = PTR_ERR(trans);
2557		goto error_free_device;
2558	}
2559
2560	q = bdev_get_queue(bdev);
2561	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2562	device->generation = trans->transid;
2563	device->io_width = fs_info->sectorsize;
2564	device->io_align = fs_info->sectorsize;
2565	device->sector_size = fs_info->sectorsize;
2566	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2567					 fs_info->sectorsize);
2568	device->disk_total_bytes = device->total_bytes;
2569	device->commit_total_bytes = device->total_bytes;
2570	device->fs_info = fs_info;
2571	device->bdev = bdev;
2572	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2573	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2574	device->mode = FMODE_EXCL;
2575	device->dev_stats_valid = 1;
2576	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2577
2578	if (seeding_dev) {
2579		sb->s_flags &= ~SB_RDONLY;
2580		ret = btrfs_prepare_sprout(fs_info);
2581		if (ret) {
2582			btrfs_abort_transaction(trans, ret);
2583			goto error_trans;
2584		}
2585	}
2586
2587	device->fs_devices = fs_devices;
2588
2589	mutex_lock(&fs_devices->device_list_mutex);
2590	mutex_lock(&fs_info->chunk_mutex);
2591	list_add_rcu(&device->dev_list, &fs_devices->devices);
2592	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2593	fs_devices->num_devices++;
2594	fs_devices->open_devices++;
2595	fs_devices->rw_devices++;
2596	fs_devices->total_devices++;
2597	fs_devices->total_rw_bytes += device->total_bytes;
2598
2599	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2600
2601	if (!blk_queue_nonrot(q))
2602		fs_devices->rotating = true;
2603
2604	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2605	btrfs_set_super_total_bytes(fs_info->super_copy,
2606		round_down(orig_super_total_bytes + device->total_bytes,
2607			   fs_info->sectorsize));
2608
2609	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2610	btrfs_set_super_num_devices(fs_info->super_copy,
2611				    orig_super_num_devices + 1);
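	/*
	 * The superblock totals above are updated in memory only; they reach
	 * disk with the transaction commit below.  The error_sysfs path
	 * restores the orig_super_* values saved here.
	 */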
2612
2613	/* add sysfs device entry */
2614	btrfs_sysfs_add_devices_dir(fs_devices, device);
2615
2616	/*
2617	 * we've got more storage, clear any full flags on the space
2618	 * infos
2619	 */
2620	btrfs_clear_space_info_full(fs_info);
2621
2622	mutex_unlock(&fs_info->chunk_mutex);
2623	mutex_unlock(&fs_devices->device_list_mutex);
2624
2625	if (seeding_dev) {
2626		mutex_lock(&fs_info->chunk_mutex);
2627		ret = init_first_rw_device(trans);
2628		mutex_unlock(&fs_info->chunk_mutex);
2629		if (ret) {
2630			btrfs_abort_transaction(trans, ret);
2631			goto error_sysfs;
2632		}
2633	}
2634
2635	ret = btrfs_add_dev_item(trans, device);
2636	if (ret) {
2637		btrfs_abort_transaction(trans, ret);
2638		goto error_sysfs;
2639	}
2640
2641	if (seeding_dev) {
2642		ret = btrfs_finish_sprout(trans);
2643		if (ret) {
2644			btrfs_abort_transaction(trans, ret);
2645			goto error_sysfs;
2646		}
2647
2648		btrfs_sysfs_update_sprout_fsid(fs_devices,
2649				fs_info->fs_devices->fsid);
2650	}
2651
2652	ret = btrfs_commit_transaction(trans);
2653
2654	if (seeding_dev) {
2655		mutex_unlock(&uuid_mutex);
2656		up_write(&sb->s_umount);
2657		unlocked = true;
2658
2659		if (ret) /* transaction commit */
2660			return ret;
2661
2662		ret = btrfs_relocate_sys_chunks(fs_info);
2663		if (ret < 0)
2664			btrfs_handle_fs_error(fs_info, ret,
2665				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2666		trans = btrfs_attach_transaction(root);
2667		if (IS_ERR(trans)) {
2668			if (PTR_ERR(trans) == -ENOENT)
2669				return 0;
2670			ret = PTR_ERR(trans);
2671			trans = NULL;
2672			goto error_sysfs;
2673		}
2674		ret = btrfs_commit_transaction(trans);
2675	}
2676
2677	/*
2678	 * Now that we have written a new super block to this device, check all
2679	 * other fs_devices lists to see whether device_path has alienated any
2680	 * other scanned device.
2681	 * We can ignore the return value as it typically returns -EINVAL and
2682	 * only succeeds if the device was an alien.
2683	 */
2684	btrfs_forget_devices(device_path);
2685
2686	/* Update ctime/mtime for blkid or udev */
2687	update_dev_time(device_path);
2688
2689	return ret;
2690
2691error_sysfs:
2692	btrfs_sysfs_remove_devices_dir(fs_devices, device);
2693	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2694	mutex_lock(&fs_info->chunk_mutex);
2695	list_del_rcu(&device->dev_list);
2696	list_del(&device->dev_alloc_list);
2697	fs_info->fs_devices->num_devices--;
2698	fs_info->fs_devices->open_devices--;
2699	fs_info->fs_devices->rw_devices--;
2700	fs_info->fs_devices->total_devices--;
2701	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2702	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2703	btrfs_set_super_total_bytes(fs_info->super_copy,
2704				    orig_super_total_bytes);
2705	btrfs_set_super_num_devices(fs_info->super_copy,
2706				    orig_super_num_devices);
2707	mutex_unlock(&fs_info->chunk_mutex);
2708	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2709error_trans:
2710	if (seeding_dev)
2711		sb->s_flags |= SB_RDONLY;
2712	if (trans)
2713		btrfs_end_transaction(trans);
2714error_free_device:
2715	btrfs_free_device(device);
2716error:
2717	blkdev_put(bdev, FMODE_EXCL);
2718	if (seeding_dev && !unlocked) {
2719		mutex_unlock(&uuid_mutex);
2720		up_write(&sb->s_umount);
2721	}
2722	return ret;
2723}
2724
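/*
 * Sync the in-memory fields of @device (geometry, total and used bytes) into
 * its DEV_ITEM in the chunk tree.
 */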
2725static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2726					struct btrfs_device *device)
2727{
2728	int ret;
2729	struct btrfs_path *path;
2730	struct btrfs_root *root = device->fs_info->chunk_root;
2731	struct btrfs_dev_item *dev_item;
2732	struct extent_buffer *leaf;
2733	struct btrfs_key key;
2734
2735	path = btrfs_alloc_path();
2736	if (!path)
2737		return -ENOMEM;
2738
2739	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2740	key.type = BTRFS_DEV_ITEM_KEY;
2741	key.offset = device->devid;
2742
2743	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2744	if (ret < 0)
2745		goto out;
2746
2747	if (ret > 0) {
2748		ret = -ENOENT;
2749		goto out;
2750	}
2751
2752	leaf = path->nodes[0];
2753	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2754
2755	btrfs_set_device_id(leaf, dev_item, device->devid);
2756	btrfs_set_device_type(leaf, dev_item, device->type);
2757	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2758	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2759	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2760	btrfs_set_device_total_bytes(leaf, dev_item,
2761				     btrfs_device_get_disk_total_bytes(device));
2762	btrfs_set_device_bytes_used(leaf, dev_item,
2763				    btrfs_device_get_bytes_used(device));
2764	btrfs_mark_buffer_dirty(leaf);
2765
2766out:
2767	btrfs_free_path(path);
2768	return ret;
2769}
2770
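/*
 * Grow @device to @new_size (rounded down to the sector size), updating the
 * in-memory counters and the superblock total, then persist the new size
 * through btrfs_update_device().
 */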
2771int btrfs_grow_device(struct btrfs_trans_handle *trans,
2772		      struct btrfs_device *device, u64 new_size)
2773{
2774	struct btrfs_fs_info *fs_info = device->fs_info;
2775	struct btrfs_super_block *super_copy = fs_info->super_copy;
2776	u64 old_total;
2777	u64 diff;
2778
2779	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2780		return -EACCES;
2781
2782	new_size = round_down(new_size, fs_info->sectorsize);
2783
2784	mutex_lock(&fs_info->chunk_mutex);
2785	old_total = btrfs_super_total_bytes(super_copy);
2786	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2787
2788	if (new_size <= device->total_bytes ||
2789	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2790		mutex_unlock(&fs_info->chunk_mutex);
2791		return -EINVAL;
2792	}
2793
2794	btrfs_set_super_total_bytes(super_copy,
2795			round_down(old_total + diff, fs_info->sectorsize));
2796	device->fs_devices->total_rw_bytes += diff;
2797
2798	btrfs_device_set_total_bytes(device, new_size);
2799	btrfs_device_set_disk_total_bytes(device, new_size);
2800	btrfs_clear_space_info_full(device->fs_info);
2801	if (list_empty(&device->post_commit_list))
2802		list_add_tail(&device->post_commit_list,
2803			      &trans->transaction->dev_update_list);
2804	mutex_unlock(&fs_info->chunk_mutex);
2805
2806	return btrfs_update_device(trans, device);
2807}
2808
2809static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2810{
2811	struct btrfs_fs_info *fs_info = trans->fs_info;
2812	struct btrfs_root *root = fs_info->chunk_root;
2813	int ret;
2814	struct btrfs_path *path;
2815	struct btrfs_key key;
2816
2817	path = btrfs_alloc_path();
2818	if (!path)
2819		return -ENOMEM;
2820
2821	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2822	key.offset = chunk_offset;
2823	key.type = BTRFS_CHUNK_ITEM_KEY;
2824
2825	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2826	if (ret < 0)
2827		goto out;
2828	else if (ret > 0) { /* Logic error or corruption */
2829		btrfs_handle_fs_error(fs_info, -ENOENT,
2830				      "Failed lookup while freeing chunk.");
2831		ret = -ENOENT;
2832		goto out;
2833	}
2834
2835	ret = btrfs_del_item(trans, root, path);
2836	if (ret < 0)
2837		btrfs_handle_fs_error(fs_info, ret,
2838				      "Failed to delete chunk item.");
2839out:
2840	btrfs_free_path(path);
2841	return ret;
2842}
2843
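/*
 * Remove the copy of the chunk item at @chunk_offset from the superblock's
 * sys_chunk_array.  SYSTEM chunks are duplicated there so that the chunk
 * tree itself can be located at mount time.
 */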
2844static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2845{
2846	struct btrfs_super_block *super_copy = fs_info->super_copy;
2847	struct btrfs_disk_key *disk_key;
2848	struct btrfs_chunk *chunk;
2849	u8 *ptr;
2850	int ret = 0;
2851	u32 num_stripes;
2852	u32 array_size;
2853	u32 len = 0;
2854	u32 cur;
2855	struct btrfs_key key;
2856
2857	mutex_lock(&fs_info->chunk_mutex);
2858	array_size = btrfs_super_sys_array_size(super_copy);
2859
2860	ptr = super_copy->sys_chunk_array;
2861	cur = 0;
2862
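	/*
	 * sys_chunk_array is a packed sequence of key/chunk pairs:
	 *
	 *   [disk_key][chunk + N stripes][disk_key][chunk + M stripes]...
	 *
	 * Walk it entry by entry and memmove() the tail down over the entry
	 * matching chunk_offset.
	 */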
2863	while (cur < array_size) {
2864		disk_key = (struct btrfs_disk_key *)ptr;
2865		btrfs_disk_key_to_cpu(&key, disk_key);
2866
2867		len = sizeof(*disk_key);
2868
2869		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2870			chunk = (struct btrfs_chunk *)(ptr + len);
2871			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2872			len += btrfs_chunk_item_size(num_stripes);
2873		} else {
2874			ret = -EIO;
2875			break;
2876		}
2877		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2878		    key.offset == chunk_offset) {
2879			memmove(ptr, ptr + len, array_size - (cur + len));
2880			array_size -= len;
2881			btrfs_set_super_sys_array_size(super_copy, array_size);
2882		} else {
2883			ptr += len;
2884			cur += len;
2885		}
2886	}
2887	mutex_unlock(&fs_info->chunk_mutex);
2888	return ret;
2889}
2890
2891/*
2892 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2893 * @logical: Logical block offset in bytes.
2894 * @length: Length of extent in bytes.
2895 *
2896 * Return: Chunk mapping or ERR_PTR.
2897 */
2898struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2899				       u64 logical, u64 length)
2900{
2901	struct extent_map_tree *em_tree;
2902	struct extent_map *em;
2903
2904	em_tree = &fs_info->mapping_tree;
2905	read_lock(&em_tree->lock);
2906	em = lookup_extent_mapping(em_tree, logical, length);
2907	read_unlock(&em_tree->lock);
2908
2909	if (!em) {
2910		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2911			   logical, length);
2912		return ERR_PTR(-EINVAL);
2913	}
2914
2915	if (em->start > logical || em->start + em->len < logical) {
2916		btrfs_crit(fs_info,
2917			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2918			   logical, logical + length, em->start, em->start + em->len);
2919		free_extent_map(em);
2920		return ERR_PTR(-EINVAL);
2921	}
2922
2923	/* callers are responsible for dropping em's ref. */
2924	return em;
2925}
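
/*
 * Typical caller pattern (illustrative; btrfs_remove_chunk() below does
 * exactly this):
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	...
 *	free_extent_map(em);	// drop the reference we were handed
 */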
2926
2927int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2928{
2929	struct btrfs_fs_info *fs_info = trans->fs_info;
2930	struct extent_map *em;
2931	struct map_lookup *map;
2932	u64 dev_extent_len = 0;
2933	int i, ret = 0;
2934	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2935
2936	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
2937	if (IS_ERR(em)) {
2938		/*
2939		 * This is a logic error, but we don't want to just rely on the
2940		 * user having built with ASSERT enabled, so if ASSERT doesn't
2941		 * do anything we still error out.
2942		 */
2943		ASSERT(0);
2944		return PTR_ERR(em);
2945	}
2946	map = em->map_lookup;
2947	mutex_lock(&fs_info->chunk_mutex);
2948	check_system_chunk(trans, map->type);
2949	mutex_unlock(&fs_info->chunk_mutex);
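	/*
	 * check_system_chunk() above makes sure enough SYSTEM space is
	 * reserved: deleting the chunk modifies the device and chunk trees,
	 * which can itself require allocating a new system chunk.
	 */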
2950
2951	/*
2952	 * Take the device list mutex to prevent races with the final phase of
2953	 * a device replace operation that replaces the device object associated
2954	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2955	 */
2956	mutex_lock(&fs_devices->device_list_mutex);
2957	for (i = 0; i < map->num_stripes; i++) {
2958		struct btrfs_device *device = map->stripes[i].dev;
2959		ret = btrfs_free_dev_extent(trans, device,
2960					    map->stripes[i].physical,
2961					    &dev_extent_len);
2962		if (ret) {
2963			mutex_unlock(&fs_devices->device_list_mutex);
2964			btrfs_abort_transaction(trans, ret);
2965			goto out;
2966		}
2967
2968		if (device->bytes_used > 0) {
2969			mutex_lock(&fs_info->chunk_mutex);
2970			btrfs_device_set_bytes_used(device,
2971					device->bytes_used - dev_extent_len);
2972			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
2973			btrfs_clear_space_info_full(fs_info);
2974			mutex_unlock(&fs_info->chunk_mutex);
2975		}
2976
2977		ret = btrfs_update_device(trans, device);
2978		if (ret) {
2979			mutex_unlock(&fs_devices->device_list_mutex);
2980			btrfs_abort_transaction(trans, ret);
2981			goto out;
2982		}
2983	}
2984	mutex_unlock(&fs_devices->device_list_mutex);
2985
2986	ret = btrfs_free_chunk(trans, chunk_offset);
2987	if (ret) {
2988		btrfs_abort_transaction(trans, ret);
2989		goto out;
2990	}
2991
2992	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
2993
2994	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2995		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
2996		if (ret) {
2997			btrfs_abort_transaction(trans, ret);
2998			goto out;
2999		}
3000	}
3001
3002	ret = btrfs_remove_block_group(trans, chunk_offset, em);
3003	if (ret) {
3004		btrfs_abort_transaction(trans, ret);
3005		goto out;
3006	}
3007
3008out:
3009	/* once for us */
3010	free_extent_map(em);
3011	return ret;
3012}
3013
3014static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3015{
3016	struct btrfs_root *root = fs_info->chunk_root;
3017	struct btrfs_trans_handle *trans;
3018	struct btrfs_block_group *block_group;
3019	int ret;
3020
3021	/*
3022	 * Prevent races with automatic removal of unused block groups.
3023	 * After we relocate and before we remove the chunk with offset
3024	 * chunk_offset, automatic removal of the block group can kick in,
3025	 * resulting in a failure when calling btrfs_remove_chunk() below.
3026	 *
3027	 * Make sure to acquire this mutex before doing a tree search (dev
3028	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3029	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3030	 * we release the path used to search the chunk/dev tree and before
3031	 * the current task acquires this mutex and calls us.
3032	 */
3033	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3034
3035	/* step one, relocate all the extents inside this chunk */
3036	btrfs_scrub_pause(fs_info);
3037	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3038	btrfs_scrub_continue(fs_info);
3039	if (ret)
3040		return ret;
3041
3042	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3043	if (!block_group)
3044		return -ENOENT;
3045	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3046	btrfs_put_block_group(block_group);
3047
3048	trans = btrfs_start_trans_remove_block_group(root->fs_info,
3049						     chunk_offset);
3050	if (IS_ERR(trans)) {
3051		ret = PTR_ERR(trans);
3052		btrfs_handle_fs_error(root->fs_info, ret, NULL);
3053		return ret;
3054	}
3055
3056	/*
3057	 * step two, delete the device extents and the
3058	 * chunk tree entries
3059	 */
3060	ret = btrfs_remove_chunk(trans, chunk_offset);
3061	btrfs_end_transaction(trans);
3062	return ret;
3063}
3064
3065static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3066{
3067	struct btrfs_root *chunk_root = fs_info->chunk_root;
3068	struct btrfs_path *path;
3069	struct extent_buffer *leaf;
3070	struct btrfs_chunk *chunk;
3071	struct btrfs_key key;
3072	struct btrfs_key found_key;
3073	u64 chunk_type;
3074	bool retried = false;
3075	int failed = 0;
3076	int ret;
3077
3078	path = btrfs_alloc_path();
3079	if (!path)
3080		return -ENOMEM;
3081
3082again:
3083	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3084	key.offset = (u64)-1;
3085	key.type = BTRFS_CHUNK_ITEM_KEY;
3086
3087	while (1) {
3088		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3089		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3090		if (ret < 0) {
3091			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3092			goto error;
3093		}
3094		BUG_ON(ret == 0); /* Corruption */
3095
3096		ret = btrfs_previous_item(chunk_root, path, key.objectid,
3097					  key.type);
3098		if (ret)
3099			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3100		if (ret < 0)
3101			goto error;
3102		if (ret > 0)
3103			break;
3104
3105		leaf = path->nodes[0];
3106		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3107
3108		chunk = btrfs_item_ptr(leaf, path->slots[0],
3109				       struct btrfs_chunk);
3110		chunk_type = btrfs_chunk_type(leaf, chunk);
3111		btrfs_release_path(path);
3112
3113		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3114			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3115			if (ret == -ENOSPC)
3116				failed++;
3117			else
3118				BUG_ON(ret);
3119		}
3120		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3121
3122		if (found_key.offset == 0)
3123			break;
3124		key.offset = found_key.offset - 1;
3125	}
3126	ret = 0;
3127	if (failed && !retried) {
3128		failed = 0;
3129		retried = true;
3130		goto again;
3131	} else if (WARN_ON(failed && retried)) {
3132		ret = -ENOSPC;
3133	}
3134error:
3135	btrfs_free_path(path);
3136	return ret;
3137}
3138
3139/*
3140 * Return 1 : allocated a data chunk successfully,
3141 * return <0: error while allocating a data chunk,
3142 * return 0 : no need to allocate a data chunk.
3143 */
3144static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3145				      u64 chunk_offset)
3146{
3147	struct btrfs_block_group *cache;
3148	u64 bytes_used;
3149	u64 chunk_type;
3150
3151	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3152	ASSERT(cache);
3153	chunk_type = cache->flags;
3154	btrfs_put_block_group(cache);
3155
3156	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3157		return 0;
3158
3159	spin_lock(&fs_info->data_sinfo->lock);
3160	bytes_used = fs_info->data_sinfo->bytes_used;
3161	spin_unlock(&fs_info->data_sinfo->lock);
3162
3163	if (!bytes_used) {
3164		struct btrfs_trans_handle *trans;
3165		int ret;
3166
3167		trans = btrfs_join_transaction(fs_info->tree_root);
3168		if (IS_ERR(trans))
3169			return PTR_ERR(trans);
3170
3171		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3172		btrfs_end_transaction(trans);
3173		if (ret < 0)
3174			return ret;
3175		return 1;
3176	}
3177
3178	return 0;
3179}
3180
3181static int insert_balance_item(struct btrfs_fs_info *fs_info,
3182			       struct btrfs_balance_control *bctl)
3183{
3184	struct btrfs_root *root = fs_info->tree_root;
3185	struct btrfs_trans_handle *trans;
3186	struct btrfs_balance_item *item;
3187	struct btrfs_disk_balance_args disk_bargs;
3188	struct btrfs_path *path;
3189	struct extent_buffer *leaf;
3190	struct btrfs_key key;
3191	int ret, err;
3192
3193	path = btrfs_alloc_path();
3194	if (!path)
3195		return -ENOMEM;
3196
3197	trans = btrfs_start_transaction(root, 0);
3198	if (IS_ERR(trans)) {
3199		btrfs_free_path(path);
3200		return PTR_ERR(trans);
3201	}
3202
3203	key.objectid = BTRFS_BALANCE_OBJECTID;
3204	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3205	key.offset = 0;
3206
3207	ret = btrfs_insert_empty_item(trans, root, path, &key,
3208				      sizeof(*item));
3209	if (ret)
3210		goto out;
3211
3212	leaf = path->nodes[0];
3213	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3214
3215	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3216
3217	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3218	btrfs_set_balance_data(leaf, item, &disk_bargs);
3219	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3220	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3221	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3222	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3223
3224	btrfs_set_balance_flags(leaf, item, bctl->flags);
3225
3226	btrfs_mark_buffer_dirty(leaf);
3227out:
3228	btrfs_free_path(path);
3229	err = btrfs_commit_transaction(trans);
3230	if (err && !ret)
3231		ret = err;
3232	return ret;
3233}
3234
3235static int del_balance_item(struct btrfs_fs_info *fs_info)
3236{
3237	struct btrfs_root *root = fs_info->tree_root;
3238	struct btrfs_trans_handle *trans;
3239	struct btrfs_path *path;
3240	struct btrfs_key key;
3241	int ret, err;
3242
3243	path = btrfs_alloc_path();
3244	if (!path)
3245		return -ENOMEM;
3246
3247	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3248	if (IS_ERR(trans)) {
3249		btrfs_free_path(path);
3250		return PTR_ERR(trans);
3251	}
3252
3253	key.objectid = BTRFS_BALANCE_OBJECTID;
3254	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3255	key.offset = 0;
3256
3257	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3258	if (ret < 0)
3259		goto out;
3260	if (ret > 0) {
3261		ret = -ENOENT;
3262		goto out;
3263	}
3264
3265	ret = btrfs_del_item(trans, root, path);
3266out:
3267	btrfs_free_path(path);
3268	err = btrfs_commit_transaction(trans);
3269	if (err && !ret)
3270		ret = err;
3271	return ret;
3272}
3273
3274/*
3275 * This is a heuristic used to reduce the number of chunks balanced on
3276 * resume after balance was interrupted.
3277 */
3278static void update_balance_args(struct btrfs_balance_control *bctl)
3279{
3280	/*
3281	 * Turn on soft mode for chunk types that were being converted.
3282	 */
3283	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3284		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3285	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3286		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3287	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3288		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3289
3290	/*
3291	 * Turn on the usage filter if it is not already in use.  The idea is
3292	 * that chunks that we have already balanced should be
3293	 * reasonably full.  Don't do it for chunks that are being
3294	 * converted - that will keep us from relocating unconverted
3295	 * (albeit full) chunks.
3296	 */
3297	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3298	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3299	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3300		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3301		bctl->data.usage = 90;
3302	}
3303	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3304	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3305	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3306		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3307		bctl->sys.usage = 90;
3308	}
3309	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3310	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3311	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3312		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3313		bctl->meta.usage = 90;
3314	}
3315}
3316
3317/*
3318 * Clear the balance status in fs_info and delete the balance item from disk.
3319 */
3320static void reset_balance_state(struct btrfs_fs_info *fs_info)
3321{
3322	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3323	int ret;
3324
3325	BUG_ON(!fs_info->balance_ctl);
3326
3327	spin_lock(&fs_info->balance_lock);
3328	fs_info->balance_ctl = NULL;
3329	spin_unlock(&fs_info->balance_lock);
3330
3331	kfree(bctl);
3332	ret = del_balance_item(fs_info);
3333	if (ret)
3334		btrfs_handle_fs_error(fs_info, ret, NULL);
3335}
3336
3337/*
3338 * Balance filters.  Return 1 if chunk should be filtered out
3339 * (should not be balanced).
3340 */
3341static int chunk_profiles_filter(u64 chunk_type,
3342				 struct btrfs_balance_args *bargs)
3343{
3344	chunk_type = chunk_to_extended(chunk_type) &
3345				BTRFS_EXTENDED_PROFILE_MASK;
3346
3347	if (bargs->profiles & chunk_type)
3348		return 0;
3349
3350	return 1;
3351}
3352
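/*
 * Usage range filter: a chunk passes (returns 0) when its used bytes fall in
 * [usage_min%, usage_max%) of the chunk length; with usage_max == 0 only
 * completely empty chunks pass.
 */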
3353static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3354			      struct btrfs_balance_args *bargs)
3355{
3356	struct btrfs_block_group *cache;
3357	u64 chunk_used;
3358	u64 user_thresh_min;
3359	u64 user_thresh_max;
3360	int ret = 1;
3361
3362	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3363	chunk_used = cache->used;
3364
3365	if (bargs->usage_min == 0)
3366		user_thresh_min = 0;
3367	else
3368		user_thresh_min = div_factor_fine(cache->length,
3369						  bargs->usage_min);
3370
3371	if (bargs->usage_max == 0)
3372		user_thresh_max = 1;
3373	else if (bargs->usage_max > 100)
3374		user_thresh_max = cache->length;
3375	else
3376		user_thresh_max = div_factor_fine(cache->length,
3377						  bargs->usage_max);
3378
3379	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3380		ret = 0;
3381
3382	btrfs_put_block_group(cache);
3383	return ret;
3384}
3385
3386static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3387		u64 chunk_offset, struct btrfs_balance_args *bargs)
3388{
3389	struct btrfs_block_group *cache;
3390	u64 chunk_used, user_thresh;
3391	int ret = 1;
3392
3393	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3394	chunk_used = cache->used;
3395
3396	if (bargs->usage_min == 0)
3397		user_thresh = 1;
3398	else if (bargs->usage > 100)
3399		user_thresh = cache->length;
3400	else
3401		user_thresh = div_factor_fine(cache->length, bargs->usage);
3402
3403	if (chunk_used < user_thresh)
3404		ret = 0;
3405
3406	btrfs_put_block_group(cache);
3407	return ret;
3408}
3409
3410static int chunk_devid_filter(struct extent_buffer *leaf,
3411			      struct btrfs_chunk *chunk,
3412			      struct btrfs_balance_args *bargs)
3413{
3414	struct btrfs_stripe *stripe;
3415	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3416	int i;
3417
3418	for (i = 0; i < num_stripes; i++) {
3419		stripe = btrfs_stripe_nr(chunk, i);
3420		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3421			return 0;
3422	}
3423
3424	return 1;
3425}
3426
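/*
 * Number of stripes that carry data, e.g. 1 for a two-stripe RAID1 chunk
 * (ncopies == 2) and num_stripes - 2 for RAID6 (nparity == 2).
 */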
3427static u64 calc_data_stripes(u64 type, int num_stripes)
3428{
3429	const int index = btrfs_bg_flags_to_raid_index(type);
3430	const int ncopies = btrfs_raid_array[index].ncopies;
3431	const int nparity = btrfs_raid_array[index].nparity;
3432
3433	if (nparity)
3434		return num_stripes - nparity;
3435	else
3436		return num_stripes / ncopies;
3437}
3438
3439/* [pstart, pend) */
3440static int chunk_drange_filter(struct extent_buffer *leaf,
3441			       struct btrfs_chunk *chunk,
3442			       struct btrfs_balance_args *bargs)
3443{
3444	struct btrfs_stripe *stripe;
3445	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3446	u64 stripe_offset;
3447	u64 stripe_length;
3448	u64 type;
3449	int factor;
3450	int i;
3451
3452	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3453		return 0;
3454
3455	type = btrfs_chunk_type(leaf, chunk);
3456	factor = calc_data_stripes(type, num_stripes);
3457
3458	for (i = 0; i < num_stripes; i++) {
3459		stripe = btrfs_stripe_nr(chunk, i);
3460		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3461			continue;
3462
3463		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3464		stripe_length = btrfs_chunk_length(leaf, chunk);
3465		stripe_length = div_u64(stripe_length, factor);
3466
3467		if (stripe_offset < bargs->pend &&
3468		    stripe_offset + stripe_length > bargs->pstart)
3469			return 0;
3470	}
3471
3472	return 1;
3473}
3474
3475/* [vstart, vend) */
3476static int chunk_vrange_filter(struct extent_buffer *leaf,
3477			       struct btrfs_chunk *chunk,
3478			       u64 chunk_offset,
3479			       struct btrfs_balance_args *bargs)
3480{
3481	if (chunk_offset < bargs->vend &&
3482	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3483		/* at least part of the chunk is inside this vrange */
3484		return 0;
3485
3486	return 1;
3487}
3488
3489static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3490			       struct btrfs_chunk *chunk,
3491			       struct btrfs_balance_args *bargs)
3492{
3493	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3494
3495	if (bargs->stripes_min <= num_stripes &&
3496	    num_stripes <= bargs->stripes_max)
3497		return 0;
3498
3499	return 1;
3500}
3501
3502static int chunk_soft_convert_filter(u64 chunk_type,
3503				     struct btrfs_balance_args *bargs)
3504{
3505	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3506		return 0;
3507
3508	chunk_type = chunk_to_extended(chunk_type) &
3509				BTRFS_EXTENDED_PROFILE_MASK;
3510
3511	if (bargs->target == chunk_type)
3512		return 1;
3513
3514	return 0;
3515}
3516
3517static int should_balance_chunk(struct extent_buffer *leaf,
3518				struct btrfs_chunk *chunk, u64 chunk_offset)
3519{
3520	struct btrfs_fs_info *fs_info = leaf->fs_info;
3521	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3522	struct btrfs_balance_args *bargs = NULL;
3523	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3524
3525	/* type filter */
3526	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3527	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3528		return 0;
3529	}
3530
3531	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3532		bargs = &bctl->data;
3533	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3534		bargs = &bctl->sys;
3535	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3536		bargs = &bctl->meta;
3537
3538	/* profiles filter */
3539	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3540	    chunk_profiles_filter(chunk_type, bargs)) {
3541		return 0;
3542	}
3543
3544	/* usage filter */
3545	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3546	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3547		return 0;
3548	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3549	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3550		return 0;
3551	}
3552
3553	/* devid filter */
3554	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3555	    chunk_devid_filter(leaf, chunk, bargs)) {
3556		return 0;
3557	}
3558
3559	/* drange filter, makes sense only with devid filter */
3560	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3561	    chunk_drange_filter(leaf, chunk, bargs)) {
3562		return 0;
3563	}
3564
3565	/* vrange filter */
3566	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3567	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3568		return 0;
3569	}
3570
3571	/* stripes filter */
3572	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3573	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3574		return 0;
3575	}
3576
3577	/* soft profile changing mode */
3578	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3579	    chunk_soft_convert_filter(chunk_type, bargs)) {
3580		return 0;
3581	}
3582
3583	/*
3584	 * limited by count, must be the last filter
3585	 */
3586	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3587		if (bargs->limit == 0)
3588			return 0;
3589		else
3590			bargs->limit--;
3591	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3592		/*
3593		 * Same logic as the 'limit' filter; the minimum cannot be
3594		 * determined here because we do not have the global information
3595		 * about the count of all chunks that satisfy the filters.
3596		 */
3597		if (bargs->limit_max == 0)
3598			return 0;
3599		else
3600			bargs->limit_max--;
3601	}
3602
3603	return 1;
3604}
3605
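/*
 * Main balance loop.  Walks the chunk tree twice from the highest chunk
 * offset down: a counting pass that only tallies matching chunks into
 * bctl->stat.expected, then a second pass that relocates them one by one.
 */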
3606static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3607{
3608	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3609	struct btrfs_root *chunk_root = fs_info->chunk_root;
3610	u64 chunk_type;
3611	struct btrfs_chunk *chunk;
3612	struct btrfs_path *path = NULL;
3613	struct btrfs_key key;
3614	struct btrfs_key found_key;
3615	struct extent_buffer *leaf;
3616	int slot;
3617	int ret;
3618	int enospc_errors = 0;
3619	bool counting = true;
3620	/* The single value limit and min/max limits use the same bytes in the union */
3621	u64 limit_data = bctl->data.limit;
3622	u64 limit_meta = bctl->meta.limit;
3623	u64 limit_sys = bctl->sys.limit;
3624	u32 count_data = 0;
3625	u32 count_meta = 0;
3626	u32 count_sys = 0;
3627	int chunk_reserved = 0;
3628
3629	path = btrfs_alloc_path();
3630	if (!path) {
3631		ret = -ENOMEM;
3632		goto error;
3633	}
3634
3635	/* zero out stat counters */
3636	spin_lock(&fs_info->balance_lock);
3637	memset(&bctl->stat, 0, sizeof(bctl->stat));
3638	spin_unlock(&fs_info->balance_lock);
3639again:
3640	if (!counting) {
3641		/*
3642		 * The single value limit and min/max limits use the same bytes
3643		 * in the union; restore the values the counting pass consumed.
3644		 */
3645		bctl->data.limit = limit_data;
3646		bctl->meta.limit = limit_meta;
3647		bctl->sys.limit = limit_sys;
3648	}
3649	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3650	key.offset = (u64)-1;
3651	key.type = BTRFS_CHUNK_ITEM_KEY;
3652
3653	while (1) {
3654		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3655		    atomic_read(&fs_info->balance_cancel_req)) {
3656			ret = -ECANCELED;
3657			goto error;
3658		}
3659
3660		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3661		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3662		if (ret < 0) {
3663			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3664			goto error;
3665		}
3666
3667		/*
3668		 * this shouldn't happen, it means the last relocate
3669		 * failed
3670		 */
3671		if (ret == 0)
3672			BUG(); /* FIXME break ? */
3673
3674		ret = btrfs_previous_item(chunk_root, path, 0,
3675					  BTRFS_CHUNK_ITEM_KEY);
3676		if (ret) {
3677			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3678			ret = 0;
3679			break;
3680		}
3681
3682		leaf = path->nodes[0];
3683		slot = path->slots[0];
3684		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3685
3686		if (found_key.objectid != key.objectid) {
3687			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3688			break;
3689		}
3690
3691		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3692		chunk_type = btrfs_chunk_type(leaf, chunk);
3693
3694		if (!counting) {
3695			spin_lock(&fs_info->balance_lock);
3696			bctl->stat.considered++;
3697			spin_unlock(&fs_info->balance_lock);
3698		}
3699
3700		ret = should_balance_chunk(leaf, chunk, found_key.offset);
3701
3702		btrfs_release_path(path);
3703		if (!ret) {
3704			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3705			goto loop;
3706		}
3707
3708		if (counting) {
3709			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3710			spin_lock(&fs_info->balance_lock);
3711			bctl->stat.expected++;
3712			spin_unlock(&fs_info->balance_lock);
3713
3714			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3715				count_data++;
3716			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3717				count_sys++;
3718			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3719				count_meta++;
3720
3721			goto loop;
3722		}
3723
3724		/*
3725		 * Apply the limit_min filter; no need to check whether the LIMITS
3726		 * filter is used since limit_min is 0 by default.
3727		 */
3728		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3729		     count_data < bctl->data.limit_min) ||
3730		    ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3731		     count_meta < bctl->meta.limit_min) ||
3732		    ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3733		     count_sys < bctl->sys.limit_min)) {
3734			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3735			goto loop;
3736		}
3737
3738		if (!chunk_reserved) {
3739			/*
3740			 * We may be relocating the only data chunk we have,
3741			 * which could potentially end up losing the data raid
3742			 * profile, so let's allocate an empty one in
3743			 * advance.
3744			 */
3745			ret = btrfs_may_alloc_data_chunk(fs_info,
3746							 found_key.offset);
3747			if (ret < 0) {
3748				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3749				goto error;
3750			} else if (ret == 1) {
3751				chunk_reserved = 1;
3752			}
3753		}
3754
3755		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3756		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3757		if (ret == -ENOSPC) {
3758			enospc_errors++;
3759		} else if (ret == -ETXTBSY) {
3760			btrfs_info(fs_info,
3761	   "skipping relocation of block group %llu due to active swapfile",
3762				   found_key.offset);
3763			ret = 0;
3764		} else if (ret) {
3765			goto error;
3766		} else {
3767			spin_lock(&fs_info->balance_lock);
3768			bctl->stat.completed++;
3769			spin_unlock(&fs_info->balance_lock);
3770		}
3771loop:
3772		if (found_key.offset == 0)
3773			break;
3774		key.offset = found_key.offset - 1;
3775	}
3776
3777	if (counting) {
3778		btrfs_release_path(path);
3779		counting = false;
3780		goto again;
3781	}
3782error:
3783	btrfs_free_path(path);
3784	if (enospc_errors) {
3785		btrfs_info(fs_info, "%d enospc errors during balance",
3786			   enospc_errors);
3787		if (!ret)
3788			ret = -ENOSPC;
3789	}
3790
3791	return ret;
3792}
3793
3794/**
3795 * alloc_profile_is_valid - see if a given profile is valid and reduced
3796 * @flags: profile to validate
3797 * @extended: if true @flags is treated as an extended profile
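 *
 * Example: RAID1 alone is valid and reduced, RAID1|RAID10 is not reduced,
 * and 0 is valid only when @extended is false.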
3798 */
3799static int alloc_profile_is_valid(u64 flags, int extended)
3800{
3801	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3802			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3803
3804	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3805
3806	/* 1) check that all other bits are zeroed */
3807	if (flags & ~mask)
3808		return 0;
3809
3810	/* 2) see if profile is reduced */
3811	if (flags == 0)
3812		return !extended; /* "0" is valid for usual profiles */
3813
3814	return has_single_bit_set(flags);
3815}
3816
3817static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3818{
3819	/* cancel requested || normal exit path */
3820	return atomic_read(&fs_info->balance_cancel_req) ||
3821		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3822		 atomic_read(&fs_info->balance_cancel_req) == 0);
3823}
3824
3825/*
3826 * Validate target profile against allowed profiles and return true if it's OK.
3827 * Otherwise print the error message and return false.
3828 */
3829static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
3830		const struct btrfs_balance_args *bargs,
3831		u64 allowed, const char *type)
3832{
3833	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3834		return true;
3835
3836	/* Profile is valid and does not have bits outside of the allowed set */
3837	if (alloc_profile_is_valid(bargs->target, 1) &&
3838	    (bargs->target & ~allowed) == 0)
3839		return true;
3840
3841	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
3842			type, btrfs_bg_type_to_raid_name(bargs->target));
3843	return false;
3844}
3845
3846/*
3847 * Fill @buf with textual description of balance filter flags @bargs, up to
3848 * @size_buf including the terminating null. The output may be trimmed if it
3849 * does not fit into the provided buffer.
3850 */
3851static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3852				 u32 size_buf)
3853{
3854	int ret;
3855	u32 size_bp = size_buf;
3856	char *bp = buf;
3857	u64 flags = bargs->flags;
3858	char tmp_buf[128] = {'\0'};
3859
3860	if (!flags)
3861		return;
3862
3863#define CHECK_APPEND_NOARG(a)						\
3864	do {								\
3865		ret = snprintf(bp, size_bp, (a));			\
3866		if (ret < 0 || ret >= size_bp)				\
3867			goto out_overflow;				\
3868		size_bp -= ret;						\
3869		bp += ret;						\
3870	} while (0)
3871
3872#define CHECK_APPEND_1ARG(a, v1)					\
3873	do {								\
3874		ret = snprintf(bp, size_bp, (a), (v1));			\
3875		if (ret < 0 || ret >= size_bp)				\
3876			goto out_overflow;				\
3877		size_bp -= ret;						\
3878		bp += ret;						\
3879	} while (0)
3880
3881#define CHECK_APPEND_2ARG(a, v1, v2)					\
3882	do {								\
3883		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
3884		if (ret < 0 || ret >= size_bp)				\
3885			goto out_overflow;				\
3886		size_bp -= ret;						\
3887		bp += ret;						\
3888	} while (0)
3889
3890	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
3891		CHECK_APPEND_1ARG("convert=%s,",
3892				  btrfs_bg_type_to_raid_name(bargs->target));
3893
3894	if (flags & BTRFS_BALANCE_ARGS_SOFT)
3895		CHECK_APPEND_NOARG("soft,");
3896
3897	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
3898		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
3899					    sizeof(tmp_buf));
3900		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
3901	}
3902
3903	if (flags & BTRFS_BALANCE_ARGS_USAGE)
3904		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
3905
3906	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
3907		CHECK_APPEND_2ARG("usage=%u..%u,",
3908				  bargs->usage_min, bargs->usage_max);
3909
3910	if (flags & BTRFS_BALANCE_ARGS_DEVID)
3911		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
3912
3913	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
3914		CHECK_APPEND_2ARG("drange=%llu..%llu,",
3915				  bargs->pstart, bargs->pend);
3916
3917	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
3918		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
3919				  bargs->vstart, bargs->vend);
3920
3921	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
3922		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
3923
3924	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
3925		CHECK_APPEND_2ARG("limit=%u..%u,",
3926				bargs->limit_min, bargs->limit_max);
3927
3928	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
3929		CHECK_APPEND_2ARG("stripes=%u..%u,",
3930				  bargs->stripes_min, bargs->stripes_max);
3931
3932#undef CHECK_APPEND_2ARG
3933#undef CHECK_APPEND_1ARG
3934#undef CHECK_APPEND_NOARG
3935
3936out_overflow:
3937
3938	if (size_bp < size_buf)
3939		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
3940	else
3941		buf[0] = '\0';
3942}
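
/*
 * For example, balance args with CONVERT to raid1, SOFT and LIMIT 10 are
 * rendered by the function above as "convert=raid1,soft,limit=10" (the
 * trailing comma is trimmed at out_overflow).
 */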
3943
3944static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
3945{
3946	u32 size_buf = 1024;
3947	char tmp_buf[192] = {'\0'};
3948	char *buf;
3949	char *bp;
3950	u32 size_bp = size_buf;
3951	int ret;
3952	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3953
3954	buf = kzalloc(size_buf, GFP_KERNEL);
3955	if (!buf)
3956		return;
3957
3958	bp = buf;
3959
3960#define CHECK_APPEND_1ARG(a, v1)					\
3961	do {								\
3962		ret = snprintf(bp, size_bp, (a), (v1));			\
3963		if (ret < 0 || ret >= size_bp)				\
3964			goto out_overflow;				\
3965		size_bp -= ret;						\
3966		bp += ret;						\
3967	} while (0)
3968
3969	if (bctl->flags & BTRFS_BALANCE_FORCE)
3970		CHECK_APPEND_1ARG("%s", "-f ");
3971
3972	if (bctl->flags & BTRFS_BALANCE_DATA) {
3973		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
3974		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
3975	}
3976
3977	if (bctl->flags & BTRFS_BALANCE_METADATA) {
3978		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
3979		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
3980	}
3981
3982	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
3983		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
3984		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
3985	}
3986
3987#undef CHECK_APPEND_1ARG
3988
3989out_overflow:
3990
3991	if (size_bp < size_buf)
3992		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
3993	btrfs_info(fs_info, "balance: %s %s",
3994		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
3995		   "resume" : "start", buf);
3996
3997	kfree(buf);
3998}
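
/*
 * The function above produces log lines such as
 * "balance: resume -dusage=90 -musage=90 -susage=90", an illustrative
 * resume after the usage heuristic of update_balance_args() was applied.
 */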
3999
4000/*
4001 * Should be called with the balance mutex held.
4002 */
4003int btrfs_balance(struct btrfs_fs_info *fs_info,
4004		  struct btrfs_balance_control *bctl,
4005		  struct btrfs_ioctl_balance_args *bargs)
4006{
4007	u64 meta_target, data_target;
4008	u64 allowed;
4009	int mixed = 0;
4010	int ret;
4011	u64 num_devices;
4012	unsigned seq;
4013	bool reducing_redundancy;
4014	int i;
4015
4016	if (btrfs_fs_closing(fs_info) ||
4017	    atomic_read(&fs_info->balance_pause_req) ||
4018	    btrfs_should_cancel_balance(fs_info)) {
4019		ret = -EINVAL;
4020		goto out;
4021	}
4022
4023	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4024	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4025		mixed = 1;
4026
4027	/*
4028	 * In case of mixed groups both data and meta should be picked,
4029	 * and identical options should be given for both of them.
4030	 */
4031	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4032	if (mixed && (bctl->flags & allowed)) {
4033		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4034		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4035		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4036			btrfs_err(fs_info,
4037	  "balance: mixed groups data and metadata options must be the same");
4038			ret = -EINVAL;
4039			goto out;
4040		}
4041	}
4042
4043	/*
4044	 * rw_devices will not change at the moment, device add/delete/replace
4045	 * are excluded by EXCL_OP
4046	 */
4047	num_devices = fs_info->fs_devices->rw_devices;
4048
4049	/*
4050	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4051	 * special bit for it, to make it easier to distinguish.  Thus we need
4052	 * to set it manually, or balance would refuse the profile.
4053	 */
4054	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4055	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4056		if (num_devices >= btrfs_raid_array[i].devs_min)
4057			allowed |= btrfs_raid_array[i].bg_flag;
4058
4059	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4060	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4061	    !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4062		ret = -EINVAL;
4063		goto out;
4064	}
4065
4066	/*
4067	 * Allow to reduce metadata or system integrity only if force set for
4068	 * profiles with redundancy (copies, parity)
4069	 */
4070	allowed = 0;
4071	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4072		if (btrfs_raid_array[i].ncopies >= 2 ||
4073		    btrfs_raid_array[i].tolerated_failures >= 1)
4074			allowed |= btrfs_raid_array[i].bg_flag;
4075	}
4076	do {
4077		seq = read_seqbegin(&fs_info->profiles_lock);
4078
4079		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4080		     (fs_info->avail_system_alloc_bits & allowed) &&
4081		     !(bctl->sys.target & allowed)) ||
4082		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4083		     (fs_info->avail_metadata_alloc_bits & allowed) &&
4084		     !(bctl->meta.target & allowed)))
4085			reducing_redundancy = true;
4086		else
4087			reducing_redundancy = false;
4088
4089		/* if we're not converting, the target field is uninitialized */
4090		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4091			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4092		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4093			bctl->data.target : fs_info->avail_data_alloc_bits;
4094	} while (read_seqretry(&fs_info->profiles_lock, seq));
4095
4096	if (reducing_redundancy) {
4097		if (bctl->flags & BTRFS_BALANCE_FORCE) {
4098			btrfs_info(fs_info,
4099			   "balance: force reducing metadata redundancy");
4100		} else {
4101			btrfs_err(fs_info,
4102	"balance: reduces metadata redundancy, use --force if you want this");
4103			ret = -EINVAL;
4104			goto out;
4105		}
4106	}
4107
4108	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4109		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4110		btrfs_warn(fs_info,
4111	"balance: metadata profile %s has lower redundancy than data profile %s",
4112				btrfs_bg_type_to_raid_name(meta_target),
4113				btrfs_bg_type_to_raid_name(data_target));
4114	}
4115
4116	if (fs_info->send_in_progress) {
4117		btrfs_warn_rl(fs_info,
4118"cannot run balance while send operations are in progress (%d in progress)",
4119			      fs_info->send_in_progress);
4120		ret = -EAGAIN;
4121		goto out;
4122	}
4123
4124	ret = insert_balance_item(fs_info, bctl);
4125	if (ret && ret != -EEXIST)
4126		goto out;
4127
4128	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4129		BUG_ON(ret == -EEXIST);
4130		BUG_ON(fs_info->balance_ctl);
4131		spin_lock(&fs_info->balance_lock);
4132		fs_info->balance_ctl = bctl;
4133		spin_unlock(&fs_info->balance_lock);
4134	} else {
4135		BUG_ON(ret != -EEXIST);
4136		spin_lock(&fs_info->balance_lock);
4137		update_balance_args(bctl);
4138		spin_unlock(&fs_info->balance_lock);
4139	}
4140
4141	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4142	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4143	describe_balance_start_or_resume(fs_info);
4144	mutex_unlock(&fs_info->balance_mutex);
4145
4146	ret = __btrfs_balance(fs_info);
4147
4148	mutex_lock(&fs_info->balance_mutex);
4149	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4150		btrfs_info(fs_info, "balance: paused");
4151	/*
4152	 * Balance can be canceled by:
4153	 *
4154	 * - Regular cancel request
4155	 *   Then ret == -ECANCELED and balance_cancel_req > 0
4156	 *
4157	 * - Fatal signal to "btrfs" process
4158	 *   Either the signal is caught by wait_reserve_ticket() and callers
4159	 *   get -EINTR, or it is caught by btrfs_should_cancel_balance() and
4160	 *   they get -ECANCELED.
4161	 *   Either way, in this case balance_cancel_req = 0, and
4162	 *   ret == -EINTR or ret == -ECANCELED.
4163	 *
4164	 * So here we only check the return value to catch canceled balance.
4165	 */
4166	else if (ret == -ECANCELED || ret == -EINTR)
4167		btrfs_info(fs_info, "balance: canceled");
4168	else
4169		btrfs_info(fs_info, "balance: ended with status: %d", ret);
4170
4171	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4172
4173	if (bargs) {
4174		memset(bargs, 0, sizeof(*bargs));
4175		btrfs_update_ioctl_balance_args(fs_info, bargs);
4176	}
4177
4178	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4179	    balance_need_close(fs_info)) {
4180		reset_balance_state(fs_info);
4181		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4182	}
4183
4184	wake_up(&fs_info->balance_wait_q);
4185
4186	return ret;
4187out:
4188	if (bctl->flags & BTRFS_BALANCE_RESUME)
4189		reset_balance_state(fs_info);
4190	else
4191		kfree(bctl);
4192	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4193
4194	return ret;
4195}
4196
4197static int balance_kthread(void *data)
4198{
4199	struct btrfs_fs_info *fs_info = data;
4200	int ret = 0;
4201
4202	mutex_lock(&fs_info->balance_mutex);
4203	if (fs_info->balance_ctl)
4204		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4205	mutex_unlock(&fs_info->balance_mutex);
4206
4207	return ret;
4208}
4209
4210int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4211{
4212	struct task_struct *tsk;
4213
4214	mutex_lock(&fs_info->balance_mutex);
4215	if (!fs_info->balance_ctl) {
4216		mutex_unlock(&fs_info->balance_mutex);
4217		return 0;
4218	}
4219	mutex_unlock(&fs_info->balance_mutex);
4220
4221	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4222		btrfs_info(fs_info, "balance: resume skipped");
4223		return 0;
4224	}
4225
4226	/*
4227	 * A ro->rw remount sequence should continue with the paused balance
4228	 * regardless of who pauses it, system or the user as of now, so set
4229	 * the resume flag.
4230	 */
4231	spin_lock(&fs_info->balance_lock);
4232	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4233	spin_unlock(&fs_info->balance_lock);
4234
4235	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4236	return PTR_ERR_OR_ZERO(tsk);
4237}
4238
4239int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4240{
4241	struct btrfs_balance_control *bctl;
4242	struct btrfs_balance_item *item;
4243	struct btrfs_disk_balance_args disk_bargs;
4244	struct btrfs_path *path;
4245	struct extent_buffer *leaf;
4246	struct btrfs_key key;
4247	int ret;
4248
4249	path = btrfs_alloc_path();
4250	if (!path)
4251		return -ENOMEM;
4252
4253	key.objectid = BTRFS_BALANCE_OBJECTID;
4254	key.type = BTRFS_TEMPORARY_ITEM_KEY;
4255	key.offset = 0;
4256
4257	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4258	if (ret < 0)
4259		goto out;
4260	if (ret > 0) { /* ret = -ENOENT; */
4261		ret = 0;
4262		goto out;
4263	}
4264
4265	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4266	if (!bctl) {
4267		ret = -ENOMEM;
4268		goto out;
4269	}
4270
4271	leaf = path->nodes[0];
4272	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4273
4274	bctl->flags = btrfs_balance_flags(leaf, item);
4275	bctl->flags |= BTRFS_BALANCE_RESUME;
4276
4277	btrfs_balance_data(leaf, item, &disk_bargs);
4278	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4279	btrfs_balance_meta(leaf, item, &disk_bargs);
4280	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4281	btrfs_balance_sys(leaf, item, &disk_bargs);
4282	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4283
4284	/*
4285	 * This should never happen, as the paused balance state is recovered
4286	 * during mount without any chance for other exclusive ops to collide.
4287	 *
4288	 * This gives the exclusive op status to balance and keeps in paused
4289	 * state until user intervention (cancel or umount). If the ownership
4290	 * cannot be assigned, show a message but do not fail. The balance
4291	 * is in a paused state and must have fs_info::balance_ctl properly
4292	 * set up.
4293	 */
4294	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
4295		btrfs_warn(fs_info,
4296	"balance: cannot set exclusive op status, resume manually");
4297
4298	mutex_lock(&fs_info->balance_mutex);
4299	BUG_ON(fs_info->balance_ctl);
4300	spin_lock(&fs_info->balance_lock);
4301	fs_info->balance_ctl = bctl;
4302	spin_unlock(&fs_info->balance_lock);
4303	mutex_unlock(&fs_info->balance_mutex);
4304out:
4305	btrfs_free_path(path);
4306	return ret;
4307}
4308
4309int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4310{
4311	int ret = 0;
4312
4313	mutex_lock(&fs_info->balance_mutex);
4314	if (!fs_info->balance_ctl) {
4315		mutex_unlock(&fs_info->balance_mutex);
4316		return -ENOTCONN;
4317	}
4318
4319	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4320		atomic_inc(&fs_info->balance_pause_req);
4321		mutex_unlock(&fs_info->balance_mutex);
4322
4323		wait_event(fs_info->balance_wait_q,
4324			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4325
4326		mutex_lock(&fs_info->balance_mutex);
4327		/* we are good with balance_ctl ripped off from under us */
4328		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4329		atomic_dec(&fs_info->balance_pause_req);
4330	} else {
4331		ret = -ENOTCONN;
4332	}
4333
4334	mutex_unlock(&fs_info->balance_mutex);
4335	return ret;
4336}
4337
4338int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4339{
4340	mutex_lock(&fs_info->balance_mutex);
4341	if (!fs_info->balance_ctl) {
4342		mutex_unlock(&fs_info->balance_mutex);
4343		return -ENOTCONN;
4344	}
4345
4346	/*
4347	 * A paused balance with the item stored on disk can be resumed at
4348	 * mount time if the mount is read-write. Otherwise it's still paused
4349	 * and we must not allow cancelling as it deletes the item.
4350	 */
4351	if (sb_rdonly(fs_info->sb)) {
4352		mutex_unlock(&fs_info->balance_mutex);
4353		return -EROFS;
4354	}
4355
4356	atomic_inc(&fs_info->balance_cancel_req);
4357	/*
4358	 * if we are running just wait and return, balance item is
4359	 * deleted in btrfs_balance in this case
4360	 */
4361	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4362		mutex_unlock(&fs_info->balance_mutex);
4363		wait_event(fs_info->balance_wait_q,
4364			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4365		mutex_lock(&fs_info->balance_mutex);
4366	} else {
4367		mutex_unlock(&fs_info->balance_mutex);
4368		/*
4369		 * Lock released to allow other waiters to continue; we'll
4370		 * reexamine the status afterwards.
4371		 */
4372		mutex_lock(&fs_info->balance_mutex);
4373
4374		if (fs_info->balance_ctl) {
4375			reset_balance_state(fs_info);
4376			clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4377			btrfs_info(fs_info, "balance: canceled");
4378		}
4379	}
4380
4381	BUG_ON(fs_info->balance_ctl ||
4382		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4383	atomic_dec(&fs_info->balance_cancel_req);
4384	mutex_unlock(&fs_info->balance_mutex);
4385	return 0;
4386}
4387
4388int btrfs_uuid_scan_kthread(void *data)
4389{
4390	struct btrfs_fs_info *fs_info = data;
4391	struct btrfs_root *root = fs_info->tree_root;
4392	struct btrfs_key key;
4393	struct btrfs_path *path = NULL;
4394	int ret = 0;
4395	struct extent_buffer *eb;
4396	int slot;
4397	struct btrfs_root_item root_item;
4398	u32 item_size;
4399	struct btrfs_trans_handle *trans = NULL;
4400	bool closing = false;
4401
4402	path = btrfs_alloc_path();
4403	if (!path) {
4404		ret = -ENOMEM;
4405		goto out;
4406	}
4407
4408	key.objectid = 0;
4409	key.type = BTRFS_ROOT_ITEM_KEY;
4410	key.offset = 0;
4411
4412	while (1) {
4413		if (btrfs_fs_closing(fs_info)) {
4414			closing = true;
4415			break;
4416		}
4417		ret = btrfs_search_forward(root, &key, path,
4418				BTRFS_OLDEST_GENERATION);
4419		if (ret) {
4420			if (ret > 0)
4421				ret = 0;
4422			break;
4423		}
4424
4425		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4426		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4427		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4428		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4429			goto skip;
4430
4431		eb = path->nodes[0];
4432		slot = path->slots[0];
4433		item_size = btrfs_item_size_nr(eb, slot);
4434		if (item_size < sizeof(root_item))
4435			goto skip;
4436
4437		read_extent_buffer(eb, &root_item,
4438				   btrfs_item_ptr_offset(eb, slot),
4439				   (int)sizeof(root_item));
4440		if (btrfs_root_refs(&root_item) == 0)
4441			goto skip;
4442
4443		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4444		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4445			if (trans)
4446				goto update_tree;
4447
4448			btrfs_release_path(path);
4449			/*
4450			 * 1 - subvol uuid item
4451			 * 1 - received_subvol uuid item
4452			 */
4453			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4454			if (IS_ERR(trans)) {
4455				ret = PTR_ERR(trans);
4456				break;
4457			}
4458			continue;
4459		} else {
4460			goto skip;
4461		}
4462update_tree:
4463		btrfs_release_path(path);
4464		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4465			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4466						  BTRFS_UUID_KEY_SUBVOL,
4467						  key.objectid);
4468			if (ret < 0) {
4469				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4470					ret);
4471				break;
4472			}
4473		}
4474
4475		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4476			ret = btrfs_uuid_tree_add(trans,
4477						  root_item.received_uuid,
4478						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4479						  key.objectid);
4480			if (ret < 0) {
4481				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4482					ret);
4483				break;
4484			}
4485		}
4486
4487skip:
4488		btrfs_release_path(path);
4489		if (trans) {
4490			ret = btrfs_end_transaction(trans);
4491			trans = NULL;
4492			if (ret)
4493				break;
4494		}
4495
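		/*
		 * Advance the search key in (objectid, type, offset) order,
		 * mirroring how btrfs keys sort: bump the offset first, then
		 * the type, then the objectid, and stop once all three have
		 * reached their maximum values.
		 */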
4496		if (key.offset < (u64)-1) {
4497			key.offset++;
4498		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4499			key.offset = 0;
4500			key.type = BTRFS_ROOT_ITEM_KEY;
4501		} else if (key.objectid < (u64)-1) {
4502			key.offset = 0;
4503			key.type = BTRFS_ROOT_ITEM_KEY;
4504			key.objectid++;
4505		} else {
4506			break;
4507		}
4508		cond_resched();
4509	}
4510
4511out:
4512	btrfs_free_path(path);
4513	if (trans && !IS_ERR(trans))
4514		btrfs_end_transaction(trans);
4515	if (ret)
4516		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4517	else if (!closing)
4518		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4519	up(&fs_info->uuid_tree_rescan_sem);
4520	return 0;
4521}
4522
4523int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4524{
4525	struct btrfs_trans_handle *trans;
4526	struct btrfs_root *tree_root = fs_info->tree_root;
4527	struct btrfs_root *uuid_root;
4528	struct task_struct *task;
4529	int ret;
4530
4531	/*
4532	 * 1 - root node
4533	 * 1 - root item
4534	 */
4535	trans = btrfs_start_transaction(tree_root, 2);
4536	if (IS_ERR(trans))
4537		return PTR_ERR(trans);
4538
4539	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4540	if (IS_ERR(uuid_root)) {
4541		ret = PTR_ERR(uuid_root);
4542		btrfs_abort_transaction(trans, ret);
4543		btrfs_end_transaction(trans);
4544		return ret;
4545	}
4546
4547	fs_info->uuid_root = uuid_root;
4548
4549	ret = btrfs_commit_transaction(trans);
4550	if (ret)
4551		return ret;
4552
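	/*
	 * The commit above persists the new uuid root before the scan
	 * thread starts filling it in transactions of its own.
	 */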
4553	down(&fs_info->uuid_tree_rescan_sem);
4554	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4555	if (IS_ERR(task)) {
4556		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4557		btrfs_warn(fs_info, "failed to start uuid_scan task");
4558		up(&fs_info->uuid_tree_rescan_sem);
4559		return PTR_ERR(task);
4560	}
4561
4562	return 0;
4563}
4564
4565/*
4566 * Shrinking a device means finding all of the device extents past
4567 * the new size, and then following the back refs to the chunks.
4568 * The chunk relocation code actually frees the device extent.
4569 */
4570int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4571{
4572	struct btrfs_fs_info *fs_info = device->fs_info;
4573	struct btrfs_root *root = fs_info->dev_root;
4574	struct btrfs_trans_handle *trans;
4575	struct btrfs_dev_extent *dev_extent = NULL;
4576	struct btrfs_path *path;
4577	u64 length;
4578	u64 chunk_offset;
4579	int ret;
4580	int slot;
4581	int failed = 0;
4582	bool retried = false;
4583	struct extent_buffer *l;
4584	struct btrfs_key key;
4585	struct btrfs_super_block *super_copy = fs_info->super_copy;
4586	u64 old_total = btrfs_super_total_bytes(super_copy);
4587	u64 old_size = btrfs_device_get_total_bytes(device);
4588	u64 diff;
4589	u64 start;
4590
4591	new_size = round_down(new_size, fs_info->sectorsize);
4592	start = new_size;
4593	diff = round_down(old_size - new_size, fs_info->sectorsize);
4594
4595	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4596		return -EINVAL;
4597
4598	path = btrfs_alloc_path();
4599	if (!path)
4600		return -ENOMEM;
4601
4602	path->reada = READA_BACK;
4603
4604	trans = btrfs_start_transaction(root, 0);
4605	if (IS_ERR(trans)) {
4606		btrfs_free_path(path);
4607		return PTR_ERR(trans);
4608	}
4609
4610	mutex_lock(&fs_info->chunk_mutex);
4611
4612	btrfs_device_set_total_bytes(device, new_size);
4613	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4614		device->fs_devices->total_rw_bytes -= diff;
4615		atomic64_sub(diff, &fs_info->free_chunk_space);
4616	}
4617
4618	/*
4619	 * Once the device's size has been set to the new size, ensure all
4620	 * in-memory chunks are synced to disk so that the loop below sees them
4621	 * and relocates them accordingly.
4622	 */
4623	if (contains_pending_extent(device, &start, diff)) {
4624		mutex_unlock(&fs_info->chunk_mutex);
4625		ret = btrfs_commit_transaction(trans);
4626		if (ret)
4627			goto done;
4628	} else {
4629		mutex_unlock(&fs_info->chunk_mutex);
4630		btrfs_end_transaction(trans);
4631	}
4632
4633again:
4634	key.objectid = device->devid;
4635	key.offset = (u64)-1;
4636	key.type = BTRFS_DEV_EXTENT_KEY;
4637
4638	do {
4639		mutex_lock(&fs_info->delete_unused_bgs_mutex);
4640		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4641		if (ret < 0) {
4642			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4643			goto done;
4644		}
4645
4646		ret = btrfs_previous_item(root, path, 0, key.type);
4647		if (ret)
4648			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4649		if (ret < 0)
4650			goto done;
4651		if (ret) {
4652			ret = 0;
4653			btrfs_release_path(path);
4654			break;
4655		}
4656
4657		l = path->nodes[0];
4658		slot = path->slots[0];
4659		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4660
4661		if (key.objectid != device->devid) {
4662			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4663			btrfs_release_path(path);
4664			break;
4665		}
4666
4667		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4668		length = btrfs_dev_extent_length(l, dev_extent);
4669
4670		if (key.offset + length <= new_size) {
4671			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4672			btrfs_release_path(path);
4673			break;
4674		}
4675
4676		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4677		btrfs_release_path(path);
4678
4679		/*
4680		 * We may be relocating the only data chunk we have,
4681		 * which could potentially lose the data's raid
4682		 * profile, so let's allocate an empty one in
4683		 * advance.
4684		 */
4685		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4686		if (ret < 0) {
4687			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4688			goto done;
4689		}
4690
4691		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4692		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4693		if (ret == -ENOSPC) {
4694			failed++;
4695		} else if (ret) {
4696			if (ret == -ETXTBSY) {
4697				btrfs_warn(fs_info,
4698		   "could not shrink block group %llu due to active swapfile",
4699					   chunk_offset);
4700			}
4701			goto done;
4702		}
4703	} while (key.offset-- > 0);
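	/*
	 * The loop above walks device extents from the end of the device
	 * towards offset 0; decrementing key.offset keeps each search
	 * strictly before the extent that was just relocated.
	 */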
4704
4705	if (failed && !retried) {
4706		failed = 0;
4707		retried = true;
4708		goto again;
4709	} else if (failed && retried) {
4710		ret = -ENOSPC;
4711		goto done;
4712	}
4713
4714	/* Shrinking succeeded, else we would be at "done". */
4715	trans = btrfs_start_transaction(root, 0);
4716	if (IS_ERR(trans)) {
4717		ret = PTR_ERR(trans);
4718		goto done;
4719	}
4720
4721	mutex_lock(&fs_info->chunk_mutex);
4722	/* Clear all state bits beyond the shrunk device size */
4723	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4724			  CHUNK_STATE_MASK);
4725
4726	btrfs_device_set_disk_total_bytes(device, new_size);
4727	if (list_empty(&device->post_commit_list))
4728		list_add_tail(&device->post_commit_list,
4729			      &trans->transaction->dev_update_list);
4730
4731	WARN_ON(diff > old_total);
4732	btrfs_set_super_total_bytes(super_copy,
4733			round_down(old_total - diff, fs_info->sectorsize));
4734	mutex_unlock(&fs_info->chunk_mutex);
4735
4736	/* Now btrfs_update_device() will change the on-disk size. */
4737	ret = btrfs_update_device(trans, device);
4738	if (ret < 0) {
4739		btrfs_abort_transaction(trans, ret);
4740		btrfs_end_transaction(trans);
4741	} else {
4742		ret = btrfs_commit_transaction(trans);
4743	}
4744done:
4745	btrfs_free_path(path);
4746	if (ret) {
4747		mutex_lock(&fs_info->chunk_mutex);
4748		btrfs_device_set_total_bytes(device, old_size);
4749		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4750			device->fs_devices->total_rw_bytes += diff;
4751		atomic64_add(diff, &fs_info->free_chunk_space);
4752		mutex_unlock(&fs_info->chunk_mutex);
4753	}
4754	return ret;
4755}
4756
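/*
 * Append a (disk key, chunk item) pair to the superblock's
 * sys_chunk_array.  The array is a packed sequence of struct
 * btrfs_disk_key headers, each followed by its struct btrfs_chunk,
 * one pair per SYSTEM chunk.
 */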
4757static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4758			   struct btrfs_key *key,
4759			   struct btrfs_chunk *chunk, int item_size)
4760{
4761	struct btrfs_super_block *super_copy = fs_info->super_copy;
4762	struct btrfs_disk_key disk_key;
4763	u32 array_size;
4764	u8 *ptr;
4765
4766	mutex_lock(&fs_info->chunk_mutex);
4767	array_size = btrfs_super_sys_array_size(super_copy);
4768	if (array_size + item_size + sizeof(disk_key)
4769			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4770		mutex_unlock(&fs_info->chunk_mutex);
4771		return -EFBIG;
4772	}
4773
4774	ptr = super_copy->sys_chunk_array + array_size;
4775	btrfs_cpu_key_to_disk(&disk_key, key);
4776	memcpy(ptr, &disk_key, sizeof(disk_key));
4777	ptr += sizeof(disk_key);
4778	memcpy(ptr, chunk, item_size);
4779	item_size += sizeof(disk_key);
4780	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4781	mutex_unlock(&fs_info->chunk_mutex);
4782
4783	return 0;
4784}
4785
4786/*
4787 * sort the devices in descending order by max_avail, total_avail
4788 */
4789static int btrfs_cmp_device_info(const void *a, const void *b)
4790{
4791	const struct btrfs_device_info *di_a = a;
4792	const struct btrfs_device_info *di_b = b;
4793
4794	if (di_a->max_avail > di_b->max_avail)
4795		return -1;
4796	if (di_a->max_avail < di_b->max_avail)
4797		return 1;
4798	if (di_a->total_avail > di_b->total_avail)
4799		return -1;
4800	if (di_a->total_avail < di_b->total_avail)
4801		return 1;
4802	return 0;
4803}
4804
4805static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4806{
4807	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4808		return;
4809
4810	btrfs_set_fs_incompat(info, RAID56);
4811}
4812
4813static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
4814{
4815	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
4816		return;
4817
4818	btrfs_set_fs_incompat(info, RAID1C34);
4819}
4820
4821/*
4822 * Structure used internally by the btrfs_alloc_chunk() function.
4823 * Wraps needed parameters.
4824 */
4825struct alloc_chunk_ctl {
4826	u64 start;
4827	u64 type;
4828	/* Total number of stripes to allocate */
4829	int num_stripes;
4830	/* sub_stripes info for map */
4831	int sub_stripes;
4832	/* Stripes per device */
4833	int dev_stripes;
4834	/* Maximum number of devices to use */
4835	int devs_max;
4836	/* Minimum number of devices to use */
4837	int devs_min;
4838	/* ndevs has to be a multiple of this */
4839	int devs_increment;
4840	/* Number of copies */
4841	int ncopies;
4842	/* Number of stripes worth of bytes to store parity information */
4843	int nparity;
4844	u64 max_stripe_size;
4845	u64 max_chunk_size;
4846	u64 dev_extent_min;
4847	u64 stripe_size;
4848	u64 chunk_size;
4849	int ndevs;
4850};
4851
4852static void init_alloc_chunk_ctl_policy_regular(
4853				struct btrfs_fs_devices *fs_devices,
4854				struct alloc_chunk_ctl *ctl)
4855{
4856	u64 type = ctl->type;
4857
4858	if (type & BTRFS_BLOCK_GROUP_DATA) {
4859		ctl->max_stripe_size = SZ_1G;
4860		ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4861	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4862		/* For larger filesystems, use larger metadata chunks */
4863		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4864			ctl->max_stripe_size = SZ_1G;
4865		else
4866			ctl->max_stripe_size = SZ_256M;
4867		ctl->max_chunk_size = ctl->max_stripe_size;
4868	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4869		ctl->max_stripe_size = SZ_32M;
4870		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
4871		ctl->devs_max = min_t(int, ctl->devs_max,
4872				      BTRFS_MAX_DEVS_SYS_CHUNK);
4873	} else {
4874		BUG();
4875	}
4876
4877	/* We don't want a chunk larger than 10% of writable space */
4878	ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4879				  ctl->max_chunk_size);
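	/*
	 * div_factor() evaluates to num * factor / 10, so a factor of 1
	 * yields the 10% cap described above.
	 */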
4880	ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
4881}
4882
4883static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
4884				 struct alloc_chunk_ctl *ctl)
4885{
4886	int index = btrfs_bg_flags_to_raid_index(ctl->type);
4887
4888	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
4889	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
4890	ctl->devs_max = btrfs_raid_array[index].devs_max;
4891	if (!ctl->devs_max)
4892		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
4893	ctl->devs_min = btrfs_raid_array[index].devs_min;
4894	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
4895	ctl->ncopies = btrfs_raid_array[index].ncopies;
4896	ctl->nparity = btrfs_raid_array[index].nparity;
4897	ctl->ndevs = 0;
4898
4899	switch (fs_devices->chunk_alloc_policy) {
4900	case BTRFS_CHUNK_ALLOC_REGULAR:
4901		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
4902		break;
4903	default:
4904		BUG();
4905	}
4906}
4907
4908static int gather_device_info(struct btrfs_fs_devices *fs_devices,
4909			      struct alloc_chunk_ctl *ctl,
4910			      struct btrfs_device_info *devices_info)
4911{
4912	struct btrfs_fs_info *info = fs_devices->fs_info;
4913	struct btrfs_device *device;
4914	u64 total_avail;
4915	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
4916	int ret;
4917	int ndevs = 0;
4918	u64 max_avail;
4919	u64 dev_offset;
4920
4921	/*
4922	 * In the first pass through the devices list, we gather information
4923	 * about the available holes on each device.
4924	 */
4925	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
4926		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4927			WARN(1, KERN_ERR
4928			       "BTRFS: read-only device in alloc_list\n");
4929			continue;
4930		}
4931
4932		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
4933					&device->dev_state) ||
4934		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4935			continue;
4936
4937		if (device->total_bytes > device->bytes_used)
4938			total_avail = device->total_bytes - device->bytes_used;
4939		else
4940			total_avail = 0;
4941
4942		/* If there is no space on this device, skip it. */
4943		if (total_avail < ctl->dev_extent_min)
4944			continue;
4945
4946		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
4947					   &max_avail);
4948		if (ret && ret != -ENOSPC)
4949			return ret;
4950
4951		if (ret == 0)
4952			max_avail = dev_extent_want;
4953
4954		if (max_avail < ctl->dev_extent_min) {
4955			if (btrfs_test_opt(info, ENOSPC_DEBUG))
4956				btrfs_debug(info,
4957			"%s: devid %llu has no free space, have=%llu want=%llu",
4958					    __func__, device->devid, max_avail,
4959					    ctl->dev_extent_min);
4960			continue;
4961		}
4962
4963		if (ndevs == fs_devices->rw_devices) {
4964			WARN(1, "%s: found more than %llu devices\n",
4965			     __func__, fs_devices->rw_devices);
4966			break;
4967		}
4968		devices_info[ndevs].dev_offset = dev_offset;
4969		devices_info[ndevs].max_avail = max_avail;
4970		devices_info[ndevs].total_avail = total_avail;
4971		devices_info[ndevs].dev = device;
4972		++ndevs;
4973	}
4974	ctl->ndevs = ndevs;
4975
4976	/*
4977	 * Now sort the devices by hole size / available space.
4978	 */
4979	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4980	     btrfs_cmp_device_info, NULL);
4981
4982	return 0;
4983}
4984
4985static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
4986				      struct btrfs_device_info *devices_info)
4987{
4988	/* Number of stripes that count for block group size */
4989	int data_stripes;
4990
4991	/*
4992	 * The primary goal is to maximize the number of stripes, so use as
4993	 * many devices as possible, even if the stripes are not maximum sized.
4994	 *
4995	 * The DUP profile stores more than one stripe per device, the
4996	 * max_avail is the total size so we have to adjust.
4997	 */
4998	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
4999				   ctl->dev_stripes);
5000	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5001
5002	/* This will have to be fixed for RAID1 and RAID10 over more drives */
5003	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5004
5005	/*
5006	 * Use the number of data stripes to figure out how big this chunk is
5007	 * really going to be in terms of logical address space, and compare
5008	 * that answer with the max chunk size. If it's higher, we try to
5009	 * reduce stripe_size.
5010	 */
5011	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5012		/*
5013		 * Reduce stripe_size, round it up to a 16MB boundary again and
5014		 * then use it, unless it ends up being even bigger than the
5015		 * previous value we had already.
5016		 */
5017		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5018							data_stripes), SZ_16M),
5019				       ctl->stripe_size);
5020	}
5021
5022	/* Align to BTRFS_STRIPE_LEN */
5023	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5024	ctl->chunk_size = ctl->stripe_size * data_stripes;
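	/*
	 * chunk_size is the logical size exposed to the allocator; the
	 * physical footprint is stripe_size * num_stripes, which is larger
	 * whenever the profile has extra copies or parity stripes.
	 */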
5025
5026	return 0;
5027}
5028
5029static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5030			      struct alloc_chunk_ctl *ctl,
5031			      struct btrfs_device_info *devices_info)
5032{
5033	struct btrfs_fs_info *info = fs_devices->fs_info;
5034
5035	/*
5036	 * Round down to number of usable stripes, devs_increment can be any
5037	 * number so we can't use round_down() that requires power of 2, while
5038	 * rounddown is safe.
5039	 */
5040	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5041
5042	if (ctl->ndevs < ctl->devs_min) {
5043		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5044			btrfs_debug(info,
5045	"%s: not enough devices with free space: have=%d minimum required=%d",
5046				    __func__, ctl->ndevs, ctl->devs_min);
5047		}
5048		return -ENOSPC;
5049	}
5050
5051	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5052
5053	switch (fs_devices->chunk_alloc_policy) {
5054	case BTRFS_CHUNK_ALLOC_REGULAR:
5055		return decide_stripe_size_regular(ctl, devices_info);
5056	default:
5057		BUG();
5058	}
5059}
5060
5061static int create_chunk(struct btrfs_trans_handle *trans,
5062			struct alloc_chunk_ctl *ctl,
5063			struct btrfs_device_info *devices_info)
5064{
5065	struct btrfs_fs_info *info = trans->fs_info;
5066	struct map_lookup *map = NULL;
5067	struct extent_map_tree *em_tree;
5068	struct extent_map *em;
5069	u64 start = ctl->start;
5070	u64 type = ctl->type;
5071	int ret;
5072	int i;
5073	int j;
5074
5075	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5076	if (!map)
5077		return -ENOMEM;
5078	map->num_stripes = ctl->num_stripes;
5079
5080	for (i = 0; i < ctl->ndevs; ++i) {
5081		for (j = 0; j < ctl->dev_stripes; ++j) {
5082			int s = i * ctl->dev_stripes + j;
5083			map->stripes[s].dev = devices_info[i].dev;
5084			map->stripes[s].physical = devices_info[i].dev_offset +
5085						   j * ctl->stripe_size;
5086		}
5087	}
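	/*
	 * The loop above carves dev_stripes consecutive stripes out of the
	 * same free extent on each device, which is how DUP places both of
	 * its copies on one disk.
	 */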
5088	map->stripe_len = BTRFS_STRIPE_LEN;
5089	map->io_align = BTRFS_STRIPE_LEN;
5090	map->io_width = BTRFS_STRIPE_LEN;
5091	map->type = type;
5092	map->sub_stripes = ctl->sub_stripes;
5093
5094	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5095
5096	em = alloc_extent_map();
5097	if (!em) {
5098		kfree(map);
5099		return -ENOMEM;
5100	}
5101	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5102	em->map_lookup = map;
5103	em->start = start;
5104	em->len = ctl->chunk_size;
5105	em->block_start = 0;
5106	em->block_len = em->len;
5107	em->orig_block_len = ctl->stripe_size;
5108
5109	em_tree = &info->mapping_tree;
5110	write_lock(&em_tree->lock);
5111	ret = add_extent_mapping(em_tree, em, 0);
5112	if (ret) {
5113		write_unlock(&em_tree->lock);
5114		free_extent_map(em);
5115		return ret;
5116	}
5117	write_unlock(&em_tree->lock);
5118
5119	ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5120	if (ret)
5121		goto error_del_extent;
5122
5123	for (i = 0; i < map->num_stripes; i++) {
5124		struct btrfs_device *dev = map->stripes[i].dev;
5125
5126		btrfs_device_set_bytes_used(dev,
5127					    dev->bytes_used + ctl->stripe_size);
5128		if (list_empty(&dev->post_commit_list))
5129			list_add_tail(&dev->post_commit_list,
5130				      &trans->transaction->dev_update_list);
5131	}
5132
5133	atomic64_sub(ctl->stripe_size * map->num_stripes,
5134		     &info->free_chunk_space);
5135
5136	free_extent_map(em);
5137	check_raid56_incompat_flag(info, type);
5138	check_raid1c34_incompat_flag(info, type);
5139
5140	return 0;
5141
5142error_del_extent:
5143	write_lock(&em_tree->lock);
5144	remove_extent_mapping(em_tree, em);
5145	write_unlock(&em_tree->lock);
5146
5147	/* One for our allocation */
5148	free_extent_map(em);
5149	/* One for the tree reference */
5150	free_extent_map(em);
5151
5152	return ret;
5153}
5154
5155int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5156{
5157	struct btrfs_fs_info *info = trans->fs_info;
5158	struct btrfs_fs_devices *fs_devices = info->fs_devices;
5159	struct btrfs_device_info *devices_info = NULL;
5160	struct alloc_chunk_ctl ctl;
5161	int ret;
5162
5163	lockdep_assert_held(&info->chunk_mutex);
5164
5165	if (!alloc_profile_is_valid(type, 0)) {
5166		ASSERT(0);
5167		return -EINVAL;
5168	}
5169
5170	if (list_empty(&fs_devices->alloc_list)) {
5171		if (btrfs_test_opt(info, ENOSPC_DEBUG))
5172			btrfs_debug(info, "%s: no writable device", __func__);
5173		return -ENOSPC;
5174	}
5175
5176	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5177		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5178		ASSERT(0);
5179		return -EINVAL;
5180	}
5181
5182	ctl.start = find_next_chunk(info);
5183	ctl.type = type;
5184	init_alloc_chunk_ctl(fs_devices, &ctl);
5185
5186	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5187			       GFP_NOFS);
5188	if (!devices_info)
5189		return -ENOMEM;
5190
5191	ret = gather_device_info(fs_devices, &ctl, devices_info);
5192	if (ret < 0)
5193		goto out;
5194
5195	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5196	if (ret < 0)
5197		goto out;
5198
5199	ret = create_chunk(trans, &ctl, devices_info);
5200
5201out:
5202	kfree(devices_info);
5203	return ret;
5204}
5205
5206/*
5207 * Chunk allocation falls into two parts. The first part does work
5208 * that makes the newly allocated chunk usable, but does not do any operation
5209 * that modifies the chunk tree. The second part does the work that
5210 * requires modifying the chunk tree. This division is important for the
5211 * bootstrap process of adding storage to a seed btrfs.
5212 */
5213int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5214			     u64 chunk_offset, u64 chunk_size)
5215{
5216	struct btrfs_fs_info *fs_info = trans->fs_info;
5217	struct btrfs_root *extent_root = fs_info->extent_root;
5218	struct btrfs_root *chunk_root = fs_info->chunk_root;
5219	struct btrfs_key key;
5220	struct btrfs_device *device;
5221	struct btrfs_chunk *chunk;
5222	struct btrfs_stripe *stripe;
5223	struct extent_map *em;
5224	struct map_lookup *map;
5225	size_t item_size;
5226	u64 dev_offset;
5227	u64 stripe_size;
5228	int i = 0;
5229	int ret = 0;
5230
5231	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
5232	if (IS_ERR(em))
5233		return PTR_ERR(em);
5234
5235	map = em->map_lookup;
5236	item_size = btrfs_chunk_item_size(map->num_stripes);
5237	stripe_size = em->orig_block_len;
5238
5239	chunk = kzalloc(item_size, GFP_NOFS);
5240	if (!chunk) {
5241		ret = -ENOMEM;
5242		goto out;
5243	}
5244
5245	/*
5246	 * Take the device list mutex to prevent races with the final phase of
5247	 * a device replace operation that replaces the device object associated
5248	 * with the map's stripes, because the device object's id can change
5249	 * at any time during that final phase of the device replace operation
5250	 * (dev-replace.c:btrfs_dev_replace_finishing()).
5251	 */
5252	mutex_lock(&fs_info->fs_devices->device_list_mutex);
5253	for (i = 0; i < map->num_stripes; i++) {
5254		device = map->stripes[i].dev;
5255		dev_offset = map->stripes[i].physical;
5256
5257		ret = btrfs_update_device(trans, device);
5258		if (ret)
5259			break;
5260		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5261					     dev_offset, stripe_size);
5262		if (ret)
5263			break;
5264	}
5265	if (ret) {
5266		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5267		goto out;
5268	}
5269
5270	stripe = &chunk->stripe;
5271	for (i = 0; i < map->num_stripes; i++) {
5272		device = map->stripes[i].dev;
5273		dev_offset = map->stripes[i].physical;
5274
5275		btrfs_set_stack_stripe_devid(stripe, device->devid);
5276		btrfs_set_stack_stripe_offset(stripe, dev_offset);
5277		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5278		stripe++;
5279	}
5280	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5281
5282	btrfs_set_stack_chunk_length(chunk, chunk_size);
5283	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5284	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5285	btrfs_set_stack_chunk_type(chunk, map->type);
5286	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5287	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5288	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5289	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5290	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
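	/*
	 * The btrfs_set_stack_chunk_*() setters above fill an in-memory
	 * copy of the item; it is inserted into the chunk tree as a single
	 * blob just below.
	 */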
5291
5292	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5293	key.type = BTRFS_CHUNK_ITEM_KEY;
5294	key.offset = chunk_offset;
5295
5296	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5297	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5298		/*
5299		 * TODO: Cleanup of inserted chunk root in case of
5300		 * failure.
5301		 */
5302		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5303	}
5304
5305out:
5306	kfree(chunk);
5307	free_extent_map(em);
5308	return ret;
5309}
5310
5311static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5312{
5313	struct btrfs_fs_info *fs_info = trans->fs_info;
5314	u64 alloc_profile;
5315	int ret;
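	/*
	 * A filesystem sprouting from a seed device starts with no writable
	 * chunks, so allocate one metadata and one system chunk on the new
	 * device up front.
	 */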
5316
5317	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5318	ret = btrfs_alloc_chunk(trans, alloc_profile);
5319	if (ret)
5320		return ret;
5321
5322	alloc_profile = btrfs_system_alloc_profile(fs_info);
5323	ret = btrfs_alloc_chunk(trans, alloc_profile);
5324	return ret;
5325}
5326
5327static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5328{
5329	const int index = btrfs_bg_flags_to_raid_index(map->type);
5330
5331	return btrfs_raid_array[index].tolerated_failures;
5332}
5333
5334int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5335{
5336	struct extent_map *em;
5337	struct map_lookup *map;
5338	int readonly = 0;
5339	int miss_ndevs = 0;
5340	int i;
5341
5342	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5343	if (IS_ERR(em))
5344		return 1;
5345
5346	map = em->map_lookup;
5347	for (i = 0; i < map->num_stripes; i++) {
5348		if (test_bit(BTRFS_DEV_STATE_MISSING,
5349					&map->stripes[i].dev->dev_state)) {
5350			miss_ndevs++;
5351			continue;
5352		}
5353		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5354					&map->stripes[i].dev->dev_state)) {
5355			readonly = 1;
5356			goto end;
5357		}
5358	}
5359
5360	/*
5361	 * If the number of missing devices is larger than max errors,
5362	 * we can not write the data into that chunk successfully, so
5363	 * set it readonly.
5364	 */
5365	if (miss_ndevs > btrfs_chunk_max_errors(map))
5366		readonly = 1;
5367end:
5368	free_extent_map(em);
5369	return readonly;
5370}
5371
5372void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5373{
5374	struct extent_map *em;
5375
5376	while (1) {
5377		write_lock(&tree->lock);
5378		em = lookup_extent_mapping(tree, 0, (u64)-1);
5379		if (em)
5380			remove_extent_mapping(tree, em);
5381		write_unlock(&tree->lock);
5382		if (!em)
5383			break;
5384		/* once for us */
5385		free_extent_map(em);
5386		/* once for the tree */
5387		free_extent_map(em);
5388	}
5389}
5390
5391int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5392{
5393	struct extent_map *em;
5394	struct map_lookup *map;
5395	int ret;
5396
5397	em = btrfs_get_chunk_map(fs_info, logical, len);
5398	if (IS_ERR(em))
5399		/*
5400		 * We could return errors for these cases, but that could get
5401		 * ugly and we'd probably do the same thing which is just not do
5402		 * anything else and exit, so return 1 so the callers don't try
5403		 * to use other copies.
5404		 */
5405		return 1;
5406
5407	map = em->map_lookup;
5408	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5409		ret = map->num_stripes;
5410	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5411		ret = map->sub_stripes;
5412	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5413		ret = 2;
5414	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5415		/*
5416		 * There could be two corrupted data stripes, we need
5417		 * to loop retry in order to rebuild the correct data.
5418		 *
5419		 * Fail a stripe at a time on every retry except the
5420		 * stripe under reconstruction.
5421		 */
5422		ret = map->num_stripes;
5423	else
5424		ret = 1;
5425	free_extent_map(em);
5426
5427	down_read(&fs_info->dev_replace.rwsem);
5428	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5429	    fs_info->dev_replace.tgtdev)
5430		ret++;
5431	up_read(&fs_info->dev_replace.rwsem);
5432
5433	return ret;
5434}
5435
5436unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5437				    u64 logical)
5438{
5439	struct extent_map *em;
5440	struct map_lookup *map;
5441	unsigned long len = fs_info->sectorsize;
5442
5443	em = btrfs_get_chunk_map(fs_info, logical, len);
5444
5445	if (!WARN_ON(IS_ERR(em))) {
5446		map = em->map_lookup;
5447		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5448			len = map->stripe_len * nr_data_stripes(map);
5449		free_extent_map(em);
5450	}
5451	return len;
5452}
5453
5454int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5455{
5456	struct extent_map *em;
5457	struct map_lookup *map;
5458	int ret = 0;
5459
5460	em = btrfs_get_chunk_map(fs_info, logical, len);
5461
5462	if (!WARN_ON(IS_ERR(em))) {
5463		map = em->map_lookup;
5464		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5465			ret = 1;
5466		free_extent_map(em);
5467	}
5468	return ret;
5469}
5470
5471static int find_live_mirror(struct btrfs_fs_info *fs_info,
5472			    struct map_lookup *map, int first,
5473			    int dev_replace_is_ongoing)
5474{
5475	int i;
5476	int num_stripes;
5477	int preferred_mirror;
5478	int tolerance;
5479	struct btrfs_device *srcdev;
5480
5481	ASSERT((map->type &
5482		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5483
5484	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5485		num_stripes = map->sub_stripes;
5486	else
5487		num_stripes = map->num_stripes;
5488
5489	preferred_mirror = first + current->pid % num_stripes;
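	/*
	 * Using the caller's pid to pick the starting mirror spreads read
	 * load across the copies without needing any shared state.
	 */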
5490
5491	if (dev_replace_is_ongoing &&
5492	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5493	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5494		srcdev = fs_info->dev_replace.srcdev;
5495	else
5496		srcdev = NULL;
5497
5498	/*
5499	 * try to avoid the drive that is the source drive for a
5500	 * dev-replace procedure, only choose it if no other non-missing
5501	 * mirror is available
5502	 */
5503	for (tolerance = 0; tolerance < 2; tolerance++) {
5504		if (map->stripes[preferred_mirror].dev->bdev &&
5505		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5506			return preferred_mirror;
5507		for (i = first; i < first + num_stripes; i++) {
5508			if (map->stripes[i].dev->bdev &&
5509			    (tolerance || map->stripes[i].dev != srcdev))
5510				return i;
5511		}
5512	}
5513
5514	/* We couldn't find one that doesn't fail; just return something
5515	 * and the io error handling code will clean up eventually.
5516	 */
5517	return preferred_mirror;
5518}
5519
5520/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5521static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5522{
5523	int i;
5524	int again = 1;
5525
5526	while (again) {
5527		again = 0;
5528		for (i = 0; i < num_stripes - 1; i++) {
5529			/* Swap if parity is on a smaller index */
5530			if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
5531				swap(bbio->stripes[i], bbio->stripes[i + 1]);
5532				swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
5533				again = 1;
5534			}
5535		}
5536	}
5537}
5538
5539static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5540{
5541	struct btrfs_bio *bbio = kzalloc(
5542		 /* the size of the btrfs_bio */
5543		sizeof(struct btrfs_bio) +
5544		/* plus the variable array for the stripes */
5545		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5546		/* plus the variable array for the tgt dev */
5547		sizeof(int) * (real_stripes) +
5548		/*
5549		 * plus the raid_map, which includes both the tgt dev
5550		 * and the stripes
5551		 */
5552		sizeof(u64) * (total_stripes),
5553		GFP_NOFS|__GFP_NOFAIL);
5554
5555	atomic_set(&bbio->error, 0);
5556	refcount_set(&bbio->refs, 1);
5557
5558	bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
5559	bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
5560
5561	return bbio;
5562}
5563
5564void btrfs_get_bbio(struct btrfs_bio *bbio)
5565{
5566	WARN_ON(!refcount_read(&bbio->refs));
5567	refcount_inc(&bbio->refs);
5568}
5569
5570void btrfs_put_bbio(struct btrfs_bio *bbio)
5571{
5572	if (!bbio)
5573		return;
5574	if (refcount_dec_and_test(&bbio->refs))
5575		kfree(bbio);
5576}
5577
5578/* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5579/*
5580 * Please note that discard won't be sent to the target device of a
5581 * device replace.
5582 */
5583static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5584					 u64 logical, u64 *length_ret,
5585					 struct btrfs_bio **bbio_ret)
5586{
5587	struct extent_map *em;
5588	struct map_lookup *map;
5589	struct btrfs_bio *bbio;
5590	u64 length = *length_ret;
5591	u64 offset;
5592	u64 stripe_nr;
5593	u64 stripe_nr_end;
5594	u64 stripe_end_offset;
5595	u64 stripe_cnt;
5596	u64 stripe_len;
5597	u64 stripe_offset;
5598	u64 num_stripes;
5599	u32 stripe_index;
5600	u32 factor = 0;
5601	u32 sub_stripes = 0;
5602	u64 stripes_per_dev = 0;
5603	u32 remaining_stripes = 0;
5604	u32 last_stripe = 0;
5605	int ret = 0;
5606	int i;
5607
5608	/* discard always returns a bbio */
5609	ASSERT(bbio_ret);
5610
5611	em = btrfs_get_chunk_map(fs_info, logical, length);
5612	if (IS_ERR(em))
5613		return PTR_ERR(em);
5614
5615	map = em->map_lookup;
5616	/* we don't discard raid56 yet */
5617	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5618		ret = -EOPNOTSUPP;
5619		goto out;
5620	}
5621
5622	offset = logical - em->start;
5623	length = min_t(u64, em->start + em->len - logical, length);
5624	*length_ret = length;
5625
5626	stripe_len = map->stripe_len;
5627	/*
5628	 * stripe_nr counts the total number of stripes we have to stride
5629	 * to get to this block
5630	 */
5631	stripe_nr = div64_u64(offset, stripe_len);
5632
5633	/* stripe_offset is the offset of this block in its stripe */
5634	stripe_offset = offset - stripe_nr * stripe_len;
5635
5636	stripe_nr_end = round_up(offset + length, map->stripe_len);
5637	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5638	stripe_cnt = stripe_nr_end - stripe_nr;
5639	stripe_end_offset = stripe_nr_end * map->stripe_len -
5640			    (offset + length);
5641	/*
5642	 * after this, stripe_nr is the number of stripes on this
5643	 * device we have to walk to find the data, and stripe_index is
5644	 * the number of our device in the stripe array
5645	 */
5646	num_stripes = 1;
5647	stripe_index = 0;
5648	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5649			 BTRFS_BLOCK_GROUP_RAID10)) {
5650		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5651			sub_stripes = 1;
5652		else
5653			sub_stripes = map->sub_stripes;
5654
5655		factor = map->num_stripes / sub_stripes;
5656		num_stripes = min_t(u64, map->num_stripes,
5657				    sub_stripes * stripe_cnt);
5658		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5659		stripe_index *= sub_stripes;
5660		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5661					      &remaining_stripes);
5662		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5663		last_stripe *= sub_stripes;
5664	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5665				BTRFS_BLOCK_GROUP_DUP)) {
5666		num_stripes = map->num_stripes;
5667	} else {
5668		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5669					&stripe_index);
5670	}
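	/*
	 * At this point stripe_nr/stripe_index address the first stripe the
	 * discard touches; for striped profiles the factor bookkeeping above
	 * lets the per-device discard lengths be computed below.
	 */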
5671
5672	bbio = alloc_btrfs_bio(num_stripes, 0);
5673	if (!bbio) {
5674		ret = -ENOMEM;
5675		goto out;
5676	}
5677
5678	for (i = 0; i < num_stripes; i++) {
5679		bbio->stripes[i].physical =
5680			map->stripes[stripe_index].physical +
5681			stripe_offset + stripe_nr * map->stripe_len;
5682		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5683
5684		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5685				 BTRFS_BLOCK_GROUP_RAID10)) {
5686			bbio->stripes[i].length = stripes_per_dev *
5687				map->stripe_len;
5688
5689			if (i / sub_stripes < remaining_stripes)
5690				bbio->stripes[i].length +=
5691					map->stripe_len;
5692
5693			/*
5694			 * Special for the first stripe and
5695			 * the last stripe:
5696			 *
5697			 * |-------|...|-------|
5698			 *     |----------|
5699			 *    off     end_off
5700			 */
5701			if (i < sub_stripes)
5702				bbio->stripes[i].length -=
5703					stripe_offset;
5704
5705			if (stripe_index >= last_stripe &&
5706			    stripe_index <= (last_stripe +
5707					     sub_stripes - 1))
5708				bbio->stripes[i].length -=
5709					stripe_end_offset;
5710
5711			if (i == sub_stripes - 1)
5712				stripe_offset = 0;
5713		} else {
5714			bbio->stripes[i].length = length;
5715		}
5716
5717		stripe_index++;
5718		if (stripe_index == map->num_stripes) {
5719			stripe_index = 0;
5720			stripe_nr++;
5721		}
5722	}
5723
5724	*bbio_ret = bbio;
5725	bbio->map_type = map->type;
5726	bbio->num_stripes = num_stripes;
5727out:
5728	free_extent_map(em);
5729	return ret;
5730}
5731
5732/*
5733 * In dev-replace case, for repair case (that's the only case where the mirror
5734 * is selected explicitly when calling btrfs_map_block), blocks left of the
5735 * left cursor can also be read from the target drive.
5736 *
5737 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5738 * array of stripes.
5739 * For READ, it also needs to be supported using the same mirror number.
5740 *
5741 * If the requested block is not left of the left cursor, EIO is returned. This
5742 * can happen because btrfs_num_copies() returns one more in the dev-replace
5743 * case.
5744 */
5745static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5746					 u64 logical, u64 length,
5747					 u64 srcdev_devid, int *mirror_num,
5748					 u64 *physical)
5749{
5750	struct btrfs_bio *bbio = NULL;
5751	int num_stripes;
5752	int index_srcdev = 0;
5753	int found = 0;
5754	u64 physical_of_found = 0;
5755	int i;
5756	int ret = 0;
5757
5758	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5759				logical, &length, &bbio, 0, 0);
5760	if (ret) {
5761		ASSERT(bbio == NULL);
5762		return ret;
5763	}
5764
5765	num_stripes = bbio->num_stripes;
5766	if (*mirror_num > num_stripes) {
5767		/*
5768		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5769		 * that means that the requested area is not left of the left
5770		 * cursor
5771		 */
5772		btrfs_put_bbio(bbio);
5773		return -EIO;
5774	}
5775
5776	/*
5777	 * process the rest of the function using the mirror_num of the source
5778	 * drive. Therefore look it up first.  At the end, patch the device
5779	 * pointer to the one of the target drive.
5780	 */
5781	for (i = 0; i < num_stripes; i++) {
5782		if (bbio->stripes[i].dev->devid != srcdev_devid)
5783			continue;
5784
5785		/*
5786		 * In case of DUP, in order to keep it simple, only add the
5787		 * mirror with the lowest physical address
5788		 */
5789		if (found &&
5790		    physical_of_found <= bbio->stripes[i].physical)
5791			continue;
5792
5793		index_srcdev = i;
5794		found = 1;
5795		physical_of_found = bbio->stripes[i].physical;
5796	}
5797
5798	btrfs_put_bbio(bbio);
5799
5800	ASSERT(found);
5801	if (!found)
5802		return -EIO;
5803
5804	*mirror_num = index_srcdev + 1;
5805	*physical = physical_of_found;
5806	return ret;
5807}
5808
5809static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5810				      struct btrfs_bio **bbio_ret,
5811				      struct btrfs_dev_replace *dev_replace,
5812				      int *num_stripes_ret, int *max_errors_ret)
5813{
5814	struct btrfs_bio *bbio = *bbio_ret;
5815	u64 srcdev_devid = dev_replace->srcdev->devid;
5816	int tgtdev_indexes = 0;
5817	int num_stripes = *num_stripes_ret;
5818	int max_errors = *max_errors_ret;
5819	int i;
5820
5821	if (op == BTRFS_MAP_WRITE) {
5822		int index_where_to_add;
5823
5824		/*
5825		 * duplicate the write operations while the dev replace
5826		 * procedure is running. Since the copying of the old disk to
5827		 * the new disk takes place at run time while the filesystem is
5828		 * mounted writable, the regular write operations to the old
5829		 * disk have to be duplicated to go to the new disk as well.
5830		 *
5831		 * Note that device->missing is handled by the caller, and that
5832		 * the write to the old disk is already set up in the stripes
5833		 * array.
5834		 */
5835		index_where_to_add = num_stripes;
5836		for (i = 0; i < num_stripes; i++) {
5837			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5838				/* write to new disk, too */
5839				struct btrfs_bio_stripe *new =
5840					bbio->stripes + index_where_to_add;
5841				struct btrfs_bio_stripe *old =
5842					bbio->stripes + i;
5843
5844				new->physical = old->physical;
5845				new->length = old->length;
5846				new->dev = dev_replace->tgtdev;
5847				bbio->tgtdev_map[i] = index_where_to_add;
5848				index_where_to_add++;
5849				max_errors++;
5850				tgtdev_indexes++;
5851			}
5852		}
5853		num_stripes = index_where_to_add;
5854	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5855		int index_srcdev = 0;
5856		int found = 0;
5857		u64 physical_of_found = 0;
5858
5859		/*
5860		 * During the dev-replace procedure, the target drive can also
5861		 * be used to read data in case it is needed to repair a corrupt
5862		 * block elsewhere. This is possible if the requested area is
5863		 * left of the left cursor. In this area, the target drive is a
5864		 * full copy of the source drive.
5865		 */
5866		for (i = 0; i < num_stripes; i++) {
5867			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5868				/*
5869				 * In case of DUP, in order to keep it simple,
5870				 * only add the mirror with the lowest physical
5871				 * address
5872				 */
5873				if (found &&
5874				    physical_of_found <=
5875				     bbio->stripes[i].physical)
5876					continue;
5877				index_srcdev = i;
5878				found = 1;
5879				physical_of_found = bbio->stripes[i].physical;
5880			}
5881		}
5882		if (found) {
5883			struct btrfs_bio_stripe *tgtdev_stripe =
5884				bbio->stripes + num_stripes;
5885
5886			tgtdev_stripe->physical = physical_of_found;
5887			tgtdev_stripe->length =
5888				bbio->stripes[index_srcdev].length;
5889			tgtdev_stripe->dev = dev_replace->tgtdev;
5890			bbio->tgtdev_map[index_srcdev] = num_stripes;
5891
5892			tgtdev_indexes++;
5893			num_stripes++;
5894		}
5895	}
5896
5897	*num_stripes_ret = num_stripes;
5898	*max_errors_ret = max_errors;
5899	bbio->num_tgtdevs = tgtdev_indexes;
5900	*bbio_ret = bbio;
5901}
5902
5903static bool need_full_stripe(enum btrfs_map_op op)
5904{
5905	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5906}
5907
5908/*
5909 * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
5910 *		       tuple. This information is used to calculate how big a
5911 *		       particular bio can get before it straddles a stripe.
5912 *
5913 * @fs_info - the filesystem
5914 * @logical - address that we want to figure out the geometry of
5915 * @len	    - the length of IO we are going to perform, starting at @logical
5916 * @op      - type of operation - write or read
5917 * @io_geom - pointer used to return values
5918 *
5919 * Returns < 0 in case a chunk for the given logical address cannot be found,
5920 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
5921 */
5922int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5923			u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
5924{
5925	struct extent_map *em;
5926	struct map_lookup *map;
5927	u64 offset;
5928	u64 stripe_offset;
5929	u64 stripe_nr;
5930	u64 stripe_len;
5931	u64 raid56_full_stripe_start = (u64)-1;
5932	int data_stripes;
5933	int ret = 0;
5934
5935	ASSERT(op != BTRFS_MAP_DISCARD);
5936
5937	em = btrfs_get_chunk_map(fs_info, logical, len);
5938	if (IS_ERR(em))
5939		return PTR_ERR(em);
5940
5941	map = em->map_lookup;
5942	/* Offset of this logical address in the chunk */
5943	offset = logical - em->start;
5944	/* Len of a stripe in a chunk */
5945	stripe_len = map->stripe_len;
5946	/* Stripe where this block falls in */
5947	stripe_nr = div64_u64(offset, stripe_len);
5948	/* Offset of stripe in the chunk */
5949	stripe_offset = stripe_nr * stripe_len;
5950	if (offset < stripe_offset) {
5951		btrfs_crit(fs_info,
5952"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
5953			stripe_offset, offset, em->start, logical, stripe_len);
5954		ret = -EINVAL;
5955		goto out;
5956	}
5957
5958	/* stripe_offset is the offset of this block in its stripe */
5959	stripe_offset = offset - stripe_offset;
5960	data_stripes = nr_data_stripes(map);
5961
5962	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5963		u64 max_len = stripe_len - stripe_offset;
5964
5965		/*
5966		 * In case of raid56, we need to know the stripe aligned start
5967		 */
5968		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5969			unsigned long full_stripe_len = stripe_len * data_stripes;
5970			raid56_full_stripe_start = offset;
5971
5972			/*
5973			 * Allow a write of a full stripe, but make sure we
5974			 * don't allow straddling of stripes
5975			 */
5976			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5977					full_stripe_len);
5978			raid56_full_stripe_start *= full_stripe_len;
5979
5980			/*
5981			 * For writes to RAID[56], allow a full stripeset across
5982			 * all disks. For other RAID types and for RAID[56]
5983			 * reads, just allow a single stripe (on a single disk).
5984			 */
5985			if (op == BTRFS_MAP_WRITE) {
5986				max_len = stripe_len * data_stripes -
5987					  (offset - raid56_full_stripe_start);
5988			}
5989		}
5990		len = min_t(u64, em->len - offset, max_len);
5991	} else {
5992		len = em->len - offset;
5993	}
5994
5995	io_geom->len = len;
5996	io_geom->offset = offset;
5997	io_geom->stripe_len = stripe_len;
5998	io_geom->stripe_nr = stripe_nr;
5999	io_geom->stripe_offset = stripe_offset;
6000	io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6001
6002out:
6003	/* once for us */
6004	free_extent_map(em);
6005	return ret;
6006}
6007
6008static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6009			     enum btrfs_map_op op,
6010			     u64 logical, u64 *length,
6011			     struct btrfs_bio **bbio_ret,
6012			     int mirror_num, int need_raid_map)
6013{
6014	struct extent_map *em;
6015	struct map_lookup *map;
6016	u64 stripe_offset;
6017	u64 stripe_nr;
6018	u64 stripe_len;
6019	u32 stripe_index;
6020	int data_stripes;
6021	int i;
6022	int ret = 0;
6023	int num_stripes;
6024	int max_errors = 0;
6025	int tgtdev_indexes = 0;
6026	struct btrfs_bio *bbio = NULL;
6027	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6028	int dev_replace_is_ongoing = 0;
6029	int num_alloc_stripes;
6030	int patch_the_first_stripe_for_dev_replace = 0;
6031	u64 physical_to_patch_in_first_stripe = 0;
6032	u64 raid56_full_stripe_start = (u64)-1;
6033	struct btrfs_io_geometry geom;
6034
6035	ASSERT(bbio_ret);
6036	ASSERT(op != BTRFS_MAP_DISCARD);
6037
6038	ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
6039	if (ret < 0)
6040		return ret;
6041
6042	em = btrfs_get_chunk_map(fs_info, logical, *length);
6043	ASSERT(!IS_ERR(em));
6044	map = em->map_lookup;
6045
6046	*length = geom.len;
6047	stripe_len = geom.stripe_len;
6048	stripe_nr = geom.stripe_nr;
6049	stripe_offset = geom.stripe_offset;
6050	raid56_full_stripe_start = geom.raid56_stripe_offset;
6051	data_stripes = nr_data_stripes(map);
6052
6053	down_read(&dev_replace->rwsem);
6054	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6055	/*
6056	 * Hold the semaphore for read during the whole operation, write is
6057	 * requested at commit time but must wait.
6058	 */
6059	if (!dev_replace_is_ongoing)
6060		up_read(&dev_replace->rwsem);
6061
6062	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6063	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6064		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6065						    dev_replace->srcdev->devid,
6066						    &mirror_num,
6067					    &physical_to_patch_in_first_stripe);
6068		if (ret)
6069			goto out;
6070		else
6071			patch_the_first_stripe_for_dev_replace = 1;
6072	} else if (mirror_num > map->num_stripes) {
6073		mirror_num = 0;
6074	}
6075
6076	num_stripes = 1;
6077	stripe_index = 0;
6078	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6079		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6080				&stripe_index);
6081		if (!need_full_stripe(op))
6082			mirror_num = 1;
6083	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6084		if (need_full_stripe(op))
6085			num_stripes = map->num_stripes;
6086		else if (mirror_num)
6087			stripe_index = mirror_num - 1;
6088		else {
6089			stripe_index = find_live_mirror(fs_info, map, 0,
6090					    dev_replace_is_ongoing);
6091			mirror_num = stripe_index + 1;
6092		}
6093
6094	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6095		if (need_full_stripe(op)) {
6096			num_stripes = map->num_stripes;
6097		} else if (mirror_num) {
6098			stripe_index = mirror_num - 1;
6099		} else {
6100			mirror_num = 1;
6101		}
6102
6103	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6104		u32 factor = map->num_stripes / map->sub_stripes;
6105
6106		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6107		stripe_index *= map->sub_stripes;
6108
6109		if (need_full_stripe(op))
6110			num_stripes = map->sub_stripes;
6111		else if (mirror_num)
6112			stripe_index += mirror_num - 1;
6113		else {
6114			int old_stripe_index = stripe_index;
6115			stripe_index = find_live_mirror(fs_info, map,
6116					      stripe_index,
6117					      dev_replace_is_ongoing);
6118			mirror_num = stripe_index - old_stripe_index + 1;
6119		}
6120
6121	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6122		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6123			/* push stripe_nr back to the start of the full stripe */
6124			stripe_nr = div64_u64(raid56_full_stripe_start,
6125					stripe_len * data_stripes);
6126
6127			/* RAID[56] write or recovery. Return all stripes */
6128			num_stripes = map->num_stripes;
6129			max_errors = nr_parity_stripes(map);
6130
6131			*length = map->stripe_len;
6132			stripe_index = 0;
6133			stripe_offset = 0;
6134		} else {
6135			/*
6136			 * Mirror #0 or #1 means the original data block.
6137			 * Mirror #2 is RAID5 parity block.
6138			 * Mirror #3 is RAID6 Q block.
6139			 */
6140			stripe_nr = div_u64_rem(stripe_nr,
6141					data_stripes, &stripe_index);
6142			if (mirror_num > 1)
6143				stripe_index = data_stripes + mirror_num - 2;
6144
6145			/* We distribute the parity blocks across stripes */
6146			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6147					&stripe_index);
6148			if (!need_full_stripe(op) && mirror_num <= 1)
6149				mirror_num = 1;
6150		}
6151	} else {
6152		/*
6153		 * after this, stripe_nr is the number of stripes on this
6154		 * device we have to walk to find the data, and stripe_index is
6155		 * the number of our device in the stripe array
6156		 */
6157		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6158				&stripe_index);
6159		mirror_num = stripe_index + 1;
6160	}
6161	if (stripe_index >= map->num_stripes) {
6162		btrfs_crit(fs_info,
6163			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6164			   stripe_index, map->num_stripes);
6165		ret = -EINVAL;
6166		goto out;
6167	}
6168
6169	num_alloc_stripes = num_stripes;
6170	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6171		if (op == BTRFS_MAP_WRITE)
6172			num_alloc_stripes <<= 1;
6173		if (op == BTRFS_MAP_GET_READ_MIRRORS)
6174			num_alloc_stripes++;
6175		tgtdev_indexes = num_stripes;
6176	}
6177
6178	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6179	if (!bbio) {
6180		ret = -ENOMEM;
6181		goto out;
6182	}
6183
6184	for (i = 0; i < num_stripes; i++) {
6185		bbio->stripes[i].physical = map->stripes[stripe_index].physical +
6186			stripe_offset + stripe_nr * map->stripe_len;
6187		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6188		stripe_index++;
6189	}
6190
6191	/* build raid_map */
6192	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6193	    (need_full_stripe(op) || mirror_num > 1)) {
6194		u64 tmp;
6195		unsigned rot;
6196
6197		/* Work out the disk rotation on this stripe-set */
6198		div_u64_rem(stripe_nr, num_stripes, &rot);
6199
6200		/* Fill in the logical address of each stripe */
6201		tmp = stripe_nr * data_stripes;
6202		for (i = 0; i < data_stripes; i++)
6203			bbio->raid_map[(i+rot) % num_stripes] =
6204				em->start + (tmp + i) * map->stripe_len;
6205
6206		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
6207		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6208			bbio->raid_map[(i+rot+1) % num_stripes] =
6209				RAID6_Q_STRIPE;
6210
6211		sort_parity_stripes(bbio, num_stripes);
6212	}
6213
6214	if (need_full_stripe(op))
6215		max_errors = btrfs_chunk_max_errors(map);
6216
6217	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6218	    need_full_stripe(op)) {
6219		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
6220					  &max_errors);
6221	}
6222
6223	*bbio_ret = bbio;
6224	bbio->map_type = map->type;
6225	bbio->num_stripes = num_stripes;
6226	bbio->max_errors = max_errors;
6227	bbio->mirror_num = mirror_num;
6228
6229	/*
6230	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
6231	 * mirror_num == num_stripes + 1 && dev_replace target drive is
6232	 * available as a mirror
6233	 */
6234	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6235		WARN_ON(num_stripes > 1);
6236		bbio->stripes[0].dev = dev_replace->tgtdev;
6237		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6238		bbio->mirror_num = map->num_stripes + 1;
6239	}
6240out:
6241	if (dev_replace_is_ongoing) {
6242		lockdep_assert_held(&dev_replace->rwsem);
6243		/* Unlock and let waiting writers proceed */
6244		up_read(&dev_replace->rwsem);
6245	}
6246	free_extent_map(em);
6247	return ret;
6248}
6249
6250int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6251		      u64 logical, u64 *length,
6252		      struct btrfs_bio **bbio_ret, int mirror_num)
6253{
6254	if (op == BTRFS_MAP_DISCARD)
6255		return __btrfs_map_block_for_discard(fs_info, logical,
6256						     length, bbio_ret);
6257
6258	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6259				 mirror_num, 0);
6260}
6261
6262/* For Scrub/replace */
6263int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6264		     u64 logical, u64 *length,
6265		     struct btrfs_bio **bbio_ret)
6266{
6267	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6268}
6269
6270static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6271{
6272	bio->bi_private = bbio->private;
6273	bio->bi_end_io = bbio->end_io;
6274	bio_endio(bio);
6275
6276	btrfs_put_bbio(bbio);
6277}
6278
6279static void btrfs_end_bio(struct bio *bio)
6280{
6281	struct btrfs_bio *bbio = bio->bi_private;
6282	int is_orig_bio = 0;
6283
6284	if (bio->bi_status) {
6285		atomic_inc(&bbio->error);
6286		if (bio->bi_status == BLK_STS_IOERR ||
6287		    bio->bi_status == BLK_STS_TARGET) {
6288			struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6289
6290			ASSERT(dev->bdev);
6291			if (bio_op(bio) == REQ_OP_WRITE)
6292				btrfs_dev_stat_inc_and_print(dev,
6293						BTRFS_DEV_STAT_WRITE_ERRS);
6294			else if (!(bio->bi_opf & REQ_RAHEAD))
6295				btrfs_dev_stat_inc_and_print(dev,
6296						BTRFS_DEV_STAT_READ_ERRS);
6297			if (bio->bi_opf & REQ_PREFLUSH)
6298				btrfs_dev_stat_inc_and_print(dev,
6299						BTRFS_DEV_STAT_FLUSH_ERRS);
6300		}
6301	}
6302
6303	if (bio == bbio->orig_bio)
6304		is_orig_bio = 1;
6305
6306	btrfs_bio_counter_dec(bbio->fs_info);
6307
6308	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6309		if (!is_orig_bio) {
6310			bio_put(bio);
6311			bio = bbio->orig_bio;
6312		}
6313
6314		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6315		/* Only send an error to the higher layers if it is
6316		 * beyond the tolerance of the btrfs bio.
6317		 */
6318		if (atomic_read(&bbio->error) > bbio->max_errors) {
6319			bio->bi_status = BLK_STS_IOERR;
6320		} else {
6321			/*
6322			 * this bio is actually up to date, we didn't
6323			 * go over the max number of errors
6324			 */
6325			bio->bi_status = BLK_STS_OK;
6326		}
6327
6328		btrfs_end_bbio(bbio, bio);
6329	} else if (!is_orig_bio) {
6330		bio_put(bio);
6331	}
6332}
6333
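/*
 * Minimal sketch of the completion accounting used by btrfs_end_bio()
 * above (illustrative, with assumed names): N per-stripe bios share one
 * pending counter, and only the final completion decides the fate of
 * the original bio by comparing accumulated errors against max_errors.
 */
struct example_multi_bio {
	atomic_t pending;	/* stripes still in flight */
	atomic_t errors;	/* failed stripes so far */
	int max_errors;		/* tolerated failures for this profile */
};

static void example_one_stripe_done(struct example_multi_bio *mb,
				    bool failed, struct bio *orig)
{
	if (failed)
		atomic_inc(&mb->errors);
	if (!atomic_dec_and_test(&mb->pending))
		return;		/* other stripes still outstanding */
	/* Last completion: succeed if errors stayed within tolerance. */
	orig->bi_status = atomic_read(&mb->errors) > mb->max_errors ?
			  BLK_STS_IOERR : BLK_STS_OK;
	bio_endio(orig);
}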
6334static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6335			      u64 physical, struct btrfs_device *dev)
6336{
6337	struct btrfs_fs_info *fs_info = bbio->fs_info;
6338
6339	bio->bi_private = bbio;
6340	btrfs_io_bio(bio)->device = dev;
6341	bio->bi_end_io = btrfs_end_bio;
6342	bio->bi_iter.bi_sector = physical >> 9;
6343	btrfs_debug_in_rcu(fs_info,
6344	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6345		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6346		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6347		dev->devid, bio->bi_iter.bi_size);
6348	bio_set_dev(bio, dev->bdev);
6349
6350	btrfs_bio_counter_inc_noblocked(fs_info);
6351
6352	btrfsic_submit_bio(bio);
6353}
6354
6355static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6356{
6357	atomic_inc(&bbio->error);
6358	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6359		/* Should be the original bio. */
6360		WARN_ON(bio != bbio->orig_bio);
6361
6362		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6363		bio->bi_iter.bi_sector = logical >> 9;
6364		if (atomic_read(&bbio->error) > bbio->max_errors)
6365			bio->bi_status = BLK_STS_IOERR;
6366		else
6367			bio->bi_status = BLK_STS_OK;
6368		btrfs_end_bbio(bbio, bio);
6369	}
6370}
6371
6372blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6373			   int mirror_num)
6374{
6375	struct btrfs_device *dev;
6376	struct bio *first_bio = bio;
6377	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6378	u64 length = 0;
6379	u64 map_length;
6380	int ret;
6381	int dev_nr;
6382	int total_devs;
6383	struct btrfs_bio *bbio = NULL;
6384
6385	length = bio->bi_iter.bi_size;
6386	map_length = length;
6387
6388	btrfs_bio_counter_inc_blocked(fs_info);
6389	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6390				&map_length, &bbio, mirror_num, 1);
6391	if (ret) {
6392		btrfs_bio_counter_dec(fs_info);
6393		return errno_to_blk_status(ret);
6394	}
6395
6396	total_devs = bbio->num_stripes;
6397	bbio->orig_bio = first_bio;
6398	bbio->private = first_bio->bi_private;
6399	bbio->end_io = first_bio->bi_end_io;
6400	bbio->fs_info = fs_info;
6401	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6402
6403	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6404	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6405		/* In this case, map_length has been set to the length of
6406		 * a single stripe, not the whole write. */
6407		if (bio_op(bio) == REQ_OP_WRITE) {
6408			ret = raid56_parity_write(fs_info, bio, bbio,
6409						  map_length);
6410		} else {
6411			ret = raid56_parity_recover(fs_info, bio, bbio,
6412						    map_length, mirror_num, 1);
6413		}
6414
6415		btrfs_bio_counter_dec(fs_info);
6416		return errno_to_blk_status(ret);
6417	}
6418
6419	if (map_length < length) {
6420		btrfs_crit(fs_info,
6421			   "mapping failed logical %llu bio len %llu len %llu",
6422			   logical, length, map_length);
6423		BUG();
6424	}
6425
6426	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6427		dev = bbio->stripes[dev_nr].dev;
6428		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6429						   &dev->dev_state) ||
6430		    (bio_op(first_bio) == REQ_OP_WRITE &&
6431		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6432			bbio_error(bbio, first_bio, logical);
6433			continue;
6434		}
6435
6436		if (dev_nr < total_devs - 1)
6437			bio = btrfs_bio_clone(first_bio);
6438		else
6439			bio = first_bio;
6440
6441		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6442	}
6443	btrfs_bio_counter_dec(fs_info);
6444	return BLK_STS_OK;
6445}
6446
6447/*
6448 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6449 * return NULL.
6450 *
6451 * If devid and uuid are both specified, the match must be exact, otherwise
6452 * only devid is used.
6453 *
6454 * If @seed is true, traverse through the seed devices.
6455 */
6456struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6457				       u64 devid, u8 *uuid, u8 *fsid,
6458				       bool seed)
6459{
6460	struct btrfs_device *device;
6461
6462	while (fs_devices) {
6463		if (!fsid ||
6464		    !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6465			list_for_each_entry(device, &fs_devices->devices,
6466					    dev_list) {
6467				if (device->devid == devid &&
6468				    (!uuid || memcmp(device->uuid, uuid,
6469						     BTRFS_UUID_SIZE) == 0))
6470					return device;
6471			}
6472		}
6473		if (seed)
6474			fs_devices = fs_devices->seed;
6475		else
6476			return NULL;
6477	}
6478	return NULL;
6479}
6480
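/*
 * Illustrative sketch, not part of the original source: the two lookup
 * modes described above.  Passing a uuid demands an exact devid + uuid
 * match; passing NULL relaxes the match to devid only.  The helper
 * name is hypothetical.
 */
static struct btrfs_device *example_lookup(struct btrfs_fs_devices *fs_devices,
					   u64 devid, u8 *uuid)
{
	/* Exact match first, traversing seed devices as well. */
	struct btrfs_device *dev = btrfs_find_device(fs_devices, devid,
						     uuid, NULL, true);

	if (!dev)	/* Fall back to devid only, e.g. unknown uuid. */
		dev = btrfs_find_device(fs_devices, devid, NULL, NULL, true);
	return dev;
}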
6481static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6482					    u64 devid, u8 *dev_uuid)
6483{
6484	struct btrfs_device *device;
6485	unsigned int nofs_flag;
6486
6487	/*
6488	 * We call this under the chunk_mutex, so we want to use NOFS for this
6489	 * allocation, however we don't want to change btrfs_alloc_device() to
6490	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6491	 * places.
6492	 */
6493	nofs_flag = memalloc_nofs_save();
6494	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6495	memalloc_nofs_restore(nofs_flag);
6496	if (IS_ERR(device))
6497		return device;
6498
6499	list_add(&device->dev_list, &fs_devices->devices);
6500	device->fs_devices = fs_devices;
6501	fs_devices->num_devices++;
6502
6503	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6504	fs_devices->missing_devices++;
6505
6506	return device;
6507}
6508
6509/**
6510 * btrfs_alloc_device - allocate struct btrfs_device
6511 * @fs_info:	used only for generating a new devid, can be NULL if
6512 *		devid is provided (i.e. @devid != NULL).
6513 * @devid:	a pointer to devid for this device.  If NULL a new devid
6514 *		is generated.
6515 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6516 *		is generated.
6517 *
6518 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6519 * on error.  The returned struct is not linked onto any lists and must be
6520 * destroyed with btrfs_free_device().
6521 */
6522struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6523					const u64 *devid,
6524					const u8 *uuid)
6525{
6526	struct btrfs_device *dev;
6527	u64 tmp;
6528
6529	if (WARN_ON(!devid && !fs_info))
6530		return ERR_PTR(-EINVAL);
6531
6532	dev = __alloc_device();
6533	if (IS_ERR(dev))
6534		return dev;
6535
6536	if (devid)
6537		tmp = *devid;
6538	else {
6539		int ret;
6540
6541		ret = find_next_devid(fs_info, &tmp);
6542		if (ret) {
6543			btrfs_free_device(dev);
6544			return ERR_PTR(ret);
6545		}
6546	}
6547	dev->devid = tmp;
6548
6549	if (uuid)
6550		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6551	else
6552		generate_random_uuid(dev->uuid);
6553
6554	return dev;
6555}
6556
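/*
 * Illustrative sketch, not part of the original source: the alloc/free
 * pairing implied by the kernel-doc above.  A device returned by
 * btrfs_alloc_device() is not on any list, so every error path must
 * dispose of it with btrfs_free_device().  The helper is hypothetical.
 */
static int example_alloc_dev(struct btrfs_fs_info *fs_info)
{
	/* NULL devid and uuid: both get generated for us. */
	struct btrfs_device *dev = btrfs_alloc_device(fs_info, NULL, NULL);

	if (IS_ERR(dev))
		return PTR_ERR(dev);

	/* ... normally the device would be linked into fs_devices here ... */

	btrfs_free_device(dev);
	return 0;
}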
6557static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6558					u64 devid, u8 *uuid, bool error)
6559{
6560	if (error)
6561		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6562			      devid, uuid);
6563	else
6564		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6565			      devid, uuid);
6566}
6567
6568static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6569{
6570	int index = btrfs_bg_flags_to_raid_index(type);
6571	int ncopies = btrfs_raid_array[index].ncopies;
6572	const int nparity = btrfs_raid_array[index].nparity;
6573	int data_stripes;
6574
6575	if (nparity)
6576		data_stripes = num_stripes - nparity;
6577	else
6578		data_stripes = num_stripes / ncopies;
6579
6580	return div_u64(chunk_len, data_stripes);
6581}
6582
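/*
 * Worked examples for calc_stripe_length() above (illustrative numbers,
 * not from the original source):
 *
 *   RAID6, 4 stripes, 4GiB chunk: nparity == 2, so data_stripes ==
 *   4 - 2 == 2 and each device extent is 4GiB / 2 == 2GiB.
 *
 *   RAID1, 2 stripes, 1GiB chunk: ncopies == 2, so data_stripes ==
 *   2 / 2 == 1 and each device extent spans the full 1GiB.
 */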
6583static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6584			  struct btrfs_chunk *chunk)
6585{
6586	struct btrfs_fs_info *fs_info = leaf->fs_info;
6587	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6588	struct map_lookup *map;
6589	struct extent_map *em;
6590	u64 logical;
6591	u64 length;
6592	u64 devid;
6593	u8 uuid[BTRFS_UUID_SIZE];
6594	int num_stripes;
6595	int ret;
6596	int i;
6597
6598	logical = key->offset;
6599	length = btrfs_chunk_length(leaf, chunk);
6600	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6601
6602	/*
6603	 * Only need to verify chunk item if we're reading from sys chunk array,
6604	 * as chunk item in tree block is already verified by tree-checker.
6605	 */
6606	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6607		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6608		if (ret)
6609			return ret;
6610	}
6611
6612	read_lock(&map_tree->lock);
6613	em = lookup_extent_mapping(map_tree, logical, 1);
6614	read_unlock(&map_tree->lock);
6615
6616	/* already mapped? */
6617	if (em && em->start <= logical && em->start + em->len > logical) {
6618		free_extent_map(em);
6619		return 0;
6620	} else if (em) {
6621		free_extent_map(em);
6622	}
6623
6624	em = alloc_extent_map();
6625	if (!em)
6626		return -ENOMEM;
6627	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6628	if (!map) {
6629		free_extent_map(em);
6630		return -ENOMEM;
6631	}
6632
6633	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6634	em->map_lookup = map;
6635	em->start = logical;
6636	em->len = length;
6637	em->orig_start = 0;
6638	em->block_start = 0;
6639	em->block_len = em->len;
6640
6641	map->num_stripes = num_stripes;
6642	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6643	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6644	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6645	map->type = btrfs_chunk_type(leaf, chunk);
6646	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6647	map->verified_stripes = 0;
6648	em->orig_block_len = calc_stripe_length(map->type, em->len,
6649						map->num_stripes);
6650	for (i = 0; i < num_stripes; i++) {
6651		map->stripes[i].physical =
6652			btrfs_stripe_offset_nr(leaf, chunk, i);
6653		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6654		read_extent_buffer(leaf, uuid, (unsigned long)
6655				   btrfs_stripe_dev_uuid_nr(chunk, i),
6656				   BTRFS_UUID_SIZE);
6657		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6658							devid, uuid, NULL, true);
6659		if (!map->stripes[i].dev &&
6660		    !btrfs_test_opt(fs_info, DEGRADED)) {
6661			free_extent_map(em);
6662			btrfs_report_missing_device(fs_info, devid, uuid, true);
6663			return -ENOENT;
6664		}
6665		if (!map->stripes[i].dev) {
6666			map->stripes[i].dev =
6667				add_missing_dev(fs_info->fs_devices, devid,
6668						uuid);
6669			if (IS_ERR(map->stripes[i].dev)) {
6670				free_extent_map(em);
6671				btrfs_err(fs_info,
6672					"failed to init missing dev %llu: %ld",
6673					devid, PTR_ERR(map->stripes[i].dev));
6674				return PTR_ERR(map->stripes[i].dev);
6675			}
6676			btrfs_report_missing_device(fs_info, devid, uuid, false);
6677		}
6678		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6679				&(map->stripes[i].dev->dev_state));
6680
6681	}
6682
6683	write_lock(&map_tree->lock);
6684	ret = add_extent_mapping(map_tree, em, 0);
6685	write_unlock(&map_tree->lock);
6686	if (ret < 0) {
6687		btrfs_err(fs_info,
6688			  "failed to add chunk map, start=%llu len=%llu: %d",
6689			  em->start, em->len, ret);
6690	}
6691	free_extent_map(em);
6692
6693	return ret;
6694}
6695
6696static void fill_device_from_item(struct extent_buffer *leaf,
6697				 struct btrfs_dev_item *dev_item,
6698				 struct btrfs_device *device)
6699{
6700	unsigned long ptr;
6701
6702	device->devid = btrfs_device_id(leaf, dev_item);
6703	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6704	device->total_bytes = device->disk_total_bytes;
6705	device->commit_total_bytes = device->disk_total_bytes;
6706	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6707	device->commit_bytes_used = device->bytes_used;
6708	device->type = btrfs_device_type(leaf, dev_item);
6709	device->io_align = btrfs_device_io_align(leaf, dev_item);
6710	device->io_width = btrfs_device_io_width(leaf, dev_item);
6711	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6712	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6713	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6714
6715	ptr = btrfs_device_uuid(dev_item);
6716	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6717}
6718
6719static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6720						  u8 *fsid)
6721{
6722	struct btrfs_fs_devices *fs_devices;
6723	int ret;
6724
6725	lockdep_assert_held(&uuid_mutex);
6726	ASSERT(fsid);
6727
6728	fs_devices = fs_info->fs_devices->seed;
6729	while (fs_devices) {
6730		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6731			return fs_devices;
6732
6733		fs_devices = fs_devices->seed;
6734	}
6735
6736	fs_devices = find_fsid(fsid, NULL);
6737	if (!fs_devices) {
6738		if (!btrfs_test_opt(fs_info, DEGRADED))
6739			return ERR_PTR(-ENOENT);
6740
6741		fs_devices = alloc_fs_devices(fsid, NULL);
6742		if (IS_ERR(fs_devices))
6743			return fs_devices;
6744
6745		fs_devices->seeding = true;
6746		fs_devices->opened = 1;
6747		return fs_devices;
6748	}
6749
6750	fs_devices = clone_fs_devices(fs_devices);
6751	if (IS_ERR(fs_devices))
6752		return fs_devices;
6753
6754	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6755	if (ret) {
6756		free_fs_devices(fs_devices);
6757		fs_devices = ERR_PTR(ret);
6758		goto out;
6759	}
6760
6761	if (!fs_devices->seeding) {
6762		close_fs_devices(fs_devices);
6763		free_fs_devices(fs_devices);
6764		fs_devices = ERR_PTR(-EINVAL);
6765		goto out;
6766	}
6767
6768	fs_devices->seed = fs_info->fs_devices->seed;
6769	fs_info->fs_devices->seed = fs_devices;
6770out:
6771	return fs_devices;
6772}
6773
6774static int read_one_dev(struct extent_buffer *leaf,
6775			struct btrfs_dev_item *dev_item)
6776{
6777	struct btrfs_fs_info *fs_info = leaf->fs_info;
6778	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6779	struct btrfs_device *device;
6780	u64 devid;
6781	int ret;
6782	u8 fs_uuid[BTRFS_FSID_SIZE];
6783	u8 dev_uuid[BTRFS_UUID_SIZE];
6784
6785	devid = btrfs_device_id(leaf, dev_item);
6786	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6787			   BTRFS_UUID_SIZE);
6788	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6789			   BTRFS_FSID_SIZE);
6790
6791	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
6792		fs_devices = open_seed_devices(fs_info, fs_uuid);
6793		if (IS_ERR(fs_devices))
6794			return PTR_ERR(fs_devices);
6795	}
6796
6797	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6798				   fs_uuid, true);
6799	if (!device) {
6800		if (!btrfs_test_opt(fs_info, DEGRADED)) {
6801			btrfs_report_missing_device(fs_info, devid,
6802							dev_uuid, true);
6803			return -ENOENT;
6804		}
6805
6806		device = add_missing_dev(fs_devices, devid, dev_uuid);
6807		if (IS_ERR(device)) {
6808			btrfs_err(fs_info,
6809				"failed to add missing dev %llu: %ld",
6810				devid, PTR_ERR(device));
6811			return PTR_ERR(device);
6812		}
6813		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6814	} else {
6815		if (!device->bdev) {
6816			if (!btrfs_test_opt(fs_info, DEGRADED)) {
6817				btrfs_report_missing_device(fs_info,
6818						devid, dev_uuid, true);
6819				return -ENOENT;
6820			}
6821			btrfs_report_missing_device(fs_info, devid,
6822							dev_uuid, false);
6823		}
6824
6825		if (!device->bdev &&
6826		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6827			/*
6828			 * This happens when a device that was properly set up
6829			 * in the device info lists suddenly goes bad.
6830			 * device->bdev is NULL, so we have to set the
6831			 * BTRFS_DEV_STATE_MISSING bit here.
6832			 */
6833			device->fs_devices->missing_devices++;
6834			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6835		}
6836
6837		/* Move the device to its own fs_devices */
6838		if (device->fs_devices != fs_devices) {
6839			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6840							&device->dev_state));
6841
6842			list_move(&device->dev_list, &fs_devices->devices);
6843			device->fs_devices->num_devices--;
6844			fs_devices->num_devices++;
6845
6846			device->fs_devices->missing_devices--;
6847			fs_devices->missing_devices++;
6848
6849			device->fs_devices = fs_devices;
6850		}
6851	}
6852
6853	if (device->fs_devices != fs_info->fs_devices) {
6854		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6855		if (device->generation !=
6856		    btrfs_device_generation(leaf, dev_item))
6857			return -EINVAL;
6858	}
6859
6860	fill_device_from_item(leaf, dev_item, device);
6861	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6862	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6863	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6864		device->fs_devices->total_rw_bytes += device->total_bytes;
6865		atomic64_add(device->total_bytes - device->bytes_used,
6866				&fs_info->free_chunk_space);
6867	}
6868	ret = 0;
6869	return ret;
6870}
6871
6872int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6873{
6874	struct btrfs_root *root = fs_info->tree_root;
6875	struct btrfs_super_block *super_copy = fs_info->super_copy;
6876	struct extent_buffer *sb;
6877	struct btrfs_disk_key *disk_key;
6878	struct btrfs_chunk *chunk;
6879	u8 *array_ptr;
6880	unsigned long sb_array_offset;
6881	int ret = 0;
6882	u32 num_stripes;
6883	u32 array_size;
6884	u32 len = 0;
6885	u32 cur_offset;
6886	u64 type;
6887	struct btrfs_key key;
6888
6889	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6890	/*
6891	 * This will create an extent buffer of nodesize; the superblock size is
6892	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6893	 * overallocate, but we can keep it as-is, since only the first page is used.
6894	 */
6895	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
6896	if (IS_ERR(sb))
6897		return PTR_ERR(sb);
6898	set_extent_buffer_uptodate(sb);
6899	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6900	/*
6901	 * The sb extent buffer is artificial and just used to read the system array.
6902	 * The set_extent_buffer_uptodate() call does not properly mark all of its
6903	 * pages up-to-date when the page is larger: the extent does not cover the
6904	 * whole page and consequently check_page_uptodate does not find all
6905	 * the page's extents up-to-date (the hole beyond sb),
6906	 * write_extent_buffer then triggers a WARN_ON.
6907	 *
6908	 * Regular short extents go through the mark_extent_buffer_dirty/writeback cycle,
6909	 * but sb spans only this function. Add an explicit SetPageUptodate call
6910	 * to silence the warning e.g. on PowerPC 64.
6911	 */
6912	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6913		SetPageUptodate(sb->pages[0]);
6914
6915	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6916	array_size = btrfs_super_sys_array_size(super_copy);
6917
6918	array_ptr = super_copy->sys_chunk_array;
6919	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6920	cur_offset = 0;
6921
6922	while (cur_offset < array_size) {
6923		disk_key = (struct btrfs_disk_key *)array_ptr;
6924		len = sizeof(*disk_key);
6925		if (cur_offset + len > array_size)
6926			goto out_short_read;
6927
6928		btrfs_disk_key_to_cpu(&key, disk_key);
6929
6930		array_ptr += len;
6931		sb_array_offset += len;
6932		cur_offset += len;
6933
6934		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
6935			btrfs_err(fs_info,
6936			    "unexpected item type %u in sys_array at offset %u",
6937				  (u32)key.type, cur_offset);
6938			ret = -EIO;
6939			break;
6940		}
6941
6942		chunk = (struct btrfs_chunk *)sb_array_offset;
6943		/*
6944		 * At least one btrfs_chunk with one stripe must be present,
6945		 * exact stripe count check comes afterwards
6946		 */
6947		len = btrfs_chunk_item_size(1);
6948		if (cur_offset + len > array_size)
6949			goto out_short_read;
6950
6951		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6952		if (!num_stripes) {
6953			btrfs_err(fs_info,
6954			"invalid number of stripes %u in sys_array at offset %u",
6955				  num_stripes, cur_offset);
6956			ret = -EIO;
6957			break;
6958		}
6959
6960		type = btrfs_chunk_type(sb, chunk);
6961		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6962			btrfs_err(fs_info,
6963			"invalid chunk type %llu in sys_array at offset %u",
6964				  type, cur_offset);
6965			ret = -EIO;
6966			break;
6967		}
6968
6969		len = btrfs_chunk_item_size(num_stripes);
6970		if (cur_offset + len > array_size)
6971			goto out_short_read;
6972
6973		ret = read_one_chunk(&key, sb, chunk);
6974		if (ret)
6975			break;
6976
6977		array_ptr += len;
6978		sb_array_offset += len;
6979		cur_offset += len;
6980	}
6981	clear_extent_buffer_uptodate(sb);
6982	free_extent_buffer_stale(sb);
6983	return ret;
6984
6985out_short_read:
6986	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
6987			len, cur_offset);
6988	clear_extent_buffer_uptodate(sb);
6989	free_extent_buffer_stale(sb);
6990	return -EIO;
6991}
6992
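/*
 * Illustrative summary, not part of the original source: the layout
 * that btrfs_read_sys_array() walks.  sys_chunk_array is a packed
 * sequence of (key, chunk) pairs:
 *
 *   struct btrfs_disk_key  key0    (type must be BTRFS_CHUNK_ITEM_KEY)
 *   struct btrfs_chunk     chunk0  (btrfs_chunk_item_size(num_stripes0))
 *   struct btrfs_disk_key  key1
 *   struct btrfs_chunk     chunk1
 *   ...
 *
 * Each chunk's size depends on its stripe count, which is why the loop
 * validates the one-stripe minimum length before it reads num_stripes.
 */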
6993/*
6994 * Check if all chunks in the fs are OK for read-write degraded mount
6995 *
6996 * If the @failing_dev is specified, it's accounted as missing.
6997 *
6998 * Return true if all chunks meet the minimal RW mount requirements.
6999 * Return false if any chunk doesn't meet the minimal RW mount requirements.
7000 */
7001bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7002					struct btrfs_device *failing_dev)
7003{
7004	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7005	struct extent_map *em;
7006	u64 next_start = 0;
7007	bool ret = true;
7008
7009	read_lock(&map_tree->lock);
7010	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7011	read_unlock(&map_tree->lock);
7012	/* No chunk at all? Return false anyway */
7013	if (!em) {
7014		ret = false;
7015		goto out;
7016	}
7017	while (em) {
7018		struct map_lookup *map;
7019		int missing = 0;
7020		int max_tolerated;
7021		int i;
7022
7023		map = em->map_lookup;
7024		max_tolerated =
7025			btrfs_get_num_tolerated_disk_barrier_failures(
7026					map->type);
7027		for (i = 0; i < map->num_stripes; i++) {
7028			struct btrfs_device *dev = map->stripes[i].dev;
7029
7030			if (!dev || !dev->bdev ||
7031			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7032			    dev->last_flush_error)
7033				missing++;
7034			else if (failing_dev && failing_dev == dev)
7035				missing++;
7036		}
7037		if (missing > max_tolerated) {
7038			if (!failing_dev)
7039				btrfs_warn(fs_info,
7040	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
7041				   em->start, missing, max_tolerated);
7042			free_extent_map(em);
7043			ret = false;
7044			goto out;
7045		}
7046		next_start = extent_map_end(em);
7047		free_extent_map(em);
7048
7049		read_lock(&map_tree->lock);
7050		em = lookup_extent_mapping(map_tree, next_start,
7051					   (u64)(-1) - next_start);
7052		read_unlock(&map_tree->lock);
7053	}
7054out:
7055	return ret;
7056}
7057
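/*
 * Illustrative sketch, not part of the original source: the per-chunk
 * tolerance test above reduced to a single profile.
 * btrfs_get_num_tolerated_disk_barrier_failures() returns e.g. 1 for
 * RAID1/RAID10 and 2 for RAID6, so a RAID1 chunk with two stripes on
 * missing devices fails the writable-mount check.
 */
static bool example_chunk_degradable(u64 type, int missing_devices)
{
	return missing_devices <=
	       btrfs_get_num_tolerated_disk_barrier_failures(type);
}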
7058static void readahead_tree_node_children(struct extent_buffer *node)
7059{
7060	int i;
7061	const int nr_items = btrfs_header_nritems(node);
7062
7063	for (i = 0; i < nr_items; i++) {
7064		u64 start;
7065
7066		start = btrfs_node_blockptr(node, i);
7067		readahead_tree_block(node->fs_info, start);
7068	}
7069}
7070
7071int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7072{
7073	struct btrfs_root *root = fs_info->chunk_root;
7074	struct btrfs_path *path;
7075	struct extent_buffer *leaf;
7076	struct btrfs_key key;
7077	struct btrfs_key found_key;
7078	int ret;
7079	int slot;
7080	u64 total_dev = 0;
7081	u64 last_ra_node = 0;
7082
7083	path = btrfs_alloc_path();
7084	if (!path)
7085		return -ENOMEM;
7086
7087	/*
7088	 * uuid_mutex is needed only if we are mounting a sprout FS;
7089	 * otherwise it is unnecessary.
7090	 */
7091	mutex_lock(&uuid_mutex);
7092
7093	/*
7094	 * It is possible for mount and umount to race in such a way that
7095	 * we execute this code path, but open_fs_devices failed to clear
7096	 * total_rw_bytes. We certainly want it cleared before reading the
7097	 * device items, so clear it here.
7098	 */
7099	fs_info->fs_devices->total_rw_bytes = 0;
7100
7101	/*
7102	 * Read all device items, and then all the chunk items. All
7103	 * device items are found before any chunk item (their object id
7104	 * is smaller than the lowest possible object id for a chunk
7105	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7106	 */
7107	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7108	key.offset = 0;
7109	key.type = 0;
7110	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7111	if (ret < 0)
7112		goto error;
7113	while (1) {
7114		struct extent_buffer *node;
7115
7116		leaf = path->nodes[0];
7117		slot = path->slots[0];
7118		if (slot >= btrfs_header_nritems(leaf)) {
7119			ret = btrfs_next_leaf(root, path);
7120			if (ret == 0)
7121				continue;
7122			if (ret < 0)
7123				goto error;
7124			break;
7125		}
7126		/*
7127		 * The nodes on level 1 are not locked, but we don't need to
7128		 * lock them during mount time as nothing else can access the tree.
7129		 */
7130		node = path->nodes[1];
7131		if (node) {
7132			if (last_ra_node != node->start) {
7133				readahead_tree_node_children(node);
7134				last_ra_node = node->start;
7135			}
7136		}
7137		btrfs_item_key_to_cpu(leaf, &found_key, slot);
7138		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7139			struct btrfs_dev_item *dev_item;
7140			dev_item = btrfs_item_ptr(leaf, slot,
7141						  struct btrfs_dev_item);
7142			ret = read_one_dev(leaf, dev_item);
7143			if (ret)
7144				goto error;
7145			total_dev++;
7146		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7147			struct btrfs_chunk *chunk;
7148			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7149			mutex_lock(&fs_info->chunk_mutex);
7150			ret = read_one_chunk(&found_key, leaf, chunk);
7151			mutex_unlock(&fs_info->chunk_mutex);
7152			if (ret)
7153				goto error;
7154		}
7155		path->slots[0]++;
7156	}
7157
7158	/*
7159	 * After loading chunk tree, we've got all device information,
7160	 * do another round of validation checks.
7161	 */
7162	if (total_dev != fs_info->fs_devices->total_devices) {
7163		btrfs_err(fs_info,
7164	   "super_num_devices %llu mismatch with num_devices %llu found here",
7165			  btrfs_super_num_devices(fs_info->super_copy),
7166			  total_dev);
7167		ret = -EINVAL;
7168		goto error;
7169	}
7170	if (btrfs_super_total_bytes(fs_info->super_copy) <
7171	    fs_info->fs_devices->total_rw_bytes) {
7172		btrfs_err(fs_info,
7173	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7174			  btrfs_super_total_bytes(fs_info->super_copy),
7175			  fs_info->fs_devices->total_rw_bytes);
7176		ret = -EINVAL;
7177		goto error;
7178	}
7179	ret = 0;
7180error:
7181	mutex_unlock(&uuid_mutex);
7182
7183	btrfs_free_path(path);
7184	return ret;
7185}
7186
7187void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7188{
7189	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7190	struct btrfs_device *device;
7191
7192	while (fs_devices) {
7193		mutex_lock(&fs_devices->device_list_mutex);
7194		list_for_each_entry(device, &fs_devices->devices, dev_list)
7195			device->fs_info = fs_info;
7196		mutex_unlock(&fs_devices->device_list_mutex);
7197
7198		fs_devices = fs_devices->seed;
7199	}
7200}
7201
7202static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7203				 const struct btrfs_dev_stats_item *ptr,
7204				 int index)
7205{
7206	u64 val;
7207
7208	read_extent_buffer(eb, &val,
7209			   offsetof(struct btrfs_dev_stats_item, values) +
7210			    ((unsigned long)ptr) + (index * sizeof(u64)),
7211			   sizeof(val));
7212	return val;
7213}
7214
7215static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7216				      struct btrfs_dev_stats_item *ptr,
7217				      int index, u64 val)
7218{
7219	write_extent_buffer(eb, &val,
7220			    offsetof(struct btrfs_dev_stats_item, values) +
7221			     ((unsigned long)ptr) + (index * sizeof(u64)),
7222			    sizeof(val));
7223}
7224
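/*
 * Illustrative note, not part of the original source: the accessors
 * above address a flat array of little-endian u64 counters inside
 * struct btrfs_dev_stats_item.  Slot @index lives at
 *
 *   item_offset + offsetof(struct btrfs_dev_stats_item, values)
 *               + index * sizeof(u64)
 *
 * within the leaf, which is why btrfs_init_dev_stats() below can accept
 * short items from older filesystems by checking the item size against
 * (1 + index) * sizeof(__le64).
 */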
7225int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7226{
7227	struct btrfs_key key;
7228	struct btrfs_root *dev_root = fs_info->dev_root;
7229	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7230	struct extent_buffer *eb;
7231	int slot;
7232	int ret = 0;
7233	struct btrfs_device *device;
7234	struct btrfs_path *path = NULL;
7235	int i;
7236
7237	path = btrfs_alloc_path();
7238	if (!path)
7239		return -ENOMEM;
7240
7241	mutex_lock(&fs_devices->device_list_mutex);
7242	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7243		int item_size;
7244		struct btrfs_dev_stats_item *ptr;
7245
7246		key.objectid = BTRFS_DEV_STATS_OBJECTID;
7247		key.type = BTRFS_PERSISTENT_ITEM_KEY;
7248		key.offset = device->devid;
7249		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
7250		if (ret) {
7251			for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7252				btrfs_dev_stat_set(device, i, 0);
7253			device->dev_stats_valid = 1;
7254			btrfs_release_path(path);
7255			continue;
7256		}
7257		slot = path->slots[0];
7258		eb = path->nodes[0];
7259		item_size = btrfs_item_size_nr(eb, slot);
7260
7261		ptr = btrfs_item_ptr(eb, slot,
7262				     struct btrfs_dev_stats_item);
7263
7264		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7265			if (item_size >= (1 + i) * sizeof(__le64))
7266				btrfs_dev_stat_set(device, i,
7267					btrfs_dev_stats_value(eb, ptr, i));
7268			else
7269				btrfs_dev_stat_set(device, i, 0);
7270		}
7271
7272		device->dev_stats_valid = 1;
7273		btrfs_dev_stat_print_on_load(device);
7274		btrfs_release_path(path);
7275	}
7276	mutex_unlock(&fs_devices->device_list_mutex);
7277
7278	btrfs_free_path(path);
7279	return ret < 0 ? ret : 0;
7280}
7281
7282static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7283				struct btrfs_device *device)
7284{
7285	struct btrfs_fs_info *fs_info = trans->fs_info;
7286	struct btrfs_root *dev_root = fs_info->dev_root;
7287	struct btrfs_path *path;
7288	struct btrfs_key key;
7289	struct extent_buffer *eb;
7290	struct btrfs_dev_stats_item *ptr;
7291	int ret;
7292	int i;
7293
7294	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7295	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7296	key.offset = device->devid;
7297
7298	path = btrfs_alloc_path();
7299	if (!path)
7300		return -ENOMEM;
7301	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7302	if (ret < 0) {
7303		btrfs_warn_in_rcu(fs_info,
7304			"error %d while searching for dev_stats item for device %s",
7305			      ret, rcu_str_deref(device->name));
7306		goto out;
7307	}
7308
7309	if (ret == 0 &&
7310	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7311		/* need to delete old one and insert a new one */
7312		ret = btrfs_del_item(trans, dev_root, path);
7313		if (ret != 0) {
7314			btrfs_warn_in_rcu(fs_info,
7315				"delete too small dev_stats item for device %s failed %d",
7316				      rcu_str_deref(device->name), ret);
7317			goto out;
7318		}
7319		ret = 1;
7320	}
7321
7322	if (ret == 1) {
7323		/* need to insert a new item */
7324		btrfs_release_path(path);
7325		ret = btrfs_insert_empty_item(trans, dev_root, path,
7326					      &key, sizeof(*ptr));
7327		if (ret < 0) {
7328			btrfs_warn_in_rcu(fs_info,
7329				"insert dev_stats item for device %s failed %d",
7330				rcu_str_deref(device->name), ret);
7331			goto out;
7332		}
7333	}
7334
7335	eb = path->nodes[0];
7336	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7337	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7338		btrfs_set_dev_stats_value(eb, ptr, i,
7339					  btrfs_dev_stat_read(device, i));
7340	btrfs_mark_buffer_dirty(eb);
7341
7342out:
7343	btrfs_free_path(path);
7344	return ret;
7345}
7346
7347/*
7348 * Called from commit_transaction. Writes all changed device stats to disk.
7349 */
7350int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7351{
7352	struct btrfs_fs_info *fs_info = trans->fs_info;
7353	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7354	struct btrfs_device *device;
7355	int stats_cnt;
7356	int ret = 0;
7357
7358	mutex_lock(&fs_devices->device_list_mutex);
7359	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7360		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7361		if (!device->dev_stats_valid || stats_cnt == 0)
7362			continue;
7363
7364
7365		/*
7366		 * There is a LOAD-LOAD control dependency between the value of
7367		 * dev_stats_ccnt and updating the on-disk values which requires
7368		 * reading the in-memory counters. Such control dependencies
7369		 * require explicit read memory barriers.
7370		 *
7371		 * This memory barrier pairs with smp_mb__before_atomic in
7372		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7373		 * barrier implied by atomic_xchg in
7374		 * btrfs_dev_stats_read_and_reset().
7375		 */
7376		smp_rmb();
7377
7378		ret = update_dev_stat_item(trans, device);
7379		if (!ret)
7380			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7381	}
7382	mutex_unlock(&fs_devices->device_list_mutex);
7383
7384	return ret;
7385}
7386
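/*
 * Minimal sketch of the barrier pairing described in the loop above
 * (illustrative, with assumed structure and helper names): the writer
 * publishes the counter before bumping the change count, the reader
 * observes the bump before reading the counter.
 */
struct example_stats {
	atomic_t value;		/* the counter being published */
	atomic_t ccnt;		/* change count, polled by the committer */
};

static void example_stat_inc(struct example_stats *s)
{
	atomic_inc(&s->value);
	smp_mb__before_atomic();	/* publish value before the bump */
	atomic_inc(&s->ccnt);
}

static int example_stat_snapshot(struct example_stats *s)
{
	int pending = atomic_read(&s->ccnt);

	/* Pairs with smp_mb__before_atomic() in example_stat_inc(). */
	smp_rmb();
	return pending ? atomic_read(&s->value) : 0;
}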
7387void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7388{
7389	btrfs_dev_stat_inc(dev, index);
7390	btrfs_dev_stat_print_on_error(dev);
7391}
7392
7393static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7394{
7395	if (!dev->dev_stats_valid)
7396		return;
7397	btrfs_err_rl_in_rcu(dev->fs_info,
7398		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7399			   rcu_str_deref(dev->name),
7400			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7401			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7402			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7403			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7404			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7405}
7406
7407static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7408{
7409	int i;
7410
7411	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7412		if (btrfs_dev_stat_read(dev, i) != 0)
7413			break;
7414	if (i == BTRFS_DEV_STAT_VALUES_MAX)
7415		return; /* all values == 0, suppress message */
7416
7417	btrfs_info_in_rcu(dev->fs_info,
7418		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7419	       rcu_str_deref(dev->name),
7420	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7421	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7422	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7423	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7424	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7425}
7426
7427int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7428			struct btrfs_ioctl_get_dev_stats *stats)
7429{
7430	struct btrfs_device *dev;
7431	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7432	int i;
7433
7434	mutex_lock(&fs_devices->device_list_mutex);
7435	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
7436				true);
7437	mutex_unlock(&fs_devices->device_list_mutex);
7438
7439	if (!dev) {
7440		btrfs_warn(fs_info, "get dev_stats failed, device not found");
7441		return -ENODEV;
7442	} else if (!dev->dev_stats_valid) {
7443		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7444		return -ENODEV;
7445	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7446		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7447			if (stats->nr_items > i)
7448				stats->values[i] =
7449					btrfs_dev_stat_read_and_reset(dev, i);
7450			else
7451				btrfs_dev_stat_set(dev, i, 0);
7452		}
7453		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7454			   current->comm, task_pid_nr(current));
7455	} else {
7456		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7457			if (stats->nr_items > i)
7458				stats->values[i] = btrfs_dev_stat_read(dev, i);
7459	}
7460	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7461		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7462	return 0;
7463}
7464
7465/*
7466 * Update the size and bytes used for each device where it changed.  This is
7467 * delayed since we would otherwise get errors while writing out the
7468 * superblocks.
7469 *
7470 * Must be invoked during transaction commit.
7471 */
7472void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7473{
7474	struct btrfs_device *curr, *next;
7475
7476	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7477
7478	if (list_empty(&trans->dev_update_list))
7479		return;
7480
7481	/*
7482	 * We don't need the device_list_mutex here.  This list is owned by the
7483	 * transaction and the transaction must complete before the device is
7484	 * released.
7485	 */
7486	mutex_lock(&trans->fs_info->chunk_mutex);
7487	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7488				 post_commit_list) {
7489		list_del_init(&curr->post_commit_list);
7490		curr->commit_total_bytes = curr->disk_total_bytes;
7491		curr->commit_bytes_used = curr->bytes_used;
7492	}
7493	mutex_unlock(&trans->fs_info->chunk_mutex);
7494}
7495
7496void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
7497{
7498	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7499	while (fs_devices) {
7500		fs_devices->fs_info = fs_info;
7501		fs_devices = fs_devices->seed;
7502	}
7503}
7504
7505void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7506{
7507	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7508	while (fs_devices) {
7509		fs_devices->fs_info = NULL;
7510		fs_devices = fs_devices->seed;
7511	}
7512}
7513
7514/*
7515 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7516 */
7517int btrfs_bg_type_to_factor(u64 flags)
7518{
7519	const int index = btrfs_bg_flags_to_raid_index(flags);
7520
7521	return btrfs_raid_array[index].ncopies;
7522}
7523
7524
7525
7526static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7527				 u64 chunk_offset, u64 devid,
7528				 u64 physical_offset, u64 physical_len)
7529{
7530	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7531	struct extent_map *em;
7532	struct map_lookup *map;
7533	struct btrfs_device *dev;
7534	u64 stripe_len;
7535	bool found = false;
7536	int ret = 0;
7537	int i;
7538
7539	read_lock(&em_tree->lock);
7540	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7541	read_unlock(&em_tree->lock);
7542
7543	if (!em) {
7544		btrfs_err(fs_info,
7545"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7546			  physical_offset, devid);
7547		ret = -EUCLEAN;
7548		goto out;
7549	}
7550
7551	map = em->map_lookup;
7552	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7553	if (physical_len != stripe_len) {
7554		btrfs_err(fs_info,
7555"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7556			  physical_offset, devid, em->start, physical_len,
7557			  stripe_len);
7558		ret = -EUCLEAN;
7559		goto out;
7560	}
7561
7562	for (i = 0; i < map->num_stripes; i++) {
7563		if (map->stripes[i].dev->devid == devid &&
7564		    map->stripes[i].physical == physical_offset) {
7565			found = true;
7566			if (map->verified_stripes >= map->num_stripes) {
7567				btrfs_err(fs_info,
7568				"too many dev extents for chunk %llu found",
7569					  em->start);
7570				ret = -EUCLEAN;
7571				goto out;
7572			}
7573			map->verified_stripes++;
7574			break;
7575		}
7576	}
7577	if (!found) {
7578		btrfs_err(fs_info,
7579	"dev extent physical offset %llu devid %llu has no corresponding chunk",
7580			physical_offset, devid);
7581		ret = -EUCLEAN;
7582	}
7583
7584	/* Make sure no dev extent is beyond device boundary */
7585	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
7586	if (!dev) {
7587		btrfs_err(fs_info, "failed to find devid %llu", devid);
7588		ret = -EUCLEAN;
7589		goto out;
7590	}
7591
7592	/* It's possible this device is a dummy for a seed device */
7593	if (dev->disk_total_bytes == 0) {
7594		dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,
7595					NULL, false);
7596		if (!dev) {
7597			btrfs_err(fs_info, "failed to find seed devid %llu",
7598				  devid);
7599			ret = -EUCLEAN;
7600			goto out;
7601		}
7602	}
7603
7604	if (physical_offset + physical_len > dev->disk_total_bytes) {
7605		btrfs_err(fs_info,
7606"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7607			  devid, physical_offset, physical_len,
7608			  dev->disk_total_bytes);
7609		ret = -EUCLEAN;
7610		goto out;
7611	}
7612out:
7613	free_extent_map(em);
7614	return ret;
7615}
7616
7617static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7618{
7619	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7620	struct extent_map *em;
7621	struct rb_node *node;
7622	int ret = 0;
7623
7624	read_lock(&em_tree->lock);
7625	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7626		em = rb_entry(node, struct extent_map, rb_node);
7627		if (em->map_lookup->num_stripes !=
7628		    em->map_lookup->verified_stripes) {
7629			btrfs_err(fs_info,
7630			"chunk %llu has missing dev extent, have %d expect %d",
7631				  em->start, em->map_lookup->verified_stripes,
7632				  em->map_lookup->num_stripes);
7633			ret = -EUCLEAN;
7634			goto out;
7635		}
7636	}
7637out:
7638	read_unlock(&em_tree->lock);
7639	return ret;
7640}
7641
7642/*
7643 * Ensure that all dev extents are mapped to the correct chunk, otherwise
7644 * later chunk allocation/free would cause unexpected behavior.
7645 *
7646 * NOTE: This will iterate through the whole device tree, which is roughly
7647 * the same size as the chunk tree.  This slightly increases mount time.
7648 */
7649int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
7650{
7651	struct btrfs_path *path;
7652	struct btrfs_root *root = fs_info->dev_root;
7653	struct btrfs_key key;
7654	u64 prev_devid = 0;
7655	u64 prev_dev_ext_end = 0;
7656	int ret = 0;
7657
7658	key.objectid = 1;
7659	key.type = BTRFS_DEV_EXTENT_KEY;
7660	key.offset = 0;
7661
7662	path = btrfs_alloc_path();
7663	if (!path)
7664		return -ENOMEM;
7665
7666	path->reada = READA_FORWARD;
7667	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7668	if (ret < 0)
7669		goto out;
7670
7671	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7672		ret = btrfs_next_item(root, path);
7673		if (ret < 0)
7674			goto out;
7675		/* No dev extents at all? Not good */
7676		if (ret > 0) {
7677			ret = -EUCLEAN;
7678			goto out;
7679		}
7680	}
7681	while (1) {
7682		struct extent_buffer *leaf = path->nodes[0];
7683		struct btrfs_dev_extent *dext;
7684		int slot = path->slots[0];
7685		u64 chunk_offset;
7686		u64 physical_offset;
7687		u64 physical_len;
7688		u64 devid;
7689
7690		btrfs_item_key_to_cpu(leaf, &key, slot);
7691		if (key.type != BTRFS_DEV_EXTENT_KEY)
7692			break;
7693		devid = key.objectid;
7694		physical_offset = key.offset;
7695
7696		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
7697		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
7698		physical_len = btrfs_dev_extent_length(leaf, dext);
7699
7700		/* Check if this dev extent overlaps with the previous one */
7701		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
7702			btrfs_err(fs_info,
7703"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7704				  devid, physical_offset, prev_dev_ext_end);
7705			ret = -EUCLEAN;
7706			goto out;
7707		}
7708
7709		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
7710					    physical_offset, physical_len);
7711		if (ret < 0)
7712			goto out;
7713		prev_devid = devid;
7714		prev_dev_ext_end = physical_offset + physical_len;
7715
7716		ret = btrfs_next_item(root, path);
7717		if (ret < 0)
7718			goto out;
7719		if (ret > 0) {
7720			ret = 0;
7721			break;
7722		}
7723	}
7724
7725	/* Ensure all chunks have corresponding dev extents */
7726	ret = verify_chunk_dev_extent_mapping(fs_info);
7727out:
7728	btrfs_free_path(path);
7729	return ret;
7730}
7731
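/*
 * Worked example for the overlap check in btrfs_verify_dev_extents()
 * above (illustrative numbers): with prev_devid == 1 and
 * prev_dev_ext_end == 1M, a following extent on devid 1 that starts at
 * 512K overlaps (512K < 1M) and fails with -EUCLEAN, while one that
 * starts exactly at 1M is merely adjacent and passes.
 */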
7732/*
7733 * Check whether the given block group or device is pinned by any inode being
7734 * used as a swapfile.
7735 */
7736bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
7737{
7738	struct btrfs_swapfile_pin *sp;
7739	struct rb_node *node;
7740
7741	spin_lock(&fs_info->swapfile_pins_lock);
7742	node = fs_info->swapfile_pins.rb_node;
7743	while (node) {
7744		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
7745		if (ptr < sp->ptr)
7746			node = node->rb_left;
7747		else if (ptr > sp->ptr)
7748			node = node->rb_right;
7749		else
7750			break;
7751	}
7752	spin_unlock(&fs_info->swapfile_pins_lock);
7753	return node != NULL;
7754}