   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/sched/mm.h>
   8#include <linux/slab.h>
   9#include <linux/ratelimit.h>
  10#include <linux/kthread.h>
  11#include <linux/semaphore.h>
  12#include <linux/uuid.h>
  13#include <linux/list_sort.h>
  14#include <linux/namei.h>
  15#include "misc.h"
  16#include "ctree.h"
  17#include "extent_map.h"
  18#include "disk-io.h"
  19#include "transaction.h"
  20#include "print-tree.h"
  21#include "volumes.h"
  22#include "raid56.h"
  23#include "rcu-string.h"
  24#include "dev-replace.h"
  25#include "sysfs.h"
  26#include "tree-checker.h"
  27#include "space-info.h"
  28#include "block-group.h"
  29#include "discard.h"
  30#include "zoned.h"
  31#include "fs.h"
  32#include "accessors.h"
  33#include "uuid-tree.h"
  34#include "ioctl.h"
  35#include "relocation.h"
  36#include "scrub.h"
  37#include "super.h"
  38
  39#define BTRFS_BLOCK_GROUP_STRIPE_MASK	(BTRFS_BLOCK_GROUP_RAID0 | \
  40					 BTRFS_BLOCK_GROUP_RAID10 | \
  41					 BTRFS_BLOCK_GROUP_RAID56_MASK)
  42
  43const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
  44	[BTRFS_RAID_RAID10] = {
  45		.sub_stripes	= 2,
  46		.dev_stripes	= 1,
  47		.devs_max	= 0,	/* 0 == as many as possible */
  48		.devs_min	= 2,
  49		.tolerated_failures = 1,
  50		.devs_increment	= 2,
  51		.ncopies	= 2,
  52		.nparity        = 0,
  53		.raid_name	= "raid10",
  54		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
  55		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
  56	},
  57	[BTRFS_RAID_RAID1] = {
  58		.sub_stripes	= 1,
  59		.dev_stripes	= 1,
  60		.devs_max	= 2,
  61		.devs_min	= 2,
  62		.tolerated_failures = 1,
  63		.devs_increment	= 2,
  64		.ncopies	= 2,
  65		.nparity        = 0,
  66		.raid_name	= "raid1",
  67		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
  68		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
  69	},
  70	[BTRFS_RAID_RAID1C3] = {
  71		.sub_stripes	= 1,
  72		.dev_stripes	= 1,
  73		.devs_max	= 3,
  74		.devs_min	= 3,
  75		.tolerated_failures = 2,
  76		.devs_increment	= 3,
  77		.ncopies	= 3,
  78		.nparity        = 0,
  79		.raid_name	= "raid1c3",
  80		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
  81		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
  82	},
  83	[BTRFS_RAID_RAID1C4] = {
  84		.sub_stripes	= 1,
  85		.dev_stripes	= 1,
  86		.devs_max	= 4,
  87		.devs_min	= 4,
  88		.tolerated_failures = 3,
  89		.devs_increment	= 4,
  90		.ncopies	= 4,
  91		.nparity        = 0,
  92		.raid_name	= "raid1c4",
  93		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
  94		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
  95	},
  96	[BTRFS_RAID_DUP] = {
  97		.sub_stripes	= 1,
  98		.dev_stripes	= 2,
  99		.devs_max	= 1,
 100		.devs_min	= 1,
 101		.tolerated_failures = 0,
 102		.devs_increment	= 1,
 103		.ncopies	= 2,
 104		.nparity        = 0,
 105		.raid_name	= "dup",
 106		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
 107		.mindev_error	= 0,
 108	},
 109	[BTRFS_RAID_RAID0] = {
 110		.sub_stripes	= 1,
 111		.dev_stripes	= 1,
 112		.devs_max	= 0,
 113		.devs_min	= 1,
 114		.tolerated_failures = 0,
 115		.devs_increment	= 1,
 116		.ncopies	= 1,
 117		.nparity        = 0,
 118		.raid_name	= "raid0",
 119		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
 120		.mindev_error	= 0,
 121	},
 122	[BTRFS_RAID_SINGLE] = {
 123		.sub_stripes	= 1,
 124		.dev_stripes	= 1,
 125		.devs_max	= 1,
 126		.devs_min	= 1,
 127		.tolerated_failures = 0,
 128		.devs_increment	= 1,
 129		.ncopies	= 1,
 130		.nparity        = 0,
 131		.raid_name	= "single",
 132		.bg_flag	= 0,
 133		.mindev_error	= 0,
 134	},
 135	[BTRFS_RAID_RAID5] = {
 136		.sub_stripes	= 1,
 137		.dev_stripes	= 1,
 138		.devs_max	= 0,
 139		.devs_min	= 2,
 140		.tolerated_failures = 1,
 141		.devs_increment	= 1,
 142		.ncopies	= 1,
 143		.nparity        = 1,
 144		.raid_name	= "raid5",
 145		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
 146		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
 147	},
 148	[BTRFS_RAID_RAID6] = {
 149		.sub_stripes	= 1,
 150		.dev_stripes	= 1,
 151		.devs_max	= 0,
 152		.devs_min	= 3,
 153		.tolerated_failures = 2,
 154		.devs_increment	= 1,
 155		.ncopies	= 1,
 156		.nparity        = 2,
 157		.raid_name	= "raid6",
 158		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
 159		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
 160	},
 161};
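/*
 * Example of reading the table above, using the RAID6 entry: at least
 * devs_min = 3 devices are required, up to tolerated_failures = 2 of them
 * may be lost, data is stored once (ncopies = 1) and protected by
 * nparity = 2 parity stripes per stripe set.
 */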
 162
 163/*
 164 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 165 * can be used as index to access btrfs_raid_array[].
 166 */
 167enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
 168{
 169	const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);
 170
 171	if (!profile)
 172		return BTRFS_RAID_SINGLE;
 173
 174	return BTRFS_BG_FLAG_TO_INDEX(profile);
 175}
 176
 177const char *btrfs_bg_type_to_raid_name(u64 flags)
 178{
 179	const int index = btrfs_bg_flags_to_raid_index(flags);
 180
 181	if (index >= BTRFS_NR_RAID_TYPES)
 182		return NULL;
 183
 184	return btrfs_raid_array[index].raid_name;
 185}
 186
 187int btrfs_nr_parity_stripes(u64 type)
 188{
 189	enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(type);
 190
 191	return btrfs_raid_array[index].nparity;
 192}
 193
 194/*
 195 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 196 * bytes including terminating null byte.
 197 */
 198void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
 199{
 200	int i;
 201	int ret;
 202	char *bp = buf;
 203	u64 flags = bg_flags;
 204	u32 size_bp = size_buf;
 205
 206	if (!flags) {
 207		strcpy(bp, "NONE");
 208		return;
 209	}
 210
 211#define DESCRIBE_FLAG(flag, desc)						\
 212	do {								\
 213		if (flags & (flag)) {					\
 214			ret = snprintf(bp, size_bp, "%s|", (desc));	\
 215			if (ret < 0 || ret >= size_bp)			\
 216				goto out_overflow;			\
 217			size_bp -= ret;					\
 218			bp += ret;					\
 219			flags &= ~(flag);				\
 220		}							\
 221	} while (0)
 222
 223	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
 224	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
 225	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
 226
 227	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
 228	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
 229		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
 230			      btrfs_raid_array[i].raid_name);
 231#undef DESCRIBE_FLAG
 232
 233	if (flags) {
 234		ret = snprintf(bp, size_bp, "0x%llx|", flags);
 235		size_bp -= ret;
 236	}
 237
 238	if (size_bp < size_buf)
 239		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
 240
 241	/*
  242	 * The text is trimmed, it's up to the caller to provide a sufficiently
  243	 * large buffer
 244	 */
 245out_overflow:;
 246}
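/*
 * For example, bg_flags == (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1)
 * is rendered into @buf as "data|raid1".
 */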
 247
 248static int init_first_rw_device(struct btrfs_trans_handle *trans);
 249static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
 250static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
 251
 252/*
 253 * Device locking
 254 * ==============
 255 *
 256 * There are several mutexes that protect manipulation of devices and low-level
 257 * structures like chunks but not block groups, extents or files
 258 *
 259 * uuid_mutex (global lock)
 260 * ------------------------
 261 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 262 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 263 * device) or requested by the device= mount option
 264 *
 265 * the mutex can be very coarse and can cover long-running operations
 266 *
 267 * protects: updates to fs_devices counters like missing devices, rw devices,
 268 * seeding, structure cloning, opening/closing devices at mount/umount time
 269 *
 270 * global::fs_devs - add, remove, updates to the global list
 271 *
 272 * does not protect: manipulation of the fs_devices::devices list in general
  273 * but in mount context it could be used to exclude list modifications by e.g.
  274 * the scan ioctl
 275 *
 276 * btrfs_device::name - renames (write side), read is RCU
 277 *
 278 * fs_devices::device_list_mutex (per-fs, with RCU)
 279 * ------------------------------------------------
 280 * protects updates to fs_devices::devices, ie. adding and deleting
 281 *
 282 * simple list traversal with read-only actions can be done with RCU protection
 283 *
 284 * may be used to exclude some operations from running concurrently without any
 285 * modifications to the list (see write_all_supers)
 286 *
 287 * Is not required at mount and close times, because our device list is
 288 * protected by the uuid_mutex at that point.
 289 *
 290 * balance_mutex
 291 * -------------
 292 * protects balance structures (status, state) and context accessed from
 293 * several places (internally, ioctl)
 294 *
 295 * chunk_mutex
 296 * -----------
 297 * protects chunks, adding or removing during allocation, trim or when a new
 298 * device is added/removed. Additionally it also protects post_commit_list of
 299 * individual devices, since they can be added to the transaction's
 300 * post_commit_list only with chunk_mutex held.
 301 *
 302 * cleaner_mutex
 303 * -------------
 304 * a big lock that is held by the cleaner thread and prevents running subvolume
 305 * cleaning together with relocation or delayed iputs
 306 *
 307 *
 308 * Lock nesting
 309 * ============
 310 *
 311 * uuid_mutex
 312 *   device_list_mutex
 313 *     chunk_mutex
 314 *   balance_mutex
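 *   (code that needs more than one of these locks must acquire them in the
 *   order shown above, outermost first, and release them in reverse order)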
 315 *
 316 *
 317 * Exclusive operations
 318 * ====================
 319 *
 320 * Maintains the exclusivity of the following operations that apply to the
 321 * whole filesystem and cannot run in parallel.
 322 *
 323 * - Balance (*)
 324 * - Device add
 325 * - Device remove
 326 * - Device replace (*)
 327 * - Resize
 328 *
 329 * The device operations (as above) can be in one of the following states:
 330 *
 331 * - Running state
 332 * - Paused state
 333 * - Completed state
 334 *
 335 * Only device operations marked with (*) can go into the Paused state for the
 336 * following reasons:
 337 *
 338 * - ioctl (only Balance can be Paused through ioctl)
 339 * - filesystem remounted as read-only
 340 * - filesystem unmounted and mounted as read-only
 341 * - system power-cycle and filesystem mounted as read-only
 342 * - filesystem or device errors leading to forced read-only
 343 *
 344 * The status of exclusive operation is set and cleared atomically.
 345 * During the course of Paused state, fs_info::exclusive_operation remains set.
 346 * A device operation in Paused or Running state can be canceled or resumed
 347 * either by ioctl (Balance only) or when remounted as read-write.
 348 * The exclusive status is cleared when the device operation is canceled or
 349 * completed.
 350 */
 351
 352DEFINE_MUTEX(uuid_mutex);
 353static LIST_HEAD(fs_uuids);
 354struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
 355{
 356	return &fs_uuids;
 357}
 358
 359/*
 360 * alloc_fs_devices - allocate struct btrfs_fs_devices
 361 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 362 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 363 *
 364 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 365 * The returned struct is not linked onto any lists and can be destroyed with
 366 * kfree() right away.
 367 */
 368static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
 369						 const u8 *metadata_fsid)
 370{
 371	struct btrfs_fs_devices *fs_devs;
 372
 373	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
 374	if (!fs_devs)
 375		return ERR_PTR(-ENOMEM);
 376
 377	mutex_init(&fs_devs->device_list_mutex);
 378
 379	INIT_LIST_HEAD(&fs_devs->devices);
 380	INIT_LIST_HEAD(&fs_devs->alloc_list);
 381	INIT_LIST_HEAD(&fs_devs->fs_list);
 382	INIT_LIST_HEAD(&fs_devs->seed_list);
 383	if (fsid)
 384		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
 385
 386	if (metadata_fsid)
 387		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
 388	else if (fsid)
 389		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
 390
 391	return fs_devs;
 392}
 393
 394void btrfs_free_device(struct btrfs_device *device)
 395{
 396	WARN_ON(!list_empty(&device->post_commit_list));
 397	rcu_string_free(device->name);
 398	extent_io_tree_release(&device->alloc_state);
 399	btrfs_destroy_dev_zone_info(device);
 400	kfree(device);
 401}
 402
 403static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
 404{
 405	struct btrfs_device *device;
 406
 407	WARN_ON(fs_devices->opened);
 408	while (!list_empty(&fs_devices->devices)) {
 409		device = list_entry(fs_devices->devices.next,
 410				    struct btrfs_device, dev_list);
 411		list_del(&device->dev_list);
 412		btrfs_free_device(device);
 413	}
 414	kfree(fs_devices);
 415}
 416
 417void __exit btrfs_cleanup_fs_uuids(void)
 418{
 419	struct btrfs_fs_devices *fs_devices;
 420
 421	while (!list_empty(&fs_uuids)) {
 422		fs_devices = list_entry(fs_uuids.next,
 423					struct btrfs_fs_devices, fs_list);
 424		list_del(&fs_devices->fs_list);
 425		free_fs_devices(fs_devices);
 426	}
 427}
 428
 429static noinline struct btrfs_fs_devices *find_fsid(
 430		const u8 *fsid, const u8 *metadata_fsid)
 431{
 432	struct btrfs_fs_devices *fs_devices;
 433
 434	ASSERT(fsid);
 435
 436	/* Handle non-split brain cases */
 437	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 438		if (metadata_fsid) {
 439			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
 440			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
 441				      BTRFS_FSID_SIZE) == 0)
 442				return fs_devices;
 443		} else {
 444			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
 445				return fs_devices;
 446		}
 447	}
 448	return NULL;
 449}
 450
 451static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
 452				struct btrfs_super_block *disk_super)
 453{
 454
 455	struct btrfs_fs_devices *fs_devices;
 456
 457	/*
 458	 * Handle scanned device having completed its fsid change but
 459	 * belonging to a fs_devices that was created by first scanning
 460	 * a device which didn't have its fsid/metadata_uuid changed
 461	 * at all and the CHANGING_FSID_V2 flag set.
 462	 */
 463	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 464		if (fs_devices->fsid_change &&
 465		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
 466			   BTRFS_FSID_SIZE) == 0 &&
 467		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
 468			   BTRFS_FSID_SIZE) == 0) {
 469			return fs_devices;
 470		}
 471	}
 472	/*
 473	 * Handle scanned device having completed its fsid change but
 474	 * belonging to a fs_devices that was created by a device that
 475	 * has an outdated pair of fsid/metadata_uuid and
 476	 * CHANGING_FSID_V2 flag set.
 477	 */
 478	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 479		if (fs_devices->fsid_change &&
 480		    memcmp(fs_devices->metadata_uuid,
 481			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
 482		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
 483			   BTRFS_FSID_SIZE) == 0) {
 484			return fs_devices;
 485		}
 486	}
 487
 488	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
 489}
 490
 491
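/*
 * Open the block device at @device_path, optionally flush it, set the block
 * size used for btrfs super blocks and read the primary super block.  On
 * success the caller owns *bdev and *disk_super and must release them with
 * blkdev_put() and btrfs_release_disk_super() respectively.
 */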
 492static int
 493btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
 494		      int flush, struct block_device **bdev,
 495		      struct btrfs_super_block **disk_super)
 496{
 497	int ret;
 498
 499	*bdev = blkdev_get_by_path(device_path, flags, holder);
 500
 501	if (IS_ERR(*bdev)) {
 502		ret = PTR_ERR(*bdev);
 503		goto error;
 504	}
 505
 506	if (flush)
 507		sync_blockdev(*bdev);
 508	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
 509	if (ret) {
 510		blkdev_put(*bdev, flags);
 511		goto error;
 512	}
 513	invalidate_bdev(*bdev);
 514	*disk_super = btrfs_read_dev_super(*bdev);
 515	if (IS_ERR(*disk_super)) {
 516		ret = PTR_ERR(*disk_super);
 517		blkdev_put(*bdev, flags);
 518		goto error;
 519	}
 520
 521	return 0;
 522
 523error:
 524	*bdev = NULL;
 525	return ret;
 526}
 527
 528/*
 529 *  Search and remove all stale devices (which are not mounted).  When both
 530 *  inputs are NULL, it will search and release all stale devices.
 531 *
  532 *  @devt:         Optional. When provided, it will release all unmounted devices
  533 *                 matching this devt only.
 534 *  @skip_device:  Optional. Will skip this device when searching for the stale
 535 *                 devices.
 536 *
 537 *  Return:	0 for success or if @devt is 0.
 538 *		-EBUSY if @devt is a mounted device.
 539 *		-ENOENT if @devt does not match any device in the list.
 540 */
 541static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
 542{
 543	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
 544	struct btrfs_device *device, *tmp_device;
 545	int ret = 0;
 546
 547	lockdep_assert_held(&uuid_mutex);
 548
 549	if (devt)
 550		ret = -ENOENT;
 551
 552	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
 553
 554		mutex_lock(&fs_devices->device_list_mutex);
 555		list_for_each_entry_safe(device, tmp_device,
 556					 &fs_devices->devices, dev_list) {
 557			if (skip_device && skip_device == device)
 558				continue;
 559			if (devt && devt != device->devt)
 560				continue;
 561			if (fs_devices->opened) {
 562				/* for an already deleted device return 0 */
 563				if (devt && ret != 0)
 564					ret = -EBUSY;
 565				break;
 566			}
 567
 568			/* delete the stale device */
 569			fs_devices->num_devices--;
 570			list_del(&device->dev_list);
 571			btrfs_free_device(device);
 572
 573			ret = 0;
 574		}
 575		mutex_unlock(&fs_devices->device_list_mutex);
 576
 577		if (fs_devices->num_devices == 0) {
 578			btrfs_sysfs_remove_fsid(fs_devices);
 579			list_del(&fs_devices->fs_list);
 580			free_fs_devices(fs_devices);
 581		}
 582	}
 583
 584	return ret;
 585}
 586
 587/*
 588 * This is only used on mount, and we are protected from competing things
 589 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 590 * fs_devices->device_list_mutex here.
 591 */
 592static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
 593			struct btrfs_device *device, fmode_t flags,
 594			void *holder)
 595{
 596	struct block_device *bdev;
 597	struct btrfs_super_block *disk_super;
 598	u64 devid;
 599	int ret;
 600
 601	if (device->bdev)
 602		return -EINVAL;
 603	if (!device->name)
 604		return -EINVAL;
 605
 606	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
 607				    &bdev, &disk_super);
 608	if (ret)
 609		return ret;
 610
 611	devid = btrfs_stack_device_id(&disk_super->dev_item);
 612	if (devid != device->devid)
 613		goto error_free_page;
 614
 615	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
 616		goto error_free_page;
 617
 618	device->generation = btrfs_super_generation(disk_super);
 619
 620	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
 621		if (btrfs_super_incompat_flags(disk_super) &
 622		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
 623			pr_err(
 624		"BTRFS: Invalid seeding and uuid-changed device detected\n");
 625			goto error_free_page;
 626		}
 627
 628		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
 629		fs_devices->seeding = true;
 630	} else {
 631		if (bdev_read_only(bdev))
 632			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
 633		else
 634			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
 635	}
 636
 637	if (!bdev_nonrot(bdev))
 638		fs_devices->rotating = true;
 639
 640	if (bdev_max_discard_sectors(bdev))
 641		fs_devices->discardable = true;
 642
 643	device->bdev = bdev;
 644	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
 645	device->mode = flags;
 646
 647	fs_devices->open_devices++;
 648	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
 649	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
 650		fs_devices->rw_devices++;
 651		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
 652	}
 653	btrfs_release_disk_super(disk_super);
 654
 655	return 0;
 656
 657error_free_page:
 658	btrfs_release_disk_super(disk_super);
 659	blkdev_put(bdev, flags);
 660
 661	return -EINVAL;
 662}
 663
 664/*
 665 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
  666 * being created with a disk that has already completed its fsid change. Such a
 667 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 668 * Handle both cases here.
 669 */
 670static struct btrfs_fs_devices *find_fsid_inprogress(
 671					struct btrfs_super_block *disk_super)
 672{
 673	struct btrfs_fs_devices *fs_devices;
 674
 675	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 676		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
 677			   BTRFS_FSID_SIZE) != 0 &&
 678		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
 679			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
 680			return fs_devices;
 681		}
 682	}
 683
 684	return find_fsid(disk_super->fsid, NULL);
 685}
 686
 687
 688static struct btrfs_fs_devices *find_fsid_changed(
 689					struct btrfs_super_block *disk_super)
 690{
 691	struct btrfs_fs_devices *fs_devices;
 692
 693	/*
  694	 * Handles the case where the scanned device is part of an fs that had
  695	 * multiple successful changes of FSID but the current device didn't
  696	 * observe it. Meaning our fsid will be different from theirs. We need
  697	 * to handle two subcases:
 698	 *  1 - The fs still continues to have different METADATA/FSID uuids.
 699	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
 700	 *  are equal).
 701	 */
 702	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 703		/* Changed UUIDs */
 704		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
 705			   BTRFS_FSID_SIZE) != 0 &&
 706		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
 707			   BTRFS_FSID_SIZE) == 0 &&
 708		    memcmp(fs_devices->fsid, disk_super->fsid,
 709			   BTRFS_FSID_SIZE) != 0)
 710			return fs_devices;
 711
 712		/* Unchanged UUIDs */
 713		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
 714			   BTRFS_FSID_SIZE) == 0 &&
 715		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
 716			   BTRFS_FSID_SIZE) == 0)
 717			return fs_devices;
 718	}
 719
 720	return NULL;
 721}
 722
 723static struct btrfs_fs_devices *find_fsid_reverted_metadata(
 724				struct btrfs_super_block *disk_super)
 725{
 726	struct btrfs_fs_devices *fs_devices;
 727
 728	/*
 729	 * Handle the case where the scanned device is part of an fs whose last
  730	 * metadata UUID change reverted it to the original FSID. At the same
  731	 * time the fs_devices was first created by another constituent device
  732	 * which didn't fully observe the operation. This results in a
  733	 * btrfs_fs_devices created with metadata/fsid different AND
 734	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
 735	 * fs_devices equal to the FSID of the disk.
 736	 */
 737	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
 738		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
 739			   BTRFS_FSID_SIZE) != 0 &&
 740		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
 741			   BTRFS_FSID_SIZE) == 0 &&
 742		    fs_devices->fsid_change)
 743			return fs_devices;
 744	}
 745
 746	return NULL;
 747}
 748/*
  749 * Add a new device to the list of registered devices
 750 *
 751 * Returns:
  752 * the device pointer which was just added or updated on success
  753 * an error pointer on failure
 754 */
 755static noinline struct btrfs_device *device_list_add(const char *path,
 756			   struct btrfs_super_block *disk_super,
 757			   bool *new_device_added)
 758{
 759	struct btrfs_device *device;
 760	struct btrfs_fs_devices *fs_devices = NULL;
 761	struct rcu_string *name;
 762	u64 found_transid = btrfs_super_generation(disk_super);
 763	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
 764	dev_t path_devt;
 765	int error;
 766	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
 767		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
 768	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
 769					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
 770
 771	error = lookup_bdev(path, &path_devt);
 772	if (error) {
 773		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
 774			  path, error);
 775		return ERR_PTR(error);
 776	}
 777
 778	if (fsid_change_in_progress) {
 779		if (!has_metadata_uuid)
 780			fs_devices = find_fsid_inprogress(disk_super);
 781		else
 782			fs_devices = find_fsid_changed(disk_super);
 783	} else if (has_metadata_uuid) {
 784		fs_devices = find_fsid_with_metadata_uuid(disk_super);
 785	} else {
 786		fs_devices = find_fsid_reverted_metadata(disk_super);
 787		if (!fs_devices)
 788			fs_devices = find_fsid(disk_super->fsid, NULL);
 789	}
 790
 791
 792	if (!fs_devices) {
 793		if (has_metadata_uuid)
 794			fs_devices = alloc_fs_devices(disk_super->fsid,
 795						      disk_super->metadata_uuid);
 796		else
 797			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
 798
 799		if (IS_ERR(fs_devices))
 800			return ERR_CAST(fs_devices);
 801
 802		fs_devices->fsid_change = fsid_change_in_progress;
 803
 804		mutex_lock(&fs_devices->device_list_mutex);
 805		list_add(&fs_devices->fs_list, &fs_uuids);
 806
 807		device = NULL;
 808	} else {
 809		struct btrfs_dev_lookup_args args = {
 810			.devid = devid,
 811			.uuid = disk_super->dev_item.uuid,
 812		};
 813
 814		mutex_lock(&fs_devices->device_list_mutex);
 815		device = btrfs_find_device(fs_devices, &args);
 816
 817		/*
 818		 * If this disk has been pulled into an fs devices created by
 819		 * a device which had the CHANGING_FSID_V2 flag then replace the
 820		 * metadata_uuid/fsid values of the fs_devices.
 821		 */
 822		if (fs_devices->fsid_change &&
 823		    found_transid > fs_devices->latest_generation) {
 824			memcpy(fs_devices->fsid, disk_super->fsid,
 825					BTRFS_FSID_SIZE);
 826
 827			if (has_metadata_uuid)
 828				memcpy(fs_devices->metadata_uuid,
 829				       disk_super->metadata_uuid,
 830				       BTRFS_FSID_SIZE);
 831			else
 832				memcpy(fs_devices->metadata_uuid,
 833				       disk_super->fsid, BTRFS_FSID_SIZE);
 834
 835			fs_devices->fsid_change = false;
 836		}
 837	}
 838
 839	if (!device) {
 840		unsigned int nofs_flag;
 841
 842		if (fs_devices->opened) {
 843			btrfs_err(NULL,
 844		"device %s belongs to fsid %pU, and the fs is already mounted",
 845				  path, fs_devices->fsid);
 846			mutex_unlock(&fs_devices->device_list_mutex);
 847			return ERR_PTR(-EBUSY);
 848		}
 849
 850		nofs_flag = memalloc_nofs_save();
 851		device = btrfs_alloc_device(NULL, &devid,
 852					    disk_super->dev_item.uuid, path);
 853		memalloc_nofs_restore(nofs_flag);
 854		if (IS_ERR(device)) {
 855			mutex_unlock(&fs_devices->device_list_mutex);
 856			/* we can safely leave the fs_devices entry around */
 857			return device;
 858		}
 859
 860		device->devt = path_devt;
 861
 862		list_add_rcu(&device->dev_list, &fs_devices->devices);
 863		fs_devices->num_devices++;
 864
 865		device->fs_devices = fs_devices;
 866		*new_device_added = true;
 867
 868		if (disk_super->label[0])
 869			pr_info(
 870	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
 871				disk_super->label, devid, found_transid, path,
 872				current->comm, task_pid_nr(current));
 873		else
 874			pr_info(
 875	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
 876				disk_super->fsid, devid, found_transid, path,
 877				current->comm, task_pid_nr(current));
 878
 879	} else if (!device->name || strcmp(device->name->str, path)) {
 880		/*
 881		 * When FS is already mounted.
 882		 * 1. If you are here and if the device->name is NULL that
 883		 *    means this device was missing at time of FS mount.
 884		 * 2. If you are here and if the device->name is different
 885		 *    from 'path' that means either
 886		 *      a. The same device disappeared and reappeared with
 887		 *         different name. or
 888		 *      b. The missing-disk-which-was-replaced, has
 889		 *         reappeared now.
 890		 *
  891		 * We must allow 1 and 2a above. But 2b would be spurious
  892		 * and unintentional.
 893		 *
 894		 * Further in case of 1 and 2a above, the disk at 'path'
 895		 * would have missed some transaction when it was away and
 896		 * in case of 2a the stale bdev has to be updated as well.
  897		 * 2b must not be allowed at any time.
 898		 */
 899
 900		/*
 901		 * For now, we do allow update to btrfs_fs_device through the
 902		 * btrfs dev scan cli after FS has been mounted.  We're still
 903		 * tracking a problem where systems fail mount by subvolume id
 904		 * when we reject replacement on a mounted FS.
 905		 */
 906		if (!fs_devices->opened && found_transid < device->generation) {
 907			/*
  908			 * That is, if the FS is _not_ mounted and if you
  909			 * are here, that means there is more than one
  910			 * disk with the same uuid and devid. We keep the one
  911			 * with the larger generation number or the last-in if
  912			 * the generations are equal.
 913			 */
 914			mutex_unlock(&fs_devices->device_list_mutex);
 915			btrfs_err(NULL,
 916"device %s already registered with a higher generation, found %llu expect %llu",
 917				  path, found_transid, device->generation);
 918			return ERR_PTR(-EEXIST);
 919		}
 920
 921		/*
 922		 * We are going to replace the device path for a given devid,
 923		 * make sure it's the same device if the device is mounted
 924		 *
 925		 * NOTE: the device->fs_info may not be reliable here so pass
 926		 * in a NULL to message helpers instead. This avoids a possible
 927		 * use-after-free when the fs_info and fs_info->sb are already
 928		 * torn down.
 929		 */
 930		if (device->bdev) {
 931			if (device->devt != path_devt) {
 932				mutex_unlock(&fs_devices->device_list_mutex);
 933				btrfs_warn_in_rcu(NULL,
 934	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
 935						  path, devid, found_transid,
 936						  current->comm,
 937						  task_pid_nr(current));
 938				return ERR_PTR(-EEXIST);
 939			}
 940			btrfs_info_in_rcu(NULL,
 941	"devid %llu device path %s changed to %s scanned by %s (%d)",
 942					  devid, btrfs_dev_name(device),
 943					  path, current->comm,
 944					  task_pid_nr(current));
 945		}
 946
 947		name = rcu_string_strdup(path, GFP_NOFS);
 948		if (!name) {
 949			mutex_unlock(&fs_devices->device_list_mutex);
 950			return ERR_PTR(-ENOMEM);
 951		}
 952		rcu_string_free(device->name);
 953		rcu_assign_pointer(device->name, name);
 954		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
 955			fs_devices->missing_devices--;
 956			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
 957		}
 958		device->devt = path_devt;
 959	}
 960
 961	/*
 962	 * Unmount does not free the btrfs_device struct but would zero
 963	 * generation along with most of the other members. So just update
  964	 * it back. We need it to pick the disk with the largest generation
 965	 * (as above).
 966	 */
 967	if (!fs_devices->opened) {
 968		device->generation = found_transid;
 969		fs_devices->latest_generation = max_t(u64, found_transid,
 970						fs_devices->latest_generation);
 971	}
 972
 973	fs_devices->total_devices = btrfs_super_num_devices(disk_super);
 974
 975	mutex_unlock(&fs_devices->device_list_mutex);
 976	return device;
 977}
 978
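/*
 * Allocate a new btrfs_fs_devices with the same fsid as @orig and clone every
 * device entry (devid, uuid, name and zone info) into it.  The clone is not
 * opened, i.e. it holds no block devices.
 */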
 979static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 980{
 981	struct btrfs_fs_devices *fs_devices;
 982	struct btrfs_device *device;
 983	struct btrfs_device *orig_dev;
 984	int ret = 0;
 985
 986	lockdep_assert_held(&uuid_mutex);
 987
 988	fs_devices = alloc_fs_devices(orig->fsid, NULL);
 989	if (IS_ERR(fs_devices))
 990		return fs_devices;
 991
 992	fs_devices->total_devices = orig->total_devices;
 993
 994	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
 995		const char *dev_path = NULL;
 996
 997		/*
 998		 * This is ok to do without RCU read locked because we hold the
 999		 * uuid mutex so nothing we touch in here is going to disappear.
1000		 */
1001		if (orig_dev->name)
1002			dev_path = orig_dev->name->str;
1003
1004		device = btrfs_alloc_device(NULL, &orig_dev->devid,
1005					    orig_dev->uuid, dev_path);
1006		if (IS_ERR(device)) {
1007			ret = PTR_ERR(device);
1008			goto error;
1009		}
1010
1011		if (orig_dev->zone_info) {
1012			struct btrfs_zoned_device_info *zone_info;
1013
1014			zone_info = btrfs_clone_dev_zone_info(orig_dev);
1015			if (!zone_info) {
1016				btrfs_free_device(device);
1017				ret = -ENOMEM;
1018				goto error;
1019			}
1020			device->zone_info = zone_info;
1021		}
1022
1023		list_add(&device->dev_list, &fs_devices->devices);
1024		device->fs_devices = fs_devices;
1025		fs_devices->num_devices++;
1026	}
1027	return fs_devices;
1028error:
1029	free_fs_devices(fs_devices);
1030	return ERR_PTR(ret);
1031}
1032
1033static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
1034				      struct btrfs_device **latest_dev)
1035{
1036	struct btrfs_device *device, *next;
1037
1038	/* This is the initialized path, it is safe to release the devices. */
1039	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
1040		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
1041			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1042				      &device->dev_state) &&
1043			    !test_bit(BTRFS_DEV_STATE_MISSING,
1044				      &device->dev_state) &&
1045			    (!*latest_dev ||
1046			     device->generation > (*latest_dev)->generation)) {
1047				*latest_dev = device;
1048			}
1049			continue;
1050		}
1051
1052		/*
1053		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
1054		 * in btrfs_init_dev_replace() so just continue.
1055		 */
1056		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1057			continue;
1058
1059		if (device->bdev) {
1060			blkdev_put(device->bdev, device->mode);
1061			device->bdev = NULL;
1062			fs_devices->open_devices--;
1063		}
1064		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1065			list_del_init(&device->dev_alloc_list);
1066			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1067			fs_devices->rw_devices--;
1068		}
1069		list_del_init(&device->dev_list);
1070		fs_devices->num_devices--;
1071		btrfs_free_device(device);
1072	}
1073
1074}
1075
1076/*
1077 * After we have read the system tree and know devids belonging to this
1078 * filesystem, remove the device which does not belong there.
1079 */
1080void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
1081{
1082	struct btrfs_device *latest_dev = NULL;
1083	struct btrfs_fs_devices *seed_dev;
1084
1085	mutex_lock(&uuid_mutex);
1086	__btrfs_free_extra_devids(fs_devices, &latest_dev);
1087
1088	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
1089		__btrfs_free_extra_devids(seed_dev, &latest_dev);
1090
1091	fs_devices->latest_dev = latest_dev;
1092
1093	mutex_unlock(&uuid_mutex);
1094}
1095
1096static void btrfs_close_bdev(struct btrfs_device *device)
1097{
1098	if (!device->bdev)
1099		return;
1100
1101	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1102		sync_blockdev(device->bdev);
1103		invalidate_bdev(device->bdev);
1104	}
1105
1106	blkdev_put(device->bdev, device->mode);
1107}
1108
1109static void btrfs_close_one_device(struct btrfs_device *device)
1110{
1111	struct btrfs_fs_devices *fs_devices = device->fs_devices;
1112
1113	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1114	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
1115		list_del_init(&device->dev_alloc_list);
1116		fs_devices->rw_devices--;
1117	}
1118
1119	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1120		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
1121
1122	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
1123		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
1124		fs_devices->missing_devices--;
1125	}
1126
1127	btrfs_close_bdev(device);
1128	if (device->bdev) {
1129		fs_devices->open_devices--;
1130		device->bdev = NULL;
1131	}
1132	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1133	btrfs_destroy_dev_zone_info(device);
1134
1135	device->fs_info = NULL;
1136	atomic_set(&device->dev_stats_ccnt, 0);
1137	extent_io_tree_release(&device->alloc_state);
1138
1139	/*
1140	 * Reset the flush error record. We might have a transient flush error
1141	 * in this mount, and if so we aborted the current transaction and set
1142	 * the fs to an error state, guaranteeing no super blocks can be further
1143	 * committed. However that error might be transient and if we unmount the
1144	 * filesystem and mount it again, we should allow the mount to succeed
1145	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
1146	 * filesystem again we still get flush errors, then we will again abort
1147	 * any transaction and set the error state, guaranteeing no commits of
1148	 * unsafe super blocks.
1149	 */
1150	device->last_flush_error = 0;
1151
1152	/* Verify the device is back in a pristine state  */
1153	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
1154	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1155	ASSERT(list_empty(&device->dev_alloc_list));
1156	ASSERT(list_empty(&device->post_commit_list));
1157}
1158
1159static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
1160{
1161	struct btrfs_device *device, *tmp;
1162
1163	lockdep_assert_held(&uuid_mutex);
1164
1165	if (--fs_devices->opened > 0)
1166		return;
1167
1168	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
1169		btrfs_close_one_device(device);
1170
1171	WARN_ON(fs_devices->open_devices);
1172	WARN_ON(fs_devices->rw_devices);
1173	fs_devices->opened = 0;
1174	fs_devices->seeding = false;
1175	fs_devices->fs_info = NULL;
1176}
1177
1178void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1179{
1180	LIST_HEAD(list);
1181	struct btrfs_fs_devices *tmp;
1182
1183	mutex_lock(&uuid_mutex);
1184	close_fs_devices(fs_devices);
1185	if (!fs_devices->opened) {
1186		list_splice_init(&fs_devices->seed_list, &list);
1187
1188		/*
1189		 * If the struct btrfs_fs_devices is not assembled with any
1190		 * other device, it can be re-initialized during the next mount
 1191		 * without needing the device-scan step. Therefore, it can be
1192		 * fully freed.
1193		 */
1194		if (fs_devices->num_devices == 1) {
1195			list_del(&fs_devices->fs_list);
1196			free_fs_devices(fs_devices);
1197		}
1198	}
1199
1200
1201	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
1202		close_fs_devices(fs_devices);
1203		list_del(&fs_devices->seed_list);
1204		free_fs_devices(fs_devices);
1205	}
1206	mutex_unlock(&uuid_mutex);
1207}
1208
1209static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1210				fmode_t flags, void *holder)
1211{
1212	struct btrfs_device *device;
1213	struct btrfs_device *latest_dev = NULL;
1214	struct btrfs_device *tmp_device;
1215
1216	flags |= FMODE_EXCL;
1217
1218	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
1219				 dev_list) {
1220		int ret;
1221
1222		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
1223		if (ret == 0 &&
1224		    (!latest_dev || device->generation > latest_dev->generation)) {
1225			latest_dev = device;
1226		} else if (ret == -ENODATA) {
1227			fs_devices->num_devices--;
1228			list_del(&device->dev_list);
1229			btrfs_free_device(device);
1230		}
1231	}
1232	if (fs_devices->open_devices == 0)
1233		return -EINVAL;
1234
1235	fs_devices->opened = 1;
1236	fs_devices->latest_dev = latest_dev;
1237	fs_devices->total_rw_bytes = 0;
1238	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
1239	fs_devices->read_policy = BTRFS_READ_POLICY_PID;
1240
1241	return 0;
1242}
1243
1244static int devid_cmp(void *priv, const struct list_head *a,
1245		     const struct list_head *b)
1246{
1247	const struct btrfs_device *dev1, *dev2;
1248
1249	dev1 = list_entry(a, struct btrfs_device, dev_list);
1250	dev2 = list_entry(b, struct btrfs_device, dev_list);
1251
1252	if (dev1->devid < dev2->devid)
1253		return -1;
1254	else if (dev1->devid > dev2->devid)
1255		return 1;
1256	return 0;
1257}
1258
1259int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1260		       fmode_t flags, void *holder)
1261{
1262	int ret;
1263
1264	lockdep_assert_held(&uuid_mutex);
1265	/*
1266	 * The device_list_mutex cannot be taken here in case opening the
1267	 * underlying device takes further locks like open_mutex.
1268	 *
1269	 * We also don't need the lock here as this is called during mount and
1270	 * exclusion is provided by uuid_mutex
1271	 */
1272
1273	if (fs_devices->opened) {
1274		fs_devices->opened++;
1275		ret = 0;
1276	} else {
1277		list_sort(NULL, &fs_devices->devices, devid_cmp);
1278		ret = open_fs_devices(fs_devices, flags, holder);
1279	}
1280
1281	return ret;
1282}
1283
1284void btrfs_release_disk_super(struct btrfs_super_block *super)
1285{
1286	struct page *page = virt_to_page(super);
1287
1288	put_page(page);
1289}
1290
1291static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
1292						       u64 bytenr, u64 bytenr_orig)
1293{
1294	struct btrfs_super_block *disk_super;
1295	struct page *page;
1296	void *p;
1297	pgoff_t index;
1298
1299	/* make sure our super fits in the device */
1300	if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
1301		return ERR_PTR(-EINVAL);
1302
1303	/* make sure our super fits in the page */
1304	if (sizeof(*disk_super) > PAGE_SIZE)
1305		return ERR_PTR(-EINVAL);
1306
1307	/* make sure our super doesn't straddle pages on disk */
1308	index = bytenr >> PAGE_SHIFT;
1309	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1310		return ERR_PTR(-EINVAL);
1311
1312	/* pull in the page with our super */
1313	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
1314
1315	if (IS_ERR(page))
1316		return ERR_CAST(page);
1317
1318	p = page_address(page);
1319
1320	/* align our pointer to the offset of the super block */
1321	disk_super = p + offset_in_page(bytenr);
1322
1323	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
1324	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1325		btrfs_release_disk_super(p);
1326		return ERR_PTR(-EINVAL);
1327	}
1328
1329	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
1330		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
1331
1332	return disk_super;
1333}
1334
1335int btrfs_forget_devices(dev_t devt)
1336{
1337	int ret;
1338
1339	mutex_lock(&uuid_mutex);
1340	ret = btrfs_free_stale_devices(devt, NULL);
1341	mutex_unlock(&uuid_mutex);
1342
1343	return ret;
1344}
1345
1346/*
1347 * Look for a btrfs signature on a device. This may be called out of the mount path
1348 * and we are not allowed to call set_blocksize during the scan. The superblock
 1349 * is read via the pagecache.
1350 */
1351struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1352					   void *holder)
1353{
1354	struct btrfs_super_block *disk_super;
1355	bool new_device_added = false;
1356	struct btrfs_device *device = NULL;
1357	struct block_device *bdev;
1358	u64 bytenr, bytenr_orig;
1359	int ret;
1360
1361	lockdep_assert_held(&uuid_mutex);
1362
1363	/*
1364	 * we would like to check all the supers, but that would make
1365	 * a btrfs mount succeed after a mkfs from a different FS.
1366	 * So, we need to add a special mount option to scan for
1367	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1368	 */
1369	flags |= FMODE_EXCL;
1370
1371	bdev = blkdev_get_by_path(path, flags, holder);
1372	if (IS_ERR(bdev))
1373		return ERR_CAST(bdev);
1374
1375	bytenr_orig = btrfs_sb_offset(0);
1376	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
1377	if (ret) {
1378		device = ERR_PTR(ret);
1379		goto error_bdev_put;
1380	}
1381
1382	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
1383	if (IS_ERR(disk_super)) {
1384		device = ERR_CAST(disk_super);
1385		goto error_bdev_put;
1386	}
1387
1388	device = device_list_add(path, disk_super, &new_device_added);
1389	if (!IS_ERR(device) && new_device_added)
1390		btrfs_free_stale_devices(device->devt, device);
1391
1392	btrfs_release_disk_super(disk_super);
1393
1394error_bdev_put:
1395	blkdev_put(bdev, flags);
1396
1397	return device;
1398}
1399
1400/*
 1401 * Try to find a chunk that intersects the [start, start + len] range and when one
1402 * such is found, record the end of it in *start
1403 */
1404static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1405				    u64 len)
1406{
1407	u64 physical_start, physical_end;
1408
1409	lockdep_assert_held(&device->fs_info->chunk_mutex);
1410
1411	if (!find_first_extent_bit(&device->alloc_state, *start,
1412				   &physical_start, &physical_end,
1413				   CHUNK_ALLOCATED, NULL)) {
1414
1415		if (in_range(physical_start, *start, len) ||
1416		    in_range(*start, physical_start,
1417			     physical_end - physical_start)) {
1418			*start = physical_end + 1;
1419			return true;
1420		}
1421	}
1422	return false;
1423}
1424
1425static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
1426{
1427	switch (device->fs_devices->chunk_alloc_policy) {
1428	case BTRFS_CHUNK_ALLOC_REGULAR:
1429		return max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
1430	case BTRFS_CHUNK_ALLOC_ZONED:
1431		/*
1432		 * We don't care about the starting region like regular
1433		 * allocator, because we anyway use/reserve the first two zones
1434		 * for superblock logging.
1435		 */
1436		return ALIGN(start, device->zone_info->zone_size);
1437	default:
1438		BUG();
1439	}
1440}
1441
1442static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
1443					u64 *hole_start, u64 *hole_size,
1444					u64 num_bytes)
1445{
1446	u64 zone_size = device->zone_info->zone_size;
1447	u64 pos;
1448	int ret;
1449	bool changed = false;
1450
1451	ASSERT(IS_ALIGNED(*hole_start, zone_size));
1452
1453	while (*hole_size > 0) {
1454		pos = btrfs_find_allocatable_zones(device, *hole_start,
1455						   *hole_start + *hole_size,
1456						   num_bytes);
1457		if (pos != *hole_start) {
1458			*hole_size = *hole_start + *hole_size - pos;
1459			*hole_start = pos;
1460			changed = true;
1461			if (*hole_size < num_bytes)
1462				break;
1463		}
1464
1465		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);
1466
1467		/* Range is ensured to be empty */
1468		if (!ret)
1469			return changed;
1470
1471		/* Given hole range was invalid (outside of device) */
1472		if (ret == -ERANGE) {
1473			*hole_start += *hole_size;
1474			*hole_size = 0;
1475			return true;
1476		}
1477
1478		*hole_start += zone_size;
1479		*hole_size -= zone_size;
1480		changed = true;
1481	}
1482
1483	return changed;
1484}
1485
1486/*
1487 * Check if specified hole is suitable for allocation.
1488 *
1489 * @device:	the device which we have the hole
1490 * @hole_start: starting position of the hole
1491 * @hole_size:	the size of the hole
1492 * @num_bytes:	the size of the free space that we need
1493 *
1494 * This function may modify @hole_start and @hole_size to reflect the suitable
1495 * position for allocation. Returns 1 if hole position is updated, 0 otherwise.
1496 */
1497static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1498				  u64 *hole_size, u64 num_bytes)
1499{
1500	bool changed = false;
1501	u64 hole_end = *hole_start + *hole_size;
1502
1503	for (;;) {
1504		/*
1505		 * Check before we set max_hole_start, otherwise we could end up
1506		 * sending back this offset anyway.
1507		 */
1508		if (contains_pending_extent(device, hole_start, *hole_size)) {
1509			if (hole_end >= *hole_start)
1510				*hole_size = hole_end - *hole_start;
1511			else
1512				*hole_size = 0;
1513			changed = true;
1514		}
1515
1516		switch (device->fs_devices->chunk_alloc_policy) {
1517		case BTRFS_CHUNK_ALLOC_REGULAR:
1518			/* No extra check */
1519			break;
1520		case BTRFS_CHUNK_ALLOC_ZONED:
1521			if (dev_extent_hole_check_zoned(device, hole_start,
1522							hole_size, num_bytes)) {
1523				changed = true;
1524				/*
1525				 * The changed hole can contain pending extent.
1526				 * Loop again to check that.
1527				 */
1528				continue;
1529			}
1530			break;
1531		default:
1532			BUG();
1533		}
1534
1535		break;
1536	}
1537
1538	return changed;
1539}
1540
1541/*
1542 * Find free space in the specified device.
1543 *
1544 * @device:	  the device which we search the free space in
1545 * @num_bytes:	  the size of the free space that we need
1546 * @search_start: the position from which to begin the search
1547 * @start:	  store the start of the free space.
 1548 * @len:	  the size of the free space that we find, or the size
1549 *		  of the max free space if we don't find suitable free space
1550 *
1551 * This does a pretty simple search, the expectation is that it is called very
1552 * infrequently and that a given device has a small number of extents.
1553 *
 1554 * @start is used to store the start of the free space if we find it. But if we
1555 * don't find suitable free space, it will be used to store the start position
1556 * of the max free space.
1557 *
1558 * @len is used to store the size of the free space that we find.
1559 * But if we don't find suitable free space, it is used to store the size of
1560 * the max free space.
1561 *
 1562 * NOTE: This function will search the *commit* root of the device tree, and does
 1563 * an extra check to ensure dev extents are not double allocated.
 1564 * This makes the function safe for allocating dev extents but it may not report
 1565 * correct usable device space, as a device extent freed in the current transaction
1566 * is not reported as available.
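 *
 * Illustrative example: on a 10M device with existing dev extents at [2M, 3M)
 * and [5M, 6M), a search for num_bytes = 2M returns *start = 3M and *len = 2M
 * (the hole between the two extents), while a search for 5M fails with -ENOSPC
 * but still reports the largest hole, *start = 6M and *len = 4M (the reserved
 * range at the start of the device is skipped in both cases).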
1567 */
1568static int find_free_dev_extent_start(struct btrfs_device *device,
1569				u64 num_bytes, u64 search_start, u64 *start,
1570				u64 *len)
1571{
1572	struct btrfs_fs_info *fs_info = device->fs_info;
1573	struct btrfs_root *root = fs_info->dev_root;
1574	struct btrfs_key key;
1575	struct btrfs_dev_extent *dev_extent;
1576	struct btrfs_path *path;
1577	u64 hole_size;
1578	u64 max_hole_start;
1579	u64 max_hole_size;
1580	u64 extent_end;
1581	u64 search_end = device->total_bytes;
1582	int ret;
1583	int slot;
1584	struct extent_buffer *l;
1585
1586	search_start = dev_extent_search_start(device, search_start);
1587
1588	WARN_ON(device->zone_info &&
1589		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
1590
1591	path = btrfs_alloc_path();
1592	if (!path)
1593		return -ENOMEM;
1594
1595	max_hole_start = search_start;
1596	max_hole_size = 0;
1597
1598again:
1599	if (search_start >= search_end ||
1600		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1601		ret = -ENOSPC;
1602		goto out;
1603	}
1604
1605	path->reada = READA_FORWARD;
1606	path->search_commit_root = 1;
1607	path->skip_locking = 1;
1608
1609	key.objectid = device->devid;
1610	key.offset = search_start;
1611	key.type = BTRFS_DEV_EXTENT_KEY;
1612
1613	ret = btrfs_search_backwards(root, &key, path);
1614	if (ret < 0)
1615		goto out;
1616
1617	while (search_start < search_end) {
1618		l = path->nodes[0];
1619		slot = path->slots[0];
1620		if (slot >= btrfs_header_nritems(l)) {
1621			ret = btrfs_next_leaf(root, path);
1622			if (ret == 0)
1623				continue;
1624			if (ret < 0)
1625				goto out;
1626
1627			break;
1628		}
1629		btrfs_item_key_to_cpu(l, &key, slot);
1630
1631		if (key.objectid < device->devid)
1632			goto next;
1633
1634		if (key.objectid > device->devid)
1635			break;
1636
1637		if (key.type != BTRFS_DEV_EXTENT_KEY)
1638			goto next;
1639
1640		if (key.offset > search_end)
1641			break;
1642
1643		if (key.offset > search_start) {
1644			hole_size = key.offset - search_start;
1645			dev_extent_hole_check(device, &search_start, &hole_size,
1646					      num_bytes);
1647
1648			if (hole_size > max_hole_size) {
1649				max_hole_start = search_start;
1650				max_hole_size = hole_size;
1651			}
1652
1653			/*
 1654			 * If this free space is greater than what we need,
1655			 * it must be the max free space that we have found
1656			 * until now, so max_hole_start must point to the start
1657			 * of this free space and the length of this free space
1658			 * is stored in max_hole_size. Thus, we return
1659			 * max_hole_start and max_hole_size and go back to the
1660			 * caller.
1661			 */
1662			if (hole_size >= num_bytes) {
1663				ret = 0;
1664				goto out;
1665			}
1666		}
1667
1668		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1669		extent_end = key.offset + btrfs_dev_extent_length(l,
1670								  dev_extent);
1671		if (extent_end > search_start)
1672			search_start = extent_end;
1673next:
1674		path->slots[0]++;
1675		cond_resched();
1676	}
1677
1678	/*
1679	 * At this point, search_start should be the end of
1680	 * allocated dev extents, and when shrinking the device,
1681	 * search_end may be smaller than search_start.
1682	 */
1683	if (search_end > search_start) {
1684		hole_size = search_end - search_start;
1685		if (dev_extent_hole_check(device, &search_start, &hole_size,
1686					  num_bytes)) {
1687			btrfs_release_path(path);
1688			goto again;
1689		}
1690
1691		if (hole_size > max_hole_size) {
1692			max_hole_start = search_start;
1693			max_hole_size = hole_size;
1694		}
1695	}
1696
1697	/* See above. */
1698	if (max_hole_size < num_bytes)
1699		ret = -ENOSPC;
1700	else
1701		ret = 0;
1702
1703	ASSERT(max_hole_start + max_hole_size <= search_end);
1704out:
1705	btrfs_free_path(path);
1706	*start = max_hole_start;
1707	if (len)
1708		*len = max_hole_size;
1709	return ret;
1710}
1711
1712int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1713			 u64 *start, u64 *len)
1714{
1715	/* FIXME use last free of some kind */
1716	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1717}
1718
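/*
 * Find the dev extent item covering (@device->devid, @start) in the device
 * tree, record its length in @dev_extent_len and delete the item.
 */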
1719static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1720			  struct btrfs_device *device,
1721			  u64 start, u64 *dev_extent_len)
1722{
1723	struct btrfs_fs_info *fs_info = device->fs_info;
1724	struct btrfs_root *root = fs_info->dev_root;
1725	int ret;
1726	struct btrfs_path *path;
1727	struct btrfs_key key;
1728	struct btrfs_key found_key;
1729	struct extent_buffer *leaf = NULL;
1730	struct btrfs_dev_extent *extent = NULL;
1731
1732	path = btrfs_alloc_path();
1733	if (!path)
1734		return -ENOMEM;
1735
1736	key.objectid = device->devid;
1737	key.offset = start;
1738	key.type = BTRFS_DEV_EXTENT_KEY;
1739again:
1740	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1741	if (ret > 0) {
1742		ret = btrfs_previous_item(root, path, key.objectid,
1743					  BTRFS_DEV_EXTENT_KEY);
1744		if (ret)
1745			goto out;
1746		leaf = path->nodes[0];
1747		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1748		extent = btrfs_item_ptr(leaf, path->slots[0],
1749					struct btrfs_dev_extent);
1750		BUG_ON(found_key.offset > start || found_key.offset +
1751		       btrfs_dev_extent_length(leaf, extent) < start);
1752		key = found_key;
1753		btrfs_release_path(path);
1754		goto again;
1755	} else if (ret == 0) {
1756		leaf = path->nodes[0];
1757		extent = btrfs_item_ptr(leaf, path->slots[0],
1758					struct btrfs_dev_extent);
1759	} else {
1760		goto out;
1761	}
1762
1763	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1764
1765	ret = btrfs_del_item(trans, root, path);
1766	if (ret == 0)
1767		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1768out:
1769	btrfs_free_path(path);
1770	return ret;
1771}
1772
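/*
 * Return the logical address right past the last mapped chunk (0 if there is
 * none), i.e. where the next chunk will be placed.
 */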
1773static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1774{
1775	struct extent_map_tree *em_tree;
1776	struct extent_map *em;
1777	struct rb_node *n;
1778	u64 ret = 0;
1779
1780	em_tree = &fs_info->mapping_tree;
1781	read_lock(&em_tree->lock);
1782	n = rb_last(&em_tree->map.rb_root);
1783	if (n) {
1784		em = rb_entry(n, struct extent_map, rb_node);
1785		ret = em->start + em->len;
1786	}
1787	read_unlock(&em_tree->lock);
1788
1789	return ret;
1790}
1791
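/*
 * Find the highest devid currently present in the chunk tree and return the
 * next free one in @devid_ret (1 if no device item exists yet).
 */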
1792static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1793				    u64 *devid_ret)
1794{
1795	int ret;
1796	struct btrfs_key key;
1797	struct btrfs_key found_key;
1798	struct btrfs_path *path;
1799
1800	path = btrfs_alloc_path();
1801	if (!path)
1802		return -ENOMEM;
1803
1804	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1805	key.type = BTRFS_DEV_ITEM_KEY;
1806	key.offset = (u64)-1;
1807
1808	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1809	if (ret < 0)
1810		goto error;
1811
1812	if (ret == 0) {
1813		/* Corruption */
1814		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1815		ret = -EUCLEAN;
1816		goto error;
1817	}
1818
1819	ret = btrfs_previous_item(fs_info->chunk_root, path,
1820				  BTRFS_DEV_ITEMS_OBJECTID,
1821				  BTRFS_DEV_ITEM_KEY);
1822	if (ret) {
1823		*devid_ret = 1;
1824	} else {
1825		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1826				      path->slots[0]);
1827		*devid_ret = found_key.offset + 1;
1828	}
1829	ret = 0;
1830error:
1831	btrfs_free_path(path);
1832	return ret;
1833}
1834
1835/*
1836 * the device information is stored in the chunk root
1837 * the btrfs_device struct should be fully filled in
1838 */
1839static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
 
1840			    struct btrfs_device *device)
1841{
1842	int ret;
1843	struct btrfs_path *path;
1844	struct btrfs_dev_item *dev_item;
1845	struct extent_buffer *leaf;
1846	struct btrfs_key key;
1847	unsigned long ptr;
1848
1849	path = btrfs_alloc_path();
1850	if (!path)
1851		return -ENOMEM;
1852
1853	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1854	key.type = BTRFS_DEV_ITEM_KEY;
1855	key.offset = device->devid;
1856
1857	btrfs_reserve_chunk_metadata(trans, true);
1858	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1859				      &key, sizeof(*dev_item));
1860	btrfs_trans_release_chunk_metadata(trans);
1861	if (ret)
1862		goto out;
1863
1864	leaf = path->nodes[0];
1865	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1866
1867	btrfs_set_device_id(leaf, dev_item, device->devid);
1868	btrfs_set_device_generation(leaf, dev_item, 0);
1869	btrfs_set_device_type(leaf, dev_item, device->type);
1870	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1871	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1872	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1873	btrfs_set_device_total_bytes(leaf, dev_item,
1874				     btrfs_device_get_disk_total_bytes(device));
1875	btrfs_set_device_bytes_used(leaf, dev_item,
1876				    btrfs_device_get_bytes_used(device));
1877	btrfs_set_device_group(leaf, dev_item, 0);
1878	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1879	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1880	btrfs_set_device_start_offset(leaf, dev_item, 0);
1881
1882	ptr = btrfs_device_uuid(dev_item);
1883	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1884	ptr = btrfs_device_fsid(dev_item);
1885	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1886			    ptr, BTRFS_FSID_SIZE);
1887	btrfs_mark_buffer_dirty(leaf);
1888
1889	ret = 0;
1890out:
1891	btrfs_free_path(path);
1892	return ret;
1893}
1894
1895/*
1896 * Function to update ctime/mtime for a given device path.
1897 * Mainly used for ctime/mtime based probes such as libblkid.
1898 *
1899 * We don't care about errors here; this is just to be kind to userspace.
1900 */
1901static void update_dev_time(const char *device_path)
1902{
1903	struct path path;
1904	struct timespec64 now;
1905	int ret;
1906
1907	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1908	if (ret)
1909		return;
1910
1911	now = current_time(d_inode(path.dentry));
1912	inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1913	path_put(&path);
1914}
1915
1916static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
1917			     struct btrfs_device *device)
1918{
1919	struct btrfs_root *root = device->fs_info->chunk_root;
1920	int ret;
1921	struct btrfs_path *path;
1922	struct btrfs_key key;
1923
1924	path = btrfs_alloc_path();
1925	if (!path)
1926		return -ENOMEM;
1927
1928	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1929	key.type = BTRFS_DEV_ITEM_KEY;
1930	key.offset = device->devid;
 
1931
1932	btrfs_reserve_chunk_metadata(trans, false);
1933	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1934	btrfs_trans_release_chunk_metadata(trans);
1935	if (ret) {
1936		if (ret > 0)
1937			ret = -ENOENT;
 
1938		goto out;
1939	}
1940
1941	ret = btrfs_del_item(trans, root, path);
1942out:
1943	btrfs_free_path(path);
1944	return ret;
1945}
1946
1947/*
1948 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1949 * filesystem. It's up to the caller to adjust that number, e.g. for device
1950 * replace.
1951 */
1952static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1953		u64 num_devices)
1954{
1955	u64 all_avail;
1956	unsigned seq;
1957	int i;
1958
1959	do {
1960		seq = read_seqbegin(&fs_info->profiles_lock);
1961
1962		all_avail = fs_info->avail_data_alloc_bits |
1963			    fs_info->avail_system_alloc_bits |
1964			    fs_info->avail_metadata_alloc_bits;
1965	} while (read_seqretry(&fs_info->profiles_lock, seq));
1966
1967	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1968		if (!(all_avail & btrfs_raid_array[i].bg_flag))
1969			continue;
1970
1971		if (num_devices < btrfs_raid_array[i].devs_min)
1972			return btrfs_raid_array[i].mindev_error;
1973	}
 
1974
1975	return 0;
1976}
1977
1978static struct btrfs_device * btrfs_find_next_active_device(
1979		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1980{
1981	struct btrfs_device *next_device;
1982
1983	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1984		if (next_device != device &&
1985		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1986		    && next_device->bdev)
1987			return next_device;
1988	}
1989
1990	return NULL;
1991}
1992
1993/*
1994 * Helper function to check if the given device is part of s_bdev / latest_dev
1995 * and replace it with the provided or the next active device. In the context
1996 * where this function is called, there should always be another active device
1997 * (or this_dev) available.
1998 */
1999void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2000					    struct btrfs_device *next_device)
2001{
2002	struct btrfs_fs_info *fs_info = device->fs_info;
2003
2004	if (!next_device)
2005		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2006							    device);
2007	ASSERT(next_device);
2008
2009	if (fs_info->sb->s_bdev &&
2010			(fs_info->sb->s_bdev == device->bdev))
2011		fs_info->sb->s_bdev = next_device->bdev;
2012
2013	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2014		fs_info->fs_devices->latest_dev = next_device;
2015}
2016
2017/*
2018 * Return btrfs_fs_devices::num_devices excluding the device that is
2019 * currently being replaced.
2020 */
2021static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2022{
2023	u64 num_devices = fs_info->fs_devices->num_devices;
2024
2025	down_read(&fs_info->dev_replace.rwsem);
2026	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2027		ASSERT(num_devices > 1);
2028		num_devices--;
2029	}
2030	up_read(&fs_info->dev_replace.rwsem);
2031
2032	return num_devices;
2033}
2034
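/*
 * Wipe the magic of superblock copy @copy_num on @bdev so the device is no
 * longer detected as a btrfs device, then sync the change back to disk.
 */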
2035static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
2036				     struct block_device *bdev, int copy_num)
2037{
2038	struct btrfs_super_block *disk_super;
2039	const size_t len = sizeof(disk_super->magic);
2040	const u64 bytenr = btrfs_sb_offset(copy_num);
2041	int ret;
2042
2043	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
2044	if (IS_ERR(disk_super))
2045		return;
2046
2047	memset(&disk_super->magic, 0, len);
2048	folio_mark_dirty(virt_to_folio(disk_super));
2049	btrfs_release_disk_super(disk_super);
2050
2051	ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
2052	if (ret)
2053		btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
2054			copy_num, ret);
2055}
2056
2057void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2058			       struct block_device *bdev,
2059			       const char *device_path)
2060{
2061	int copy_num;
2062
2063	if (!bdev)
2064		return;
2065
2066	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2067		if (bdev_is_zoned(bdev))
2068			btrfs_reset_sb_log_zones(bdev, copy_num);
2069		else
2070			btrfs_scratch_superblock(fs_info, bdev, copy_num);
2071	}
2072
2073	/* Notify udev that device has changed */
2074	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2075
2076	/* Update ctime/mtime for device path for libblkid */
2077	update_dev_time(device_path);
2078}
2079
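/*
 * Remove a device from a mounted filesystem: shrink it to zero, delete its
 * dev item, drop it from the in-memory device lists and wipe its superblock
 * copies.  The caller is responsible for the final blkdev_put() using the
 * returned *bdev and *mode.
 */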
2080int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2081		    struct btrfs_dev_lookup_args *args,
2082		    struct block_device **bdev, fmode_t *mode)
2083{
2084	struct btrfs_trans_handle *trans;
2085	struct btrfs_device *device;
2086	struct btrfs_fs_devices *cur_devices;
2087	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2088	u64 num_devices;
2089	int ret = 0;
2090
2091	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
2092		btrfs_err(fs_info, "device remove not supported on extent tree v2 yet");
2093		return -EINVAL;
2094	}
2095
2096	/*
2097	 * The device list in fs_devices is accessed without locks (neither
2098	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2099	 * filesystem and another device rm cannot run.
2100	 */
2101	num_devices = btrfs_num_devices(fs_info);
2102
2103	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2104	if (ret)
2105		return ret;
2106
2107	device = btrfs_find_device(fs_info->fs_devices, args);
2108	if (!device) {
2109		if (args->missing)
2110			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2111		else
2112			ret = -ENOENT;
2113		return ret;
 
2114	}
2115
2116	if (btrfs_pinned_by_swapfile(fs_info, device)) {
2117		btrfs_warn_in_rcu(fs_info,
2118		  "cannot remove device %s (devid %llu) due to active swapfile",
2119				  btrfs_dev_name(device), device->devid);
2120		return -ETXTBSY;
2121	}
2122
2123	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2124		return BTRFS_ERROR_DEV_TGT_REPLACE;
2125
2126	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2127	    fs_info->fs_devices->rw_devices == 1)
2128		return BTRFS_ERROR_DEV_ONLY_WRITABLE;
2129
2130	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2131		mutex_lock(&fs_info->chunk_mutex);
2132		list_del_init(&device->dev_alloc_list);
2133		device->fs_devices->rw_devices--;
2134		mutex_unlock(&fs_info->chunk_mutex);
 
2135	}
2136
 
2137	ret = btrfs_shrink_device(device, 0);
 
2138	if (ret)
2139		goto error_undo;
2140
2141	trans = btrfs_start_transaction(fs_info->chunk_root, 0);
2142	if (IS_ERR(trans)) {
2143		ret = PTR_ERR(trans);
2144		goto error_undo;
2145	}
2146
2147	ret = btrfs_rm_dev_item(trans, device);
2148	if (ret) {
2149		/* Any error in dev item removal is critical */
2150		btrfs_crit(fs_info,
2151			   "failed to remove device item for devid %llu: %d",
2152			   device->devid, ret);
2153		btrfs_abort_transaction(trans, ret);
2154		btrfs_end_transaction(trans);
2155		return ret;
2156	}
2157
2158	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2159	btrfs_scrub_cancel_dev(device);
2160
2161	/*
2162	 * The device list mutex makes sure that we don't change
2163	 * the device list while someone else is writing out all
2164	 * the device supers. Whoever is writing all supers should
2165	 * lock the device list mutex before getting the number of
2166	 * devices in the super block (super_copy). Conversely,
2167	 * whoever updates the number of devices in the super block
2168	 * (super_copy) should hold the device list mutex.
2169	 */
2170
2171	/*
2172	 * In normal cases cur_devices == fs_devices. But when deleting
2173	 * a seed device, cur_devices should point to the seed's own
2174	 * fs_devices, listed under fs_devices->seed_list.
2175	 */
2176	cur_devices = device->fs_devices;
2177	mutex_lock(&fs_devices->device_list_mutex);
2178	list_del_rcu(&device->dev_list);
2179
2180	cur_devices->num_devices--;
2181	cur_devices->total_devices--;
2182	/* Update total_devices of the parent fs_devices if it's a seed */
2183	if (cur_devices != fs_devices)
2184		fs_devices->total_devices--;
2185
2186	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2187		cur_devices->missing_devices--;
2188
2189	btrfs_assign_next_active_device(device, NULL);
2190
2191	if (device->bdev) {
2192		cur_devices->open_devices--;
2193		/* remove sysfs entry */
2194		btrfs_sysfs_remove_device(device);
2195	}
2196
2197	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2198	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2199	mutex_unlock(&fs_devices->device_list_mutex);
2200
2201	/*
2202	 * At this point, the device is zero sized and detached from the
2203	 * devices list.  All that's left is to zero out the old supers and
2204	 * free the device.
2205	 *
2206	 * We cannot call btrfs_close_bdev() here because we're holding the sb
2207	 * write lock, and blkdev_put() will pull in the ->open_mutex on the
2208	 * block device and its dependencies.  Instead just flush the device
2209	 * and let the caller do the final blkdev_put.
2210	 */
2211	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2212		btrfs_scratch_superblocks(fs_info, device->bdev,
2213					  device->name->str);
2214		if (device->bdev) {
2215			sync_blockdev(device->bdev);
2216			invalidate_bdev(device->bdev);
2217		}
2218	}
2219
2220	*bdev = device->bdev;
2221	*mode = device->mode;
2222	synchronize_rcu();
2223	btrfs_free_device(device);
2224
2225	/*
2226	 * This can happen if cur_devices is the private seed devices list.  We
2227	 * cannot call close_fs_devices() here because it expects the uuid_mutex
2228	 * to be held, but in fact we don't need that for the private
2229	 * seed_devices; we can simply decrement cur_devices->opened and then
2230	 * remove it from our list and free the fs_devices.
2231	 */
2232	if (cur_devices->num_devices == 0) {
2233		list_del_init(&cur_devices->seed_list);
2234		ASSERT(cur_devices->opened == 1);
2235		cur_devices->opened--;
2236		free_fs_devices(cur_devices);
2237	}
2238
2239	ret = btrfs_commit_transaction(trans);
2240
2241	return ret;
2242
2243error_undo:
2244	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2245		mutex_lock(&fs_info->chunk_mutex);
2246		list_add(&device->dev_alloc_list,
2247			 &fs_devices->alloc_list);
2248		device->fs_devices->rw_devices++;
2249		mutex_unlock(&fs_info->chunk_mutex);
2250	}
2251	return ret;
2252}
2253
2254void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
 
2255{
2256	struct btrfs_fs_devices *fs_devices;
2257
2258	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2259
2260	/*
2261	 * In case of a fs with no seed, srcdev->fs_devices will point
2262	 * to the fs_devices of fs_info. However, when the dev being replaced
2263	 * is a seed dev, it will point to the seed's local fs_devices. In
2264	 * short, srcdev will have its correct fs_devices in both cases.
2265	 */
2266	fs_devices = srcdev->fs_devices;
2267
2268	list_del_rcu(&srcdev->dev_list);
2269	list_del(&srcdev->dev_alloc_list);
2270	fs_devices->num_devices--;
2271	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2272		fs_devices->missing_devices--;
2273
2274	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2275		fs_devices->rw_devices--;
 
2276
2277	if (srcdev->bdev)
2278		fs_devices->open_devices--;
2279}
2280
2281void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
 
2282{
2283	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2284
2285	mutex_lock(&uuid_mutex);
2286
2287	btrfs_close_bdev(srcdev);
2288	synchronize_rcu();
2289	btrfs_free_device(srcdev);
2290
2291	/* If there are no devices left we'd rather delete the fs_devices. */
2292	if (!fs_devices->num_devices) {
2293		/*
2294		 * On a mounted FS, num_devices can't be zero unless it's a
2295		 * seed. In case of a seed device being replaced, the replace
2296		 * target is added to the sprout FS, so there will be no
2297		 * devices left under the seed FS.
2298		 */
2299		ASSERT(fs_devices->seeding);
2300
2301		list_del_init(&fs_devices->seed_list);
2302		close_fs_devices(fs_devices);
2303		free_fs_devices(fs_devices);
2304	}
2305	mutex_unlock(&uuid_mutex);
2306}
2307
2308void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2309{
2310	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2311
2312	mutex_lock(&fs_devices->device_list_mutex);
2313
2314	btrfs_sysfs_remove_device(tgtdev);
2315
2316	if (tgtdev->bdev)
2317		fs_devices->open_devices--;
2318
2319	fs_devices->num_devices--;
2320
2321	btrfs_assign_next_active_device(tgtdev, NULL);
2322
2323	list_del_rcu(&tgtdev->dev_list);
2324
2325	mutex_unlock(&fs_devices->device_list_mutex);
2326
2327	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2328				  tgtdev->name->str);
2329
2330	btrfs_close_bdev(tgtdev);
2331	synchronize_rcu();
2332	btrfs_free_device(tgtdev);
2333}
2334
2335/*
2336 * Populate args from device at path.
2337 *
2338 * @fs_info:	the filesystem
2339 * @args:	the args to populate
2340 * @path:	the path to the device
2341 *
2342 * This will read the super block of the device at @path and populate @args with
2343 * the devid, fsid, and uuid.  This is meant to be used for ioctls that need to
2344 * look up a device to operate on, but need to do it before we take any locks.
2345 * This properly handles the special case of "missing" that a user may pass in,
2346 * and does some basic sanity checks.  The caller must make sure that @path is
2347 * properly NUL terminated before calling in, and must call
2348 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2349 * uuid buffers.
2350 *
2351 * Return: 0 for success, -errno for failure
2352 */
2353int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2354				 struct btrfs_dev_lookup_args *args,
2355				 const char *path)
2356{
 
2357	struct btrfs_super_block *disk_super;
2358	struct block_device *bdev;
2359	int ret;
2360
2361	if (!path || !path[0])
2362		return -EINVAL;
2363	if (!strcmp(path, "missing")) {
2364		args->missing = true;
2365		return 0;
2366	}
2367
2368	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2369	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2370	if (!args->uuid || !args->fsid) {
2371		btrfs_put_dev_args_from_path(args);
2372		return -ENOMEM;
2373	}
2374
2375	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
2376				    &bdev, &disk_super);
2377	if (ret) {
2378		btrfs_put_dev_args_from_path(args);
2379		return ret;
2380	}
2381
2382	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2383	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2384	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2385		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2386	else
2387		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2388	btrfs_release_disk_super(disk_super);
2389	blkdev_put(bdev, FMODE_READ);
2390	return 0;
2391}
2392
2393/*
2394 * Only use this jointly with btrfs_get_dev_args_from_path() because we will
2395 * allocate our ->uuid and ->fsid pointers; everybody else uses local variables
2396 * that don't need to be freed.
2397 */
2398void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
2399{
2400	kfree(args->uuid);
2401	kfree(args->fsid);
2402	args->uuid = NULL;
2403	args->fsid = NULL;
2404}
2405
2406struct btrfs_device *btrfs_find_device_by_devspec(
2407		struct btrfs_fs_info *fs_info, u64 devid,
2408		const char *device_path)
2409{
2410	BTRFS_DEV_LOOKUP_ARGS(args);
2411	struct btrfs_device *device;
2412	int ret;
2413
2414	if (devid) {
2415		args.devid = devid;
2416		device = btrfs_find_device(fs_info->fs_devices, &args);
2417		if (!device)
2418			return ERR_PTR(-ENOENT);
2419		return device;
2420	}
2421
2422	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
2423	if (ret)
2424		return ERR_PTR(ret);
2425	device = btrfs_find_device(fs_info->fs_devices, &args);
2426	btrfs_put_dev_args_from_path(&args);
2427	if (!device)
2428		return ERR_PTR(-ENOENT);
2429	return device;
2430}
2431
2432static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
2433{
2434	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2435	struct btrfs_fs_devices *old_devices;
2436	struct btrfs_fs_devices *seed_devices;
2437
2438	lockdep_assert_held(&uuid_mutex);
2439	if (!fs_devices->seeding)
2440		return ERR_PTR(-EINVAL);
2441
2442	/*
2443	 * Private copy of the seed devices, anchored at
2444	 * fs_info->fs_devices->seed_list
2445	 */
2446	seed_devices = alloc_fs_devices(NULL, NULL);
2447	if (IS_ERR(seed_devices))
2448		return seed_devices;
2449
2450	/*
2451	 * It's necessary to retain a copy of the original seed fs_devices in
2452	 * fs_uuids so that filesystems which have been seeded can successfully
2453	 * reference the seed device from open_seed_devices. This also supports
2454	 * multiple seed filesystems.
2455	 */
2456	old_devices = clone_fs_devices(fs_devices);
2457	if (IS_ERR(old_devices)) {
2458		kfree(seed_devices);
2459		return old_devices;
2460	}
2461
2462	list_add(&old_devices->fs_list, &fs_uuids);
2463
2464	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2465	seed_devices->opened = 1;
2466	INIT_LIST_HEAD(&seed_devices->devices);
2467	INIT_LIST_HEAD(&seed_devices->alloc_list);
2468	mutex_init(&seed_devices->device_list_mutex);
2469
2470	return seed_devices;
2471}
2472
2473/*
2474 * Splice seed devices into the sprout fs_devices.
2475 * Generate a new fsid for the sprouted read-write filesystem.
2476 */
2477static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
2478			       struct btrfs_fs_devices *seed_devices)
2479{
2480	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2481	struct btrfs_super_block *disk_super = fs_info->super_copy;
2482	struct btrfs_device *device;
2483	u64 super_flags;
2484
2485	/*
2486	 * We are updating the fsid; the thread leading to device_list_add()
2487	 * could race, so uuid_mutex is needed.
2488	 */
2489	lockdep_assert_held(&uuid_mutex);
2490
2491	/*
2492	 * The threads listed below may traverse dev_list but can do that without
2493	 * device_list_mutex:
2494	 * - All device ops and balance - as we are in btrfs_exclop_start.
2495	 * - Various dev_list readers - are using RCU.
2496	 * - btrfs_ioctl_fitrim() - is using RCU.
2497	 *
2498	 * For-read threads as below are using device_list_mutex:
2499	 * - Readonly scrub btrfs_scrub_dev()
2500	 * - Readonly scrub btrfs_scrub_progress()
2501	 * - btrfs_get_dev_stats()
2502	 */
2503	lockdep_assert_held(&fs_devices->device_list_mutex);
2504
2505	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2506			      synchronize_rcu);
2507	list_for_each_entry(device, &seed_devices->devices, dev_list)
2508		device->fs_devices = seed_devices;
 
2509
2510	fs_devices->seeding = false;
2511	fs_devices->num_devices = 0;
2512	fs_devices->open_devices = 0;
2513	fs_devices->missing_devices = 0;
2514	fs_devices->rotating = false;
2515	list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2516
2517	generate_random_uuid(fs_devices->fsid);
2518	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2519	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
 
2520
2521	super_flags = btrfs_super_flags(disk_super) &
2522		      ~BTRFS_SUPER_FLAG_SEEDING;
2523	btrfs_set_super_flags(disk_super, super_flags);
2524}
2525
2526/*
2527 * Store the expected generation for seed devices in device items.
2528 */
2529static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
 
2530{
2531	BTRFS_DEV_LOOKUP_ARGS(args);
2532	struct btrfs_fs_info *fs_info = trans->fs_info;
2533	struct btrfs_root *root = fs_info->chunk_root;
2534	struct btrfs_path *path;
2535	struct extent_buffer *leaf;
2536	struct btrfs_dev_item *dev_item;
2537	struct btrfs_device *device;
2538	struct btrfs_key key;
2539	u8 fs_uuid[BTRFS_FSID_SIZE];
2540	u8 dev_uuid[BTRFS_UUID_SIZE];
 
2541	int ret;
2542
2543	path = btrfs_alloc_path();
2544	if (!path)
2545		return -ENOMEM;
2546
 
2547	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2548	key.offset = 0;
2549	key.type = BTRFS_DEV_ITEM_KEY;
2550
2551	while (1) {
2552		btrfs_reserve_chunk_metadata(trans, false);
2553		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2554		btrfs_trans_release_chunk_metadata(trans);
2555		if (ret < 0)
2556			goto error;
2557
2558		leaf = path->nodes[0];
2559next_slot:
2560		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2561			ret = btrfs_next_leaf(root, path);
2562			if (ret > 0)
2563				break;
2564			if (ret < 0)
2565				goto error;
2566			leaf = path->nodes[0];
2567			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2568			btrfs_release_path(path);
2569			continue;
2570		}
2571
2572		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2573		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2574		    key.type != BTRFS_DEV_ITEM_KEY)
2575			break;
2576
2577		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2578					  struct btrfs_dev_item);
2579		args.devid = btrfs_device_id(leaf, dev_item);
2580		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2581				   BTRFS_UUID_SIZE);
2582		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2583				   BTRFS_FSID_SIZE);
2584		args.uuid = dev_uuid;
2585		args.fsid = fs_uuid;
2586		device = btrfs_find_device(fs_info->fs_devices, &args);
2587		BUG_ON(!device); /* Logic error */
2588
2589		if (device->fs_devices->seeding) {
2590			btrfs_set_device_generation(leaf, dev_item,
2591						    device->generation);
2592			btrfs_mark_buffer_dirty(leaf);
2593		}
2594
2595		path->slots[0]++;
2596		goto next_slot;
2597	}
2598	ret = 0;
2599error:
2600	btrfs_free_path(path);
2601	return ret;
2602}
2603
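/*
 * Add the block device at @device_path to the mounted filesystem.  If the
 * filesystem is a seed, a new writable filesystem is sprouted on top of it
 * first.
 */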
2604int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2605{
2606	struct btrfs_root *root = fs_info->dev_root;
2607	struct btrfs_trans_handle *trans;
2608	struct btrfs_device *device;
2609	struct block_device *bdev;
2610	struct super_block *sb = fs_info->sb;
2611	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2612	struct btrfs_fs_devices *seed_devices;
2613	u64 orig_super_total_bytes;
2614	u64 orig_super_num_devices;
2615	int ret = 0;
2616	bool seeding_dev = false;
2617	bool locked = false;
2618
2619	if (sb_rdonly(sb) && !fs_devices->seeding)
2620		return -EROFS;
2621
2622	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2623				  fs_info->bdev_holder);
2624	if (IS_ERR(bdev))
2625		return PTR_ERR(bdev);
2626
2627	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
2628		ret = -EINVAL;
2629		goto error;
2630	}
2631
2632	if (fs_devices->seeding) {
2633		seeding_dev = true;
2634		down_write(&sb->s_umount);
2635		mutex_lock(&uuid_mutex);
2636		locked = true;
2637	}
2638
2639	sync_blockdev(bdev);
2640
2641	rcu_read_lock();
2642	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2643		if (device->bdev == bdev) {
2644			ret = -EEXIST;
2645			rcu_read_unlock();
 
2646			goto error;
2647		}
2648	}
2649	rcu_read_unlock();
2650
2651	device = btrfs_alloc_device(fs_info, NULL, NULL, device_path);
2652	if (IS_ERR(device)) {
2653		/* we can safely leave the fs_devices entry around */
2654		ret = PTR_ERR(device);
2655		goto error;
2656	}
2657
2658	device->fs_info = fs_info;
2659	device->bdev = bdev;
2660	ret = lookup_bdev(device_path, &device->devt);
2661	if (ret)
2662		goto error_free_device;
2663
2664	ret = btrfs_get_dev_zone_info(device, false);
2665	if (ret)
2666		goto error_free_device;
2667
2668	trans = btrfs_start_transaction(root, 0);
2669	if (IS_ERR(trans)) {
2670		ret = PTR_ERR(trans);
2671		goto error_free_zone;
2672	}
2673
2674	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2675	device->generation = trans->transid;
2676	device->io_width = fs_info->sectorsize;
2677	device->io_align = fs_info->sectorsize;
2678	device->sector_size = fs_info->sectorsize;
2679	device->total_bytes =
2680		round_down(bdev_nr_bytes(bdev), fs_info->sectorsize);
2681	device->disk_total_bytes = device->total_bytes;
2682	device->commit_total_bytes = device->total_bytes;
2683	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2684	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
 
2685	device->mode = FMODE_EXCL;
2686	device->dev_stats_valid = 1;
2687	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2688
2689	if (seeding_dev) {
2690		btrfs_clear_sb_rdonly(sb);
2691
2692		/* GFP_KERNEL allocation must not be under device_list_mutex */
2693		seed_devices = btrfs_init_sprout(fs_info);
2694		if (IS_ERR(seed_devices)) {
2695			ret = PTR_ERR(seed_devices);
2696			btrfs_abort_transaction(trans, ret);
2697			goto error_trans;
2698		}
2699	}
2700
2701	mutex_lock(&fs_devices->device_list_mutex);
2702	if (seeding_dev) {
2703		btrfs_setup_sprout(fs_info, seed_devices);
2704		btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
2705						device);
2706	}
2707
2708	device->fs_devices = fs_devices;
2709
2710	mutex_lock(&fs_info->chunk_mutex);
2711	list_add_rcu(&device->dev_list, &fs_devices->devices);
2712	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2713	fs_devices->num_devices++;
2714	fs_devices->open_devices++;
2715	fs_devices->rw_devices++;
2716	fs_devices->total_devices++;
2717	fs_devices->total_rw_bytes += device->total_bytes;
2718
2719	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2720
2721	if (!bdev_nonrot(bdev))
2722		fs_devices->rotating = true;
2723
2724	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2725	btrfs_set_super_total_bytes(fs_info->super_copy,
2726		round_down(orig_super_total_bytes + device->total_bytes,
2727			   fs_info->sectorsize));
2728
2729	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2730	btrfs_set_super_num_devices(fs_info->super_copy,
2731				    orig_super_num_devices + 1);
2732
2733	/*
2734	 * we've got more storage, clear any full flags on the space
2735	 * infos
2736	 */
2737	btrfs_clear_space_info_full(fs_info);
2738
2739	mutex_unlock(&fs_info->chunk_mutex);
2740
2741	/* Add sysfs device entry */
2742	btrfs_sysfs_add_device(device);
2743
2744	mutex_unlock(&fs_devices->device_list_mutex);
2745
2746	if (seeding_dev) {
2747		mutex_lock(&fs_info->chunk_mutex);
2748		ret = init_first_rw_device(trans);
2749		mutex_unlock(&fs_info->chunk_mutex);
2750		if (ret) {
2751			btrfs_abort_transaction(trans, ret);
2752			goto error_sysfs;
2753		}
2754	}
2755
2756	ret = btrfs_add_dev_item(trans, device);
2757	if (ret) {
2758		btrfs_abort_transaction(trans, ret);
2759		goto error_sysfs;
2760	}
2761
2762	if (seeding_dev) {
2763		ret = btrfs_finish_sprout(trans);
2764		if (ret) {
2765			btrfs_abort_transaction(trans, ret);
2766			goto error_sysfs;
2767		}
2768
2769		/*
2770		 * fs_devices now represents the newly sprouted filesystem and
2771		 * its fsid has been changed by btrfs_setup_sprout().
2772		 */
2773		btrfs_sysfs_update_sprout_fsid(fs_devices);
2774	}
2775
2776	ret = btrfs_commit_transaction(trans);
2777
2778	if (seeding_dev) {
2779		mutex_unlock(&uuid_mutex);
2780		up_write(&sb->s_umount);
2781		locked = false;
2782
2783		if (ret) /* transaction commit */
2784			return ret;
2785
2786		ret = btrfs_relocate_sys_chunks(fs_info);
2787		if (ret < 0)
2788			btrfs_handle_fs_error(fs_info, ret,
2789				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2790		trans = btrfs_attach_transaction(root);
2791		if (IS_ERR(trans)) {
2792			if (PTR_ERR(trans) == -ENOENT)
2793				return 0;
2794			ret = PTR_ERR(trans);
2795			trans = NULL;
2796			goto error_sysfs;
2797		}
2798		ret = btrfs_commit_transaction(trans);
2799	}
2800
2801	/*
2802	 * Now that we have written a new super block to this device, check all
2803	 * other fs_devices lists to see if device_path alienates any other
2804	 * scanned device.
2805	 * We can ignore the return value as it typically returns -EINVAL and
2806	 * only succeeds if the device was an alien.
2807	 */
2808	btrfs_forget_devices(device->devt);
2809
2810	/* Update ctime/mtime for blkid or udev */
2811	update_dev_time(device_path);
2812
2813	return ret;
2814
2815error_sysfs:
2816	btrfs_sysfs_remove_device(device);
2817	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2818	mutex_lock(&fs_info->chunk_mutex);
2819	list_del_rcu(&device->dev_list);
2820	list_del(&device->dev_alloc_list);
2821	fs_info->fs_devices->num_devices--;
2822	fs_info->fs_devices->open_devices--;
2823	fs_info->fs_devices->rw_devices--;
2824	fs_info->fs_devices->total_devices--;
2825	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2826	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2827	btrfs_set_super_total_bytes(fs_info->super_copy,
2828				    orig_super_total_bytes);
2829	btrfs_set_super_num_devices(fs_info->super_copy,
2830				    orig_super_num_devices);
2831	mutex_unlock(&fs_info->chunk_mutex);
2832	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2833error_trans:
2834	if (seeding_dev)
2835		btrfs_set_sb_rdonly(sb);
2836	if (trans)
2837		btrfs_end_transaction(trans);
2838error_free_zone:
2839	btrfs_destroy_dev_zone_info(device);
2840error_free_device:
2841	btrfs_free_device(device);
2842error:
2843	blkdev_put(bdev, FMODE_EXCL);
2844	if (locked) {
2845		mutex_unlock(&uuid_mutex);
2846		up_write(&sb->s_umount);
2847	}
2848	return ret;
2849}
2850
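/*
 * Write the in-memory state of @device (sizes, io limits, type) back into
 * its dev item in the chunk tree.
 */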
2851static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2852					struct btrfs_device *device)
2853{
2854	int ret;
2855	struct btrfs_path *path;
2856	struct btrfs_root *root = device->fs_info->chunk_root;
2857	struct btrfs_dev_item *dev_item;
2858	struct extent_buffer *leaf;
2859	struct btrfs_key key;
2860
2861	path = btrfs_alloc_path();
2862	if (!path)
2863		return -ENOMEM;
2864
2865	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2866	key.type = BTRFS_DEV_ITEM_KEY;
2867	key.offset = device->devid;
2868
2869	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2870	if (ret < 0)
2871		goto out;
2872
2873	if (ret > 0) {
2874		ret = -ENOENT;
2875		goto out;
2876	}
2877
2878	leaf = path->nodes[0];
2879	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2880
2881	btrfs_set_device_id(leaf, dev_item, device->devid);
2882	btrfs_set_device_type(leaf, dev_item, device->type);
2883	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2884	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2885	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2886	btrfs_set_device_total_bytes(leaf, dev_item,
2887				     btrfs_device_get_disk_total_bytes(device));
2888	btrfs_set_device_bytes_used(leaf, dev_item,
2889				    btrfs_device_get_bytes_used(device));
2890	btrfs_mark_buffer_dirty(leaf);
2891
2892out:
2893	btrfs_free_path(path);
2894	return ret;
2895}
2896
2897int btrfs_grow_device(struct btrfs_trans_handle *trans,
2898		      struct btrfs_device *device, u64 new_size)
2899{
2900	struct btrfs_fs_info *fs_info = device->fs_info;
2901	struct btrfs_super_block *super_copy = fs_info->super_copy;
2902	u64 old_total;
2903	u64 diff;
2904	int ret;
2905
2906	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2907		return -EACCES;
2908
2909	new_size = round_down(new_size, fs_info->sectorsize);
2910
2911	mutex_lock(&fs_info->chunk_mutex);
2912	old_total = btrfs_super_total_bytes(super_copy);
2913	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2914
2915	if (new_size <= device->total_bytes ||
2916	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2917		mutex_unlock(&fs_info->chunk_mutex);
2918		return -EINVAL;
2919	}
2920
2921	btrfs_set_super_total_bytes(super_copy,
2922			round_down(old_total + diff, fs_info->sectorsize));
2923	device->fs_devices->total_rw_bytes += diff;
2924
2925	btrfs_device_set_total_bytes(device, new_size);
2926	btrfs_device_set_disk_total_bytes(device, new_size);
2927	btrfs_clear_space_info_full(device->fs_info);
2928	if (list_empty(&device->post_commit_list))
2929		list_add_tail(&device->post_commit_list,
2930			      &trans->transaction->dev_update_list);
2931	mutex_unlock(&fs_info->chunk_mutex);
2932
2933	btrfs_reserve_chunk_metadata(trans, false);
2934	ret = btrfs_update_device(trans, device);
2935	btrfs_trans_release_chunk_metadata(trans);
2936
2937	return ret;
2938}
2939
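/*
 * Delete the chunk item with offset @chunk_offset from the chunk tree.
 */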
2940static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2941{
2942	struct btrfs_fs_info *fs_info = trans->fs_info;
2943	struct btrfs_root *root = fs_info->chunk_root;
2944	int ret;
2945	struct btrfs_path *path;
2946	struct btrfs_key key;
2947
 
2948	path = btrfs_alloc_path();
2949	if (!path)
2950		return -ENOMEM;
2951
2952	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2953	key.offset = chunk_offset;
2954	key.type = BTRFS_CHUNK_ITEM_KEY;
2955
2956	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2957	if (ret < 0)
2958		goto out;
2959	else if (ret > 0) { /* Logic error or corruption */
2960		btrfs_handle_fs_error(fs_info, -ENOENT,
2961				      "Failed lookup while freeing chunk.");
2962		ret = -ENOENT;
2963		goto out;
2964	}
2965
2966	ret = btrfs_del_item(trans, root, path);
2967	if (ret < 0)
2968		btrfs_handle_fs_error(fs_info, ret,
2969				      "Failed to delete chunk item.");
2970out:
2971	btrfs_free_path(path);
2972	return ret;
2973}
2974
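/*
 * Remove the chunk at @chunk_offset from the sys_chunk_array embedded in the
 * super block copy and shrink the array accordingly.  Called with
 * fs_info->chunk_mutex held.
 */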
2975static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
 
2976{
2977	struct btrfs_super_block *super_copy = fs_info->super_copy;
2978	struct btrfs_disk_key *disk_key;
2979	struct btrfs_chunk *chunk;
2980	u8 *ptr;
2981	int ret = 0;
2982	u32 num_stripes;
2983	u32 array_size;
2984	u32 len = 0;
2985	u32 cur;
2986	struct btrfs_key key;
2987
2988	lockdep_assert_held(&fs_info->chunk_mutex);
2989	array_size = btrfs_super_sys_array_size(super_copy);
2990
2991	ptr = super_copy->sys_chunk_array;
2992	cur = 0;
2993
2994	while (cur < array_size) {
2995		disk_key = (struct btrfs_disk_key *)ptr;
2996		btrfs_disk_key_to_cpu(&key, disk_key);
2997
2998		len = sizeof(*disk_key);
2999
3000		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3001			chunk = (struct btrfs_chunk *)(ptr + len);
3002			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
3003			len += btrfs_chunk_item_size(num_stripes);
3004		} else {
3005			ret = -EIO;
3006			break;
3007		}
3008		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
3009		    key.offset == chunk_offset) {
3010			memmove(ptr, ptr + len, array_size - (cur + len));
3011			array_size -= len;
3012			btrfs_set_super_sys_array_size(super_copy, array_size);
3013		} else {
3014			ptr += len;
3015			cur += len;
3016		}
3017	}
3018	return ret;
3019}
3020
3021/*
3022 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
3023 * @logical: Logical block offset in bytes.
3024 * @length: Length of extent in bytes.
3025 *
3026 * Return: Chunk mapping or ERR_PTR.
3027 */
3028struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
3029				       u64 logical, u64 length)
3030{
3031	struct extent_map_tree *em_tree;
3032	struct extent_map *em;
3033
3034	em_tree = &fs_info->mapping_tree;
3035	read_lock(&em_tree->lock);
3036	em = lookup_extent_mapping(em_tree, logical, length);
3037	read_unlock(&em_tree->lock);
3038
3039	if (!em) {
3040		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
3041			   logical, length);
3042		return ERR_PTR(-EINVAL);
3043	}
3044
3045	if (em->start > logical || em->start + em->len < logical) {
3046		btrfs_crit(fs_info,
3047			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
3048			   logical, length, em->start, em->start + em->len);
3049		free_extent_map(em);
3050		return ERR_PTR(-EINVAL);
3051	}
3052
3053	/* callers are responsible for dropping em's ref. */
3054	return em;
3055}
3056
3057static int remove_chunk_item(struct btrfs_trans_handle *trans,
3058			     struct map_lookup *map, u64 chunk_offset)
3059{
3060	int i;
3061
3062	/*
3063	 * Removing chunk items and updating the device items in the chunks btree
3064	 * requires holding the chunk_mutex.
3065	 * See the comment at btrfs_chunk_alloc() for the details.
3066	 */
3067	lockdep_assert_held(&trans->fs_info->chunk_mutex);
3068
3069	for (i = 0; i < map->num_stripes; i++) {
3070		int ret;
3071
3072		ret = btrfs_update_device(trans, map->stripes[i].dev);
3073		if (ret)
3074			return ret;
3075	}
3076
3077	return btrfs_free_chunk(trans, chunk_offset);
3078}
 
3079
3080int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3081{
3082	struct btrfs_fs_info *fs_info = trans->fs_info;
3083	struct extent_map *em;
3084	struct map_lookup *map;
3085	u64 dev_extent_len = 0;
3086	int i, ret = 0;
3087	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3088
3089	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3090	if (IS_ERR(em)) {
3091		/*
3092		 * This is a logic error, but we don't want to just rely on the
3093		 * user having built with ASSERT enabled, so if ASSERT doesn't
3094		 * do anything we still error out.
3095		 */
3096		ASSERT(0);
3097		return PTR_ERR(em);
3098	}
3099	map = em->map_lookup;
3100
3101	/*
3102	 * First delete the device extent items from the devices btree.
3103	 * We take the device_list_mutex to avoid racing with the finishing phase
3104	 * of a device replace operation. See the comment below before acquiring
3105	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3106	 * because that can result in a deadlock when deleting the device extent
3107	 * items from the devices btree - COWing an extent buffer from the btree
3108	 * may result in allocating a new metadata chunk, which would attempt to
3109	 * lock again fs_info->chunk_mutex.
3110	 * lock fs_info->chunk_mutex again.
3111	mutex_lock(&fs_devices->device_list_mutex);
3112	for (i = 0; i < map->num_stripes; i++) {
3113		struct btrfs_device *device = map->stripes[i].dev;
3114		ret = btrfs_free_dev_extent(trans, device,
3115					    map->stripes[i].physical,
3116					    &dev_extent_len);
3117		if (ret) {
3118			mutex_unlock(&fs_devices->device_list_mutex);
3119			btrfs_abort_transaction(trans, ret);
3120			goto out;
3121		}
3122
3123		if (device->bytes_used > 0) {
3124			mutex_lock(&fs_info->chunk_mutex);
3125			btrfs_device_set_bytes_used(device,
3126					device->bytes_used - dev_extent_len);
3127			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3128			btrfs_clear_space_info_full(fs_info);
3129			mutex_unlock(&fs_info->chunk_mutex);
3130		}
3131	}
3132	mutex_unlock(&fs_devices->device_list_mutex);
3133
3134	/*
3135	 * We acquire fs_info->chunk_mutex for 2 reasons:
3136	 *
3137	 * 1) Just like with the first phase of the chunk allocation, we must
3138	 *    reserve system space, do all chunk btree updates and deletions, and
3139	 *    update the system chunk array in the superblock while holding this
3140	 *    mutex. This is for similar reasons as explained on the comment at
3141	 *    the top of btrfs_chunk_alloc();
3142	 *
3143	 * 2) Prevent races with the final phase of a device replace operation
3144	 *    that replaces the device object associated with the map's stripes,
3145	 *    because the device object's id can change at any time during that
3146	 *    final phase of the device replace operation
3147	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3148	 *    replaced device and then see it with an ID of
3149	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3150	 * the device item, which does not exist in the chunk btree.
3151	 *    The finishing phase of device replace acquires both the
3152	 *    device_list_mutex and the chunk_mutex, in that order, so we are
3153	 *    safe by just acquiring the chunk_mutex.
3154	 */
3155	trans->removing_chunk = true;
3156	mutex_lock(&fs_info->chunk_mutex);
3157
3158	check_system_chunk(trans, map->type);
3159
3160	ret = remove_chunk_item(trans, map, chunk_offset);
3161	/*
3162	 * Normally we should not get -ENOSPC since we reserved space before
3163	 * through the call to check_system_chunk().
3164	 *
3165	 * Despite our system space_info having enough free space, we may not
3166	 * be able to allocate extents from its block groups, because all have
3167	 * an incompatible profile, which will force us to allocate a new system
3168	 * block group with the right profile, or right after we called
3169	 * check_system_chunk() above, a scrub turned the only system block group
3170	 * with enough free space into RO mode.
3171	 * This is explained with more detail at do_chunk_alloc().
3172	 *
3173	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3174	 */
3175	if (ret == -ENOSPC) {
3176		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3177		struct btrfs_block_group *sys_bg;
3178
3179		sys_bg = btrfs_create_chunk(trans, sys_flags);
3180		if (IS_ERR(sys_bg)) {
3181			ret = PTR_ERR(sys_bg);
3182			btrfs_abort_transaction(trans, ret);
3183			goto out;
3184		}
3185
3186		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3187		if (ret) {
3188			btrfs_abort_transaction(trans, ret);
3189			goto out;
3190		}
3191
3192		ret = remove_chunk_item(trans, map, chunk_offset);
3193		if (ret) {
3194			btrfs_abort_transaction(trans, ret);
3195			goto out;
3196		}
3197	} else if (ret) {
3198		btrfs_abort_transaction(trans, ret);
3199		goto out;
3200	}
3201
3202	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3203
3204	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3205		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3206		if (ret) {
3207			btrfs_abort_transaction(trans, ret);
3208			goto out;
3209		}
3210	}
3211
3212	mutex_unlock(&fs_info->chunk_mutex);
3213	trans->removing_chunk = false;
3214
3215	/*
3216	 * We are done with chunk btree updates and deletions, so release the
3217	 * system space we previously reserved (with check_system_chunk()).
3218	 */
3219	btrfs_trans_release_chunk_metadata(trans);
3220
3221	ret = btrfs_remove_block_group(trans, chunk_offset, em);
3222	if (ret) {
3223		btrfs_abort_transaction(trans, ret);
3224		goto out;
3225	}
3226
3227out:
3228	if (trans->removing_chunk) {
3229		mutex_unlock(&fs_info->chunk_mutex);
3230		trans->removing_chunk = false;
3231	}
3232	/* once for us */
3233	free_extent_map(em);
3234	return ret;
3235}
3236
3237int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3238{
3239	struct btrfs_root *root = fs_info->chunk_root;
3240	struct btrfs_trans_handle *trans;
3241	struct btrfs_block_group *block_group;
3242	u64 length;
3243	int ret;
3244
3245	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
3246		btrfs_err(fs_info,
3247			  "relocate: not supported on extent tree v2 yet");
3248		return -EINVAL;
3249	}
3250
3251	/*
3252	 * Prevent races with automatic removal of unused block groups.
3253	 * After we relocate and before we remove the chunk with offset
3254	 * chunk_offset, automatic removal of the block group can kick in,
3255	 * resulting in a failure when calling btrfs_remove_chunk() below.
3256	 *
3257	 * Make sure to acquire this mutex before doing a tree search (dev
3258	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3259	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3260	 * we release the path used to search the chunk/dev tree and before
3261	 * the current task acquires this mutex and calls us.
3262	 */
3263	lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3264
3265	/* step one, relocate all the extents inside this chunk */
3266	btrfs_scrub_pause(fs_info);
3267	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3268	btrfs_scrub_continue(fs_info);
3269	if (ret)
3270		return ret;
3271
3272	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3273	if (!block_group)
3274		return -ENOENT;
3275	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3276	length = block_group->length;
3277	btrfs_put_block_group(block_group);
3278
3279	/*
3280	 * On a zoned file system, discard the whole block group; this will
3281	 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3282	 * resetting the zone fails, don't treat it as a fatal problem from the
3283	 * filesystem's point of view.
3284	 */
3285	if (btrfs_is_zoned(fs_info)) {
3286		ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3287		if (ret)
3288			btrfs_info(fs_info,
3289				"failed to reset zone %llu after relocation",
3290				chunk_offset);
3291	}
3292
3293	trans = btrfs_start_trans_remove_block_group(root->fs_info,
3294						     chunk_offset);
3295	if (IS_ERR(trans)) {
3296		ret = PTR_ERR(trans);
3297		btrfs_handle_fs_error(root->fs_info, ret, NULL);
3298		return ret;
3299	}
3300
3301	/*
3302	 * step two, delete the device extents and the
3303	 * chunk tree entries
3304	 */
3305	ret = btrfs_remove_chunk(trans, chunk_offset);
3306	btrfs_end_transaction(trans);
3307	return ret;
3308}
3309
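/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk.  Chunks
 * failing with -ENOSPC are retried once; if they still fail, -ENOSPC is
 * returned, and any other relocation error triggers a BUG().
 */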
3310static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3311{
3312	struct btrfs_root *chunk_root = fs_info->chunk_root;
3313	struct btrfs_path *path;
3314	struct extent_buffer *leaf;
3315	struct btrfs_chunk *chunk;
3316	struct btrfs_key key;
3317	struct btrfs_key found_key;
 
3318	u64 chunk_type;
3319	bool retried = false;
3320	int failed = 0;
3321	int ret;
3322
3323	path = btrfs_alloc_path();
3324	if (!path)
3325		return -ENOMEM;
3326
3327again:
3328	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3329	key.offset = (u64)-1;
3330	key.type = BTRFS_CHUNK_ITEM_KEY;
3331
3332	while (1) {
3333		mutex_lock(&fs_info->reclaim_bgs_lock);
3334		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3335		if (ret < 0) {
3336			mutex_unlock(&fs_info->reclaim_bgs_lock);
3337			goto error;
3338		}
3339		BUG_ON(ret == 0); /* Corruption */
3340
3341		ret = btrfs_previous_item(chunk_root, path, key.objectid,
3342					  key.type);
3343		if (ret)
3344			mutex_unlock(&fs_info->reclaim_bgs_lock);
3345		if (ret < 0)
3346			goto error;
3347		if (ret > 0)
3348			break;
3349
3350		leaf = path->nodes[0];
3351		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3352
3353		chunk = btrfs_item_ptr(leaf, path->slots[0],
3354				       struct btrfs_chunk);
3355		chunk_type = btrfs_chunk_type(leaf, chunk);
3356		btrfs_release_path(path);
3357
3358		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3359			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3360			if (ret == -ENOSPC)
3361				failed++;
3362			else
3363				BUG_ON(ret);
3364		}
3365		mutex_unlock(&fs_info->reclaim_bgs_lock);
3366
3367		if (found_key.offset == 0)
3368			break;
3369		key.offset = found_key.offset - 1;
3370	}
3371	ret = 0;
3372	if (failed && !retried) {
3373		failed = 0;
3374		retried = true;
3375		goto again;
3376	} else if (WARN_ON(failed && retried)) {
3377		ret = -ENOSPC;
3378	}
3379error:
3380	btrfs_free_path(path);
3381	return ret;
3382}
3383
3384/*
3385 * Return 1 : a data chunk was allocated successfully,
3386 * return <0: an error occurred while allocating a data chunk,
3387 * return 0 : there is no need to allocate a data chunk.
3388 */
3389static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3390				      u64 chunk_offset)
3391{
3392	struct btrfs_block_group *cache;
3393	u64 bytes_used;
3394	u64 chunk_type;
3395
3396	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3397	ASSERT(cache);
3398	chunk_type = cache->flags;
3399	btrfs_put_block_group(cache);
3400
3401	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3402		return 0;
3403
3404	spin_lock(&fs_info->data_sinfo->lock);
3405	bytes_used = fs_info->data_sinfo->bytes_used;
3406	spin_unlock(&fs_info->data_sinfo->lock);
3407
3408	if (!bytes_used) {
3409		struct btrfs_trans_handle *trans;
3410		int ret;
3411
3412		trans =	btrfs_join_transaction(fs_info->tree_root);
3413		if (IS_ERR(trans))
3414			return PTR_ERR(trans);
3415
3416		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3417		btrfs_end_transaction(trans);
3418		if (ret < 0)
3419			return ret;
3420		return 1;
3421	}
3422
3423	return 0;
3424}
3425
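/*
 * Persist the current balance args and flags as the BTRFS_BALANCE_OBJECTID /
 * BTRFS_TEMPORARY_ITEM_KEY item in the tree root, in its own transaction.
 */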
3426static int insert_balance_item(struct btrfs_fs_info *fs_info,
3427			       struct btrfs_balance_control *bctl)
3428{
3429	struct btrfs_root *root = fs_info->tree_root;
3430	struct btrfs_trans_handle *trans;
3431	struct btrfs_balance_item *item;
3432	struct btrfs_disk_balance_args disk_bargs;
3433	struct btrfs_path *path;
3434	struct extent_buffer *leaf;
3435	struct btrfs_key key;
3436	int ret, err;
3437
3438	path = btrfs_alloc_path();
3439	if (!path)
3440		return -ENOMEM;
3441
3442	trans = btrfs_start_transaction(root, 0);
3443	if (IS_ERR(trans)) {
3444		btrfs_free_path(path);
3445		return PTR_ERR(trans);
3446	}
3447
3448	key.objectid = BTRFS_BALANCE_OBJECTID;
3449	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3450	key.offset = 0;
3451
3452	ret = btrfs_insert_empty_item(trans, root, path, &key,
3453				      sizeof(*item));
3454	if (ret)
3455		goto out;
3456
3457	leaf = path->nodes[0];
3458	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3459
3460	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3461
3462	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3463	btrfs_set_balance_data(leaf, item, &disk_bargs);
3464	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3465	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3466	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3467	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3468
3469	btrfs_set_balance_flags(leaf, item, bctl->flags);
3470
3471	btrfs_mark_buffer_dirty(leaf);
3472out:
3473	btrfs_free_path(path);
3474	err = btrfs_commit_transaction(trans);
3475	if (err && !ret)
3476		ret = err;
3477	return ret;
3478}
3479
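/*
 * Delete the persistent balance item from the tree root, again in its own
 * transaction.
 */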
3480static int del_balance_item(struct btrfs_fs_info *fs_info)
3481{
3482	struct btrfs_root *root = fs_info->tree_root;
3483	struct btrfs_trans_handle *trans;
3484	struct btrfs_path *path;
3485	struct btrfs_key key;
3486	int ret, err;
3487
3488	path = btrfs_alloc_path();
3489	if (!path)
3490		return -ENOMEM;
3491
3492	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3493	if (IS_ERR(trans)) {
3494		btrfs_free_path(path);
3495		return PTR_ERR(trans);
3496	}
3497
3498	key.objectid = BTRFS_BALANCE_OBJECTID;
3499	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3500	key.offset = 0;
3501
3502	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3503	if (ret < 0)
3504		goto out;
3505	if (ret > 0) {
3506		ret = -ENOENT;
3507		goto out;
3508	}
3509
3510	ret = btrfs_del_item(trans, root, path);
3511out:
3512	btrfs_free_path(path);
3513	err = btrfs_commit_transaction(trans);
3514	if (err && !ret)
3515		ret = err;
3516	return ret;
3517}
3518
3519/*
3520 * This is a heuristic used to reduce the number of chunks balanced on
3521 * resume after balance was interrupted.
3522 */
3523static void update_balance_args(struct btrfs_balance_control *bctl)
3524{
3525	/*
3526	 * Turn on soft mode for chunk types that were being converted.
3527	 */
3528	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3529		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3530	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3531		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3532	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3533		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3534
3535	/*
3536	 * Turn on the usage filter if it is not already in use.  The idea is
3537	 * that chunks that we have already balanced should be
3538	 * reasonably full.  Don't do it for chunks that are being
3539	 * converted - that will keep us from relocating unconverted
3540	 * (albeit full) chunks.
3541	 */
3542	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3543	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3544	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3545		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3546		bctl->data.usage = 90;
3547	}
3548	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3549	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3550	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3551		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3552		bctl->sys.usage = 90;
3553	}
3554	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3555	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3556	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3557		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3558		bctl->meta.usage = 90;
3559	}
3560}
3561
3562/*
3563 * Clear the balance status in fs_info and delete the balance item from disk.
3564 */
3565static void reset_balance_state(struct btrfs_fs_info *fs_info)
3566{
3567	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3568	int ret;
3569
3570	BUG_ON(!fs_info->balance_ctl);
3571
3572	spin_lock(&fs_info->balance_lock);
3573	fs_info->balance_ctl = NULL;
3574	spin_unlock(&fs_info->balance_lock);
3575
3576	kfree(bctl);
3577	ret = del_balance_item(fs_info);
3578	if (ret)
3579		btrfs_handle_fs_error(fs_info, ret, NULL);
3580}
3581
3582/*
3583 * Balance filters.  Return 1 if chunk should be filtered out
3584 * (should not be balanced).
3585 */
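/*
 * For example, with -dprofiles=raid0 only data chunks whose (extended)
 * profile matches raid0 pass this filter (return 0); all other chunks
 * return 1 and are left alone.
 */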
3586static int chunk_profiles_filter(u64 chunk_type,
3587				 struct btrfs_balance_args *bargs)
3588{
3589	chunk_type = chunk_to_extended(chunk_type) &
3590				BTRFS_EXTENDED_PROFILE_MASK;
3591
3592	if (bargs->profiles & chunk_type)
3593		return 0;
3594
3595	return 1;
3596}
3597
3598static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3599			      struct btrfs_balance_args *bargs)
3600{
3601	struct btrfs_block_group *cache;
3602	u64 chunk_used;
3603	u64 user_thresh_min;
3604	u64 user_thresh_max;
3605	int ret = 1;
3606
3607	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3608	chunk_used = cache->used;
3609
3610	if (bargs->usage_min == 0)
3611		user_thresh_min = 0;
3612	else
3613		user_thresh_min = mult_perc(cache->length, bargs->usage_min);
3614
3615	if (bargs->usage_max == 0)
3616		user_thresh_max = 1;
3617	else if (bargs->usage_max > 100)
3618		user_thresh_max = cache->length;
3619	else
3620		user_thresh_max = mult_perc(cache->length, bargs->usage_max);
3621
3622	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3623		ret = 0;
3624
3625	btrfs_put_block_group(cache);
3626	return ret;
3627}
3628
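/*
 * Single value usage filter, e.g. -dusage=50 on a 1GiB chunk gives
 * user_thresh = mult_perc(1GiB, 50) = 512MiB, so the chunk is balanced
 * (return 0) only while less than 512MiB of it is used.
 */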
3629static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3630		u64 chunk_offset, struct btrfs_balance_args *bargs)
3631{
3632	struct btrfs_block_group *cache;
3633	u64 chunk_used, user_thresh;
3634	int ret = 1;
3635
3636	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3637	chunk_used = cache->used;
3638
3639	if (bargs->usage_min == 0)
3640		user_thresh = 1;
3641	else if (bargs->usage > 100)
3642		user_thresh = cache->length;
3643	else
3644		user_thresh = mult_perc(cache->length, bargs->usage);
 
3645
3646	if (chunk_used < user_thresh)
3647		ret = 0;
3648
3649	btrfs_put_block_group(cache);
3650	return ret;
3651}
3652
3653static int chunk_devid_filter(struct extent_buffer *leaf,
3654			      struct btrfs_chunk *chunk,
3655			      struct btrfs_balance_args *bargs)
3656{
3657	struct btrfs_stripe *stripe;
3658	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3659	int i;
3660
3661	for (i = 0; i < num_stripes; i++) {
3662		stripe = btrfs_stripe_nr(chunk, i);
3663		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3664			return 0;
3665	}
3666
3667	return 1;
3668}
3669
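/*
 * Number of stripes that carry data rather than parity or extra copies,
 * e.g. RAID1 with 2 stripes -> 1, RAID10 with 4 stripes -> 2,
 * RAID5 with 4 stripes -> 3, RAID6 with 6 stripes -> 4.
 */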
3670static u64 calc_data_stripes(u64 type, int num_stripes)
3671{
3672	const int index = btrfs_bg_flags_to_raid_index(type);
3673	const int ncopies = btrfs_raid_array[index].ncopies;
3674	const int nparity = btrfs_raid_array[index].nparity;
3675
3676	return (num_stripes - nparity) / ncopies;
3677}
3678
3679/* [pstart, pend) */
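/*
 * For example, a RAID5 chunk striped over 4 devices has 3 data stripes,
 * so each device extent covers chunk_length / 3 bytes; that per-device
 * length is what gets intersected with [pstart, pend).
 */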
3680static int chunk_drange_filter(struct extent_buffer *leaf,
3681			       struct btrfs_chunk *chunk,
 
3682			       struct btrfs_balance_args *bargs)
3683{
3684	struct btrfs_stripe *stripe;
3685	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3686	u64 stripe_offset;
3687	u64 stripe_length;
3688	u64 type;
3689	int factor;
3690	int i;
3691
3692	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3693		return 0;
3694
3695	type = btrfs_chunk_type(leaf, chunk);
3696	factor = calc_data_stripes(type, num_stripes);
3697
3698	for (i = 0; i < num_stripes; i++) {
3699		stripe = btrfs_stripe_nr(chunk, i);
3700		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3701			continue;
3702
3703		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3704		stripe_length = btrfs_chunk_length(leaf, chunk);
3705		stripe_length = div_u64(stripe_length, factor);
3706
3707		if (stripe_offset < bargs->pend &&
3708		    stripe_offset + stripe_length > bargs->pstart)
3709			return 0;
3710	}
3711
3712	return 1;
3713}
3714
3715/* [vstart, vend) */
3716static int chunk_vrange_filter(struct extent_buffer *leaf,
3717			       struct btrfs_chunk *chunk,
3718			       u64 chunk_offset,
3719			       struct btrfs_balance_args *bargs)
3720{
3721	if (chunk_offset < bargs->vend &&
3722	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3723		/* at least part of the chunk is inside this vrange */
3724		return 0;
3725
3726	return 1;
3727}
3728
3729static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3730			       struct btrfs_chunk *chunk,
3731			       struct btrfs_balance_args *bargs)
3732{
3733	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3734
3735	if (bargs->stripes_min <= num_stripes
3736			&& num_stripes <= bargs->stripes_max)
3737		return 0;
3738
3739	return 1;
3740}
3741
3742static int chunk_soft_convert_filter(u64 chunk_type,
3743				     struct btrfs_balance_args *bargs)
3744{
3745	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3746		return 0;
3747
3748	chunk_type = chunk_to_extended(chunk_type) &
3749				BTRFS_EXTENDED_PROFILE_MASK;
3750
3751	if (bargs->target == chunk_type)
3752		return 1;
3753
3754	return 0;
3755}
3756
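/*
 * Return 1 if the chunk passes all configured filters and should be
 * relocated, 0 if it should be skipped.  Note that the limit filters
 * decrement their remaining counters here as a side effect.
 */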
3757static int should_balance_chunk(struct extent_buffer *leaf,
 
3758				struct btrfs_chunk *chunk, u64 chunk_offset)
3759{
3760	struct btrfs_fs_info *fs_info = leaf->fs_info;
3761	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3762	struct btrfs_balance_args *bargs = NULL;
3763	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3764
3765	/* type filter */
3766	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3767	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3768		return 0;
3769	}
3770
3771	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3772		bargs = &bctl->data;
3773	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3774		bargs = &bctl->sys;
3775	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3776		bargs = &bctl->meta;
3777
3778	/* profiles filter */
3779	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3780	    chunk_profiles_filter(chunk_type, bargs)) {
3781		return 0;
3782	}
3783
3784	/* usage filter */
3785	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3786	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3787		return 0;
3788	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3789	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3790		return 0;
3791	}
3792
3793	/* devid filter */
3794	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3795	    chunk_devid_filter(leaf, chunk, bargs)) {
3796		return 0;
3797	}
3798
3799	/* drange filter, makes sense only with devid filter */
3800	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3801	    chunk_drange_filter(leaf, chunk, bargs)) {
3802		return 0;
3803	}
3804
3805	/* vrange filter */
3806	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3807	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3808		return 0;
3809	}
3810
3811	/* stripes filter */
3812	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3813	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3814		return 0;
3815	}
3816
3817	/* soft profile changing mode */
3818	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3819	    chunk_soft_convert_filter(chunk_type, bargs)) {
3820		return 0;
3821	}
3822
3823	/*
3824	 * limited by count, must be the last filter
3825	 */
3826	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3827		if (bargs->limit == 0)
3828			return 0;
3829		else
3830			bargs->limit--;
3831	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3832		/*
3833		 * Same logic as the 'limit' filter; the minimum cannot be
3834		 * determined here because we do not have the global information
3835		 * about the count of all chunks that satisfy the filters.
3836		 */
3837		if (bargs->limit_max == 0)
3838			return 0;
3839		else
3840			bargs->limit_max--;
3841	}
3842
3843	return 1;
3844}
3845
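/*
 * Two passes over the chunk tree: the first one (counting == true) only
 * counts the chunks that match the filters to fill bctl->stat.expected
 * and the per-type counters, the second pass does the actual relocation
 * and applies the limit_min filter against those counts.
 */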
3846static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3847{
3848	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3849	struct btrfs_root *chunk_root = fs_info->chunk_root;
3850	u64 chunk_type;
3851	struct btrfs_chunk *chunk;
3852	struct btrfs_path *path = NULL;
3853	struct btrfs_key key;
3854	struct btrfs_key found_key;
 
3855	struct extent_buffer *leaf;
3856	int slot;
3857	int ret;
3858	int enospc_errors = 0;
3859	bool counting = true;
3860	/* The single value limit and min/max limits use the same bytes in the args (a union), so save them here */
3861	u64 limit_data = bctl->data.limit;
3862	u64 limit_meta = bctl->meta.limit;
3863	u64 limit_sys = bctl->sys.limit;
3864	u32 count_data = 0;
3865	u32 count_meta = 0;
3866	u32 count_sys = 0;
3867	int chunk_reserved = 0;
3868
3869	path = btrfs_alloc_path();
3870	if (!path) {
3871		ret = -ENOMEM;
3872		goto error;
3873	}
3874
3875	/* zero out stat counters */
3876	spin_lock(&fs_info->balance_lock);
3877	memset(&bctl->stat, 0, sizeof(bctl->stat));
3878	spin_unlock(&fs_info->balance_lock);
3879again:
3880	if (!counting) {
3881		/*
3882		 * The single value limit and min/max limits use the same bytes
3883		 * in the args (a union), so restore the saved values here.
3884		 */
3885		bctl->data.limit = limit_data;
3886		bctl->meta.limit = limit_meta;
3887		bctl->sys.limit = limit_sys;
3888	}
3889	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3890	key.offset = (u64)-1;
3891	key.type = BTRFS_CHUNK_ITEM_KEY;
3892
3893	while (1) {
3894		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3895		    atomic_read(&fs_info->balance_cancel_req)) {
3896			ret = -ECANCELED;
3897			goto error;
3898		}
3899
3900		mutex_lock(&fs_info->reclaim_bgs_lock);
3901		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3902		if (ret < 0) {
3903			mutex_unlock(&fs_info->reclaim_bgs_lock);
3904			goto error;
3905		}
3906
3907		/*
3908		 * this shouldn't happen, it means the last relocate
3909		 * failed
3910		 */
3911		if (ret == 0)
3912			BUG(); /* FIXME break ? */
3913
3914		ret = btrfs_previous_item(chunk_root, path, 0,
3915					  BTRFS_CHUNK_ITEM_KEY);
3916		if (ret) {
3917			mutex_unlock(&fs_info->reclaim_bgs_lock);
3918			ret = 0;
3919			break;
3920		}
3921
3922		leaf = path->nodes[0];
3923		slot = path->slots[0];
3924		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3925
3926		if (found_key.objectid != key.objectid) {
3927			mutex_unlock(&fs_info->reclaim_bgs_lock);
3928			break;
3929		}
3930
3931		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3932		chunk_type = btrfs_chunk_type(leaf, chunk);
3933
3934		if (!counting) {
3935			spin_lock(&fs_info->balance_lock);
3936			bctl->stat.considered++;
3937			spin_unlock(&fs_info->balance_lock);
3938		}
3939
3940		ret = should_balance_chunk(leaf, chunk, found_key.offset);
3941
3942		btrfs_release_path(path);
3943		if (!ret) {
3944			mutex_unlock(&fs_info->reclaim_bgs_lock);
3945			goto loop;
3946		}
3947
3948		if (counting) {
3949			mutex_unlock(&fs_info->reclaim_bgs_lock);
3950			spin_lock(&fs_info->balance_lock);
3951			bctl->stat.expected++;
3952			spin_unlock(&fs_info->balance_lock);
3953
3954			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3955				count_data++;
3956			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3957				count_sys++;
3958			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3959				count_meta++;
3960
3961			goto loop;
3962		}
3963
3964		/*
3965		 * Apply limit_min filter, no need to check if the LIMITS
3966		 * filter is used, limit_min is 0 by default
3967		 */
3968		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3969					count_data < bctl->data.limit_min)
3970				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3971					count_meta < bctl->meta.limit_min)
3972				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3973					count_sys < bctl->sys.limit_min)) {
3974			mutex_unlock(&fs_info->reclaim_bgs_lock);
3975			goto loop;
3976		}
3977
3978		if (!chunk_reserved) {
3979			/*
3980			 * We may be relocating the only data chunk we have,
3981			 * which could potentially end up with losing data's
3982			 * raid profile, so let's allocate an empty one in
3983			 * advance.
3984			 */
3985			ret = btrfs_may_alloc_data_chunk(fs_info,
3986							 found_key.offset);
3987			if (ret < 0) {
3988				mutex_unlock(&fs_info->reclaim_bgs_lock);
3989				goto error;
3990			} else if (ret == 1) {
3991				chunk_reserved = 1;
3992			}
3993		}
3994
3995		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3996		mutex_unlock(&fs_info->reclaim_bgs_lock);
3997		if (ret == -ENOSPC) {
3998			enospc_errors++;
3999		} else if (ret == -ETXTBSY) {
4000			btrfs_info(fs_info,
4001	   "skipping relocation of block group %llu due to active swapfile",
4002				   found_key.offset);
4003			ret = 0;
4004		} else if (ret) {
4005			goto error;
4006		} else {
4007			spin_lock(&fs_info->balance_lock);
4008			bctl->stat.completed++;
4009			spin_unlock(&fs_info->balance_lock);
4010		}
4011loop:
4012		if (found_key.offset == 0)
4013			break;
4014		key.offset = found_key.offset - 1;
4015	}
4016
4017	if (counting) {
4018		btrfs_release_path(path);
4019		counting = false;
4020		goto again;
4021	}
4022error:
4023	btrfs_free_path(path);
4024	if (enospc_errors) {
4025		btrfs_info(fs_info, "%d enospc errors during balance",
4026			   enospc_errors);
4027		if (!ret)
4028			ret = -ENOSPC;
4029	}
4030
4031	return ret;
4032}
4033
4034/*
4035 * See if a given profile is valid and reduced.
4036 *
4037 * @flags:     profile to validate
4038 * @extended:  if true @flags is treated as an extended profile
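 *
 * For example, a plain RAID1 profile is reduced, RAID1 | RAID10 is not,
 * and 0 (single) is only accepted for non-extended profiles.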
4039 */
4040static int alloc_profile_is_valid(u64 flags, int extended)
4041{
4042	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
4043			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
4044
4045	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
4046
4047	/* 1) check that all other bits are zeroed */
4048	if (flags & ~mask)
4049		return 0;
4050
4051	/* 2) see if profile is reduced */
4052	if (flags == 0)
4053		return !extended; /* "0" is valid for usual profiles */
4054
4055	return has_single_bit_set(flags);
 
4056}
4057
4058static inline int balance_need_close(struct btrfs_fs_info *fs_info)
4059{
4060	/* cancel requested || normal exit path */
4061	return atomic_read(&fs_info->balance_cancel_req) ||
4062		(atomic_read(&fs_info->balance_pause_req) == 0 &&
4063		 atomic_read(&fs_info->balance_cancel_req) == 0);
4064}
4065
4066/*
4067 * Validate target profile against allowed profiles and return true if it's OK.
4068 * Otherwise print the error message and return false.
4069 */
4070static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
4071		const struct btrfs_balance_args *bargs,
4072		u64 allowed, const char *type)
4073{
4074	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4075		return true;
4076
4077	/* Profile is valid and does not have bits outside of the allowed set */
4078	if (alloc_profile_is_valid(bargs->target, 1) &&
4079	    (bargs->target & ~allowed) == 0)
4080		return true;
4081
4082	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
4083			type, btrfs_bg_type_to_raid_name(bargs->target));
4084	return false;
4085}
4086
4087/*
4088 * Fill @buf with textual description of balance filter flags @bargs, up to
4089 * @size_buf including the terminating null. The output may be trimmed if it
4090 * does not fit into the provided buffer.
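 *
 * e.g. flags with CONVERT, SOFT and USAGE set may yield something like
 * "convert=raid1,soft,usage=90".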
4091 */
4092static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4093				 u32 size_buf)
4094{
4095	int ret;
4096	u32 size_bp = size_buf;
4097	char *bp = buf;
4098	u64 flags = bargs->flags;
4099	char tmp_buf[128] = {'\0'};
4100
4101	if (!flags)
4102		return;
4103
4104#define CHECK_APPEND_NOARG(a)						\
4105	do {								\
4106		ret = snprintf(bp, size_bp, (a));			\
4107		if (ret < 0 || ret >= size_bp)				\
4108			goto out_overflow;				\
4109		size_bp -= ret;						\
4110		bp += ret;						\
4111	} while (0)
4112
4113#define CHECK_APPEND_1ARG(a, v1)					\
4114	do {								\
4115		ret = snprintf(bp, size_bp, (a), (v1));			\
4116		if (ret < 0 || ret >= size_bp)				\
4117			goto out_overflow;				\
4118		size_bp -= ret;						\
4119		bp += ret;						\
4120	} while (0)
4121
4122#define CHECK_APPEND_2ARG(a, v1, v2)					\
4123	do {								\
4124		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
4125		if (ret < 0 || ret >= size_bp)				\
4126			goto out_overflow;				\
4127		size_bp -= ret;						\
4128		bp += ret;						\
4129	} while (0)
4130
4131	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4132		CHECK_APPEND_1ARG("convert=%s,",
4133				  btrfs_bg_type_to_raid_name(bargs->target));
4134
4135	if (flags & BTRFS_BALANCE_ARGS_SOFT)
4136		CHECK_APPEND_NOARG("soft,");
4137
4138	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4139		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4140					    sizeof(tmp_buf));
4141		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4142	}
4143
4144	if (flags & BTRFS_BALANCE_ARGS_USAGE)
4145		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4146
4147	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4148		CHECK_APPEND_2ARG("usage=%u..%u,",
4149				  bargs->usage_min, bargs->usage_max);
4150
4151	if (flags & BTRFS_BALANCE_ARGS_DEVID)
4152		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4153
4154	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4155		CHECK_APPEND_2ARG("drange=%llu..%llu,",
4156				  bargs->pstart, bargs->pend);
4157
4158	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4159		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4160				  bargs->vstart, bargs->vend);
4161
4162	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4163		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4164
4165	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4166		CHECK_APPEND_2ARG("limit=%u..%u,",
4167				bargs->limit_min, bargs->limit_max);
4168
4169	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4170		CHECK_APPEND_2ARG("stripes=%u..%u,",
4171				  bargs->stripes_min, bargs->stripes_max);
4172
4173#undef CHECK_APPEND_2ARG
4174#undef CHECK_APPEND_1ARG
4175#undef CHECK_APPEND_NOARG
4176
4177out_overflow:
4178
4179	if (size_bp < size_buf)
4180		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4181	else
4182		buf[0] = '\0';
4183}
4184
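/*
 * Log the start or resume together with a command line style description
 * of the filters, e.g. "balance: resume -dusage=90 -musage=90 -susage=90".
 */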
4185static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4186{
4187	u32 size_buf = 1024;
4188	char tmp_buf[192] = {'\0'};
4189	char *buf;
4190	char *bp;
4191	u32 size_bp = size_buf;
4192	int ret;
4193	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4194
4195	buf = kzalloc(size_buf, GFP_KERNEL);
4196	if (!buf)
4197		return;
4198
4199	bp = buf;
4200
4201#define CHECK_APPEND_1ARG(a, v1)					\
4202	do {								\
4203		ret = snprintf(bp, size_bp, (a), (v1));			\
4204		if (ret < 0 || ret >= size_bp)				\
4205			goto out_overflow;				\
4206		size_bp -= ret;						\
4207		bp += ret;						\
4208	} while (0)
4209
4210	if (bctl->flags & BTRFS_BALANCE_FORCE)
4211		CHECK_APPEND_1ARG("%s", "-f ");
4212
4213	if (bctl->flags & BTRFS_BALANCE_DATA) {
4214		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4215		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4216	}
4217
4218	if (bctl->flags & BTRFS_BALANCE_METADATA) {
4219		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4220		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4221	}
4222
4223	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4224		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4225		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4226	}
4227
4228#undef CHECK_APPEND_1ARG
4229
4230out_overflow:
4231
4232	if (size_bp < size_buf)
4233		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4234	btrfs_info(fs_info, "balance: %s %s",
4235		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
4236		   "resume" : "start", buf);
4237
4238	kfree(buf);
4239}
4240
4241/*
4242 * Should be called with the balance mutex held.
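 *
 * Called from the BTRFS_IOC_BALANCE_V2 ioctl path (btrfs_ioctl_balance)
 * and from balance_kthread() when resuming an interrupted balance.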
4243 */
4244int btrfs_balance(struct btrfs_fs_info *fs_info,
4245		  struct btrfs_balance_control *bctl,
4246		  struct btrfs_ioctl_balance_args *bargs)
4247{
4248	u64 meta_target, data_target;
4249	u64 allowed;
4250	int mixed = 0;
4251	int ret;
4252	u64 num_devices;
4253	unsigned seq;
4254	bool reducing_redundancy;
4255	int i;
4256
4257	if (btrfs_fs_closing(fs_info) ||
4258	    atomic_read(&fs_info->balance_pause_req) ||
4259	    btrfs_should_cancel_balance(fs_info)) {
4260		ret = -EINVAL;
4261		goto out;
4262	}
4263
4264	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4265	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4266		mixed = 1;
4267
4268	/*
4269	 * In case of mixed groups both data and meta should be picked,
4270	 * and identical options should be given for both of them.
4271	 */
4272	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4273	if (mixed && (bctl->flags & allowed)) {
4274		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4275		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4276		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4277			btrfs_err(fs_info,
4278	  "balance: mixed groups data and metadata options must be the same");
4279			ret = -EINVAL;
4280			goto out;
4281		}
4282	}
4283
4284	/*
4285	 * rw_devices will not change at the moment, device add/delete/replace
4286	 * are exclusive
4287	 */
4288	num_devices = fs_info->fs_devices->rw_devices;
4289
4290	/*
4291	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4292	 * special bit for it, to make it easier to distinguish.  Thus we need
4293	 * to set it manually, or balance would refuse the profile.
4294	 */
4295	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4296	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4297		if (num_devices >= btrfs_raid_array[i].devs_min)
4298			allowed |= btrfs_raid_array[i].bg_flag;
4299
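	/*
	 * e.g. a convert of metadata to raid1c3 with only two writable
	 * devices is rejected below because raid1c3 needs at least three.
	 */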
4300	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4301	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4302	    !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4303		ret = -EINVAL;
4304		goto out;
4305	}
4306
4307	/*
4308	 * Allow to reduce metadata or system integrity only if force set for
4309	 * profiles with redundancy (copies, parity)
4310	 */
4311	allowed = 0;
4312	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4313		if (btrfs_raid_array[i].ncopies >= 2 ||
4314		    btrfs_raid_array[i].tolerated_failures >= 1)
4315			allowed |= btrfs_raid_array[i].bg_flag;
4316	}
4317	do {
4318		seq = read_seqbegin(&fs_info->profiles_lock);
4319
4320		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4321		     (fs_info->avail_system_alloc_bits & allowed) &&
4322		     !(bctl->sys.target & allowed)) ||
4323		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4324		     (fs_info->avail_metadata_alloc_bits & allowed) &&
4325		     !(bctl->meta.target & allowed)))
4326			reducing_redundancy = true;
4327		else
4328			reducing_redundancy = false;
4329
4330		/* if we're not converting, the target field is uninitialized */
4331		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4332			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4333		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4334			bctl->data.target : fs_info->avail_data_alloc_bits;
4335	} while (read_seqretry(&fs_info->profiles_lock, seq));
4336
4337	if (reducing_redundancy) {
4338		if (bctl->flags & BTRFS_BALANCE_FORCE) {
4339			btrfs_info(fs_info,
4340			   "balance: force reducing metadata redundancy");
4341		} else {
4342			btrfs_err(fs_info,
4343	"balance: reduces metadata redundancy, use --force if you want this");
4344			ret = -EINVAL;
4345			goto out;
4346		}
4347	}
4348
4349	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4350		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4351		btrfs_warn(fs_info,
4352	"balance: metadata profile %s has lower redundancy than data profile %s",
4353				btrfs_bg_type_to_raid_name(meta_target),
4354				btrfs_bg_type_to_raid_name(data_target));
4355	}
4356
4357	ret = insert_balance_item(fs_info, bctl);
4358	if (ret && ret != -EEXIST)
4359		goto out;
4360
4361	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4362		BUG_ON(ret == -EEXIST);
4363		BUG_ON(fs_info->balance_ctl);
4364		spin_lock(&fs_info->balance_lock);
4365		fs_info->balance_ctl = bctl;
4366		spin_unlock(&fs_info->balance_lock);
4367	} else {
4368		BUG_ON(ret != -EEXIST);
4369		spin_lock(&fs_info->balance_lock);
4370		update_balance_args(bctl);
4371		spin_unlock(&fs_info->balance_lock);
4372	}
4373
4374	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4375	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4376	describe_balance_start_or_resume(fs_info);
4377	mutex_unlock(&fs_info->balance_mutex);
4378
4379	ret = __btrfs_balance(fs_info);
4380
4381	mutex_lock(&fs_info->balance_mutex);
4382	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) {
4383		btrfs_info(fs_info, "balance: paused");
4384		btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
4385	}
4386	/*
4387	 * Balance can be canceled by:
4388	 *
4389	 * - Regular cancel request
4390	 *   Then ret == -ECANCELED and balance_cancel_req > 0
4391	 *
4392	 * - Fatal signal to "btrfs" process
4393	 *   Either the signal caught by wait_reserve_ticket() and callers
4394	 *   got -EINTR, or caught by btrfs_should_cancel_balance() and
4395	 *   got -ECANCELED.
4396	 *   Either way, in this case balance_cancel_req = 0, and
4397	 *   ret == -EINTR or ret == -ECANCELED.
4398	 *
4399	 * So here we only check the return value to catch canceled balance.
4400	 */
4401	else if (ret == -ECANCELED || ret == -EINTR)
4402		btrfs_info(fs_info, "balance: canceled");
4403	else
4404		btrfs_info(fs_info, "balance: ended with status: %d", ret);
4405
4406	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4407
4408	if (bargs) {
4409		memset(bargs, 0, sizeof(*bargs));
4410		btrfs_update_ioctl_balance_args(fs_info, bargs);
4411	}
4412
4413	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4414	    balance_need_close(fs_info)) {
4415		reset_balance_state(fs_info);
4416		btrfs_exclop_finish(fs_info);
4417	}
4418
4419	wake_up(&fs_info->balance_wait_q);
4420
4421	return ret;
4422out:
4423	if (bctl->flags & BTRFS_BALANCE_RESUME)
4424		reset_balance_state(fs_info);
4425	else
4426		kfree(bctl);
4427	btrfs_exclop_finish(fs_info);
4428
4429	return ret;
4430}
4431
4432static int balance_kthread(void *data)
4433{
4434	struct btrfs_fs_info *fs_info = data;
4435	int ret = 0;
4436
4437	sb_start_write(fs_info->sb);
4438	mutex_lock(&fs_info->balance_mutex);
4439	if (fs_info->balance_ctl)
4440		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4441	mutex_unlock(&fs_info->balance_mutex);
4442	sb_end_write(fs_info->sb);
4443
4444	return ret;
4445}
4446
4447int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4448{
4449	struct task_struct *tsk;
4450
4451	mutex_lock(&fs_info->balance_mutex);
4452	if (!fs_info->balance_ctl) {
4453		mutex_unlock(&fs_info->balance_mutex);
4454		return 0;
4455	}
4456	mutex_unlock(&fs_info->balance_mutex);
4457
4458	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4459		btrfs_info(fs_info, "balance: resume skipped");
4460		return 0;
4461	}
4462
4463	spin_lock(&fs_info->super_lock);
4464	ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
4465	fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
4466	spin_unlock(&fs_info->super_lock);
4467	/*
4468	 * A ro->rw remount sequence should continue with the paused balance
4469	 * regardless of who pauses it, system or the user as of now, so set
4470	 * the resume flag.
4471	 */
4472	spin_lock(&fs_info->balance_lock);
4473	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4474	spin_unlock(&fs_info->balance_lock);
4475
4476	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4477	return PTR_ERR_OR_ZERO(tsk);
4478}
4479
4480int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4481{
4482	struct btrfs_balance_control *bctl;
4483	struct btrfs_balance_item *item;
4484	struct btrfs_disk_balance_args disk_bargs;
4485	struct btrfs_path *path;
4486	struct extent_buffer *leaf;
4487	struct btrfs_key key;
4488	int ret;
4489
4490	path = btrfs_alloc_path();
4491	if (!path)
4492		return -ENOMEM;
4493
4494	key.objectid = BTRFS_BALANCE_OBJECTID;
4495	key.type = BTRFS_TEMPORARY_ITEM_KEY;
4496	key.offset = 0;
4497
4498	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4499	if (ret < 0)
4500		goto out;
4501	if (ret > 0) { /* ret = -ENOENT; */
4502		ret = 0;
4503		goto out;
4504	}
4505
4506	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4507	if (!bctl) {
4508		ret = -ENOMEM;
4509		goto out;
4510	}
4511
4512	leaf = path->nodes[0];
4513	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4514
 
4515	bctl->flags = btrfs_balance_flags(leaf, item);
4516	bctl->flags |= BTRFS_BALANCE_RESUME;
4517
4518	btrfs_balance_data(leaf, item, &disk_bargs);
4519	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4520	btrfs_balance_meta(leaf, item, &disk_bargs);
4521	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4522	btrfs_balance_sys(leaf, item, &disk_bargs);
4523	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4524
4525	/*
4526	 * This should never happen, as the paused balance state is recovered
4527	 * during mount without any chance of other exclusive ops to collide.
4528	 *
4529	 * This gives the exclusive op status to balance and keeps in paused
4530	 * state until user intervention (cancel or umount). If the ownership
4531	 * cannot be assigned, show a message but do not fail. The balance
4532	 * is in a paused state and must have fs_info::balance_ctl properly
4533	 * set up.
4534	 */
4535	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED))
4536		btrfs_warn(fs_info,
4537	"balance: cannot set exclusive op status, resume manually");
4538
4539	btrfs_release_path(path);
4540
 
4541	mutex_lock(&fs_info->balance_mutex);
4542	BUG_ON(fs_info->balance_ctl);
4543	spin_lock(&fs_info->balance_lock);
4544	fs_info->balance_ctl = bctl;
4545	spin_unlock(&fs_info->balance_lock);
4546	mutex_unlock(&fs_info->balance_mutex);
 
4547out:
4548	btrfs_free_path(path);
4549	return ret;
4550}
4551
4552int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4553{
4554	int ret = 0;
4555
4556	mutex_lock(&fs_info->balance_mutex);
4557	if (!fs_info->balance_ctl) {
4558		mutex_unlock(&fs_info->balance_mutex);
4559		return -ENOTCONN;
4560	}
4561
4562	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4563		atomic_inc(&fs_info->balance_pause_req);
4564		mutex_unlock(&fs_info->balance_mutex);
4565
4566		wait_event(fs_info->balance_wait_q,
4567			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4568
4569		mutex_lock(&fs_info->balance_mutex);
4570		/* we are good with balance_ctl ripped off from under us */
4571		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4572		atomic_dec(&fs_info->balance_pause_req);
4573	} else {
4574		ret = -ENOTCONN;
4575	}
4576
4577	mutex_unlock(&fs_info->balance_mutex);
4578	return ret;
4579}
4580
4581int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4582{
4583	mutex_lock(&fs_info->balance_mutex);
4584	if (!fs_info->balance_ctl) {
4585		mutex_unlock(&fs_info->balance_mutex);
4586		return -ENOTCONN;
4587	}
4588
4589	/*
4590	 * A paused balance with the item stored on disk can be resumed at
4591	 * mount time if the mount is read-write. Otherwise it's still paused
4592	 * and we must not allow cancelling as it deletes the item.
4593	 */
4594	if (sb_rdonly(fs_info->sb)) {
4595		mutex_unlock(&fs_info->balance_mutex);
4596		return -EROFS;
4597	}
4598
4599	atomic_inc(&fs_info->balance_cancel_req);
4600	/*
4601	 * if we are running just wait and return, balance item is
4602	 * deleted in btrfs_balance in this case
4603	 */
4604	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4605		mutex_unlock(&fs_info->balance_mutex);
4606		wait_event(fs_info->balance_wait_q,
4607			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4608		mutex_lock(&fs_info->balance_mutex);
4609	} else {
 
4610		mutex_unlock(&fs_info->balance_mutex);
4611		/*
4612		 * Lock released to allow other waiters to continue, we'll
4613		 * reexamine the status again.
4614		 */
4615		mutex_lock(&fs_info->balance_mutex);
4616
4617		if (fs_info->balance_ctl) {
4618			reset_balance_state(fs_info);
4619			btrfs_exclop_finish(fs_info);
4620			btrfs_info(fs_info, "balance: canceled");
4621		}
4622	}
4623
4624	BUG_ON(fs_info->balance_ctl ||
4625		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4626	atomic_dec(&fs_info->balance_cancel_req);
4627	mutex_unlock(&fs_info->balance_mutex);
4628	return 0;
4629}
4630
4631int btrfs_uuid_scan_kthread(void *data)
4632{
4633	struct btrfs_fs_info *fs_info = data;
4634	struct btrfs_root *root = fs_info->tree_root;
4635	struct btrfs_key key;
 
4636	struct btrfs_path *path = NULL;
4637	int ret = 0;
4638	struct extent_buffer *eb;
4639	int slot;
4640	struct btrfs_root_item root_item;
4641	u32 item_size;
4642	struct btrfs_trans_handle *trans = NULL;
4643	bool closing = false;
4644
4645	path = btrfs_alloc_path();
4646	if (!path) {
4647		ret = -ENOMEM;
4648		goto out;
4649	}
4650
4651	key.objectid = 0;
4652	key.type = BTRFS_ROOT_ITEM_KEY;
4653	key.offset = 0;
4654
4655	while (1) {
4656		if (btrfs_fs_closing(fs_info)) {
4657			closing = true;
4658			break;
4659		}
4660		ret = btrfs_search_forward(root, &key, path,
4661				BTRFS_OLDEST_GENERATION);
4662		if (ret) {
4663			if (ret > 0)
4664				ret = 0;
4665			break;
4666		}
4667
4668		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4669		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4670		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4671		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4672			goto skip;
4673
4674		eb = path->nodes[0];
4675		slot = path->slots[0];
4676		item_size = btrfs_item_size(eb, slot);
4677		if (item_size < sizeof(root_item))
4678			goto skip;
4679
4680		read_extent_buffer(eb, &root_item,
4681				   btrfs_item_ptr_offset(eb, slot),
4682				   (int)sizeof(root_item));
4683		if (btrfs_root_refs(&root_item) == 0)
4684			goto skip;
4685
4686		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4687		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4688			if (trans)
4689				goto update_tree;
4690
4691			btrfs_release_path(path);
4692			/*
4693			 * 1 - subvol uuid item
4694			 * 1 - received_subvol uuid item
4695			 */
4696			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4697			if (IS_ERR(trans)) {
4698				ret = PTR_ERR(trans);
4699				break;
4700			}
4701			continue;
4702		} else {
4703			goto skip;
4704		}
4705update_tree:
4706		btrfs_release_path(path);
4707		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4708			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
 
4709						  BTRFS_UUID_KEY_SUBVOL,
4710						  key.objectid);
4711			if (ret < 0) {
4712				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4713					ret);
4714				break;
4715			}
4716		}
4717
4718		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4719			ret = btrfs_uuid_tree_add(trans,
4720						  root_item.received_uuid,
4721						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4722						  key.objectid);
4723			if (ret < 0) {
4724				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4725					ret);
4726				break;
4727			}
4728		}
4729
4730skip:
4731		btrfs_release_path(path);
4732		if (trans) {
4733			ret = btrfs_end_transaction(trans);
4734			trans = NULL;
4735			if (ret)
4736				break;
4737		}
4738
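		/*
		 * Advance to the next possible key: bump the offset first,
		 * then the type, then the objectid, and stop once all three
		 * are exhausted.
		 */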
 
4739		if (key.offset < (u64)-1) {
4740			key.offset++;
4741		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4742			key.offset = 0;
4743			key.type = BTRFS_ROOT_ITEM_KEY;
4744		} else if (key.objectid < (u64)-1) {
4745			key.offset = 0;
4746			key.type = BTRFS_ROOT_ITEM_KEY;
4747			key.objectid++;
4748		} else {
4749			break;
4750		}
4751		cond_resched();
4752	}
4753
4754out:
4755	btrfs_free_path(path);
4756	if (trans && !IS_ERR(trans))
4757		btrfs_end_transaction(trans);
4758	if (ret)
4759		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4760	else if (!closing)
4761		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4762	up(&fs_info->uuid_tree_rescan_sem);
4763	return 0;
4764}
4765
4766int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4767{
4768	struct btrfs_trans_handle *trans;
4769	struct btrfs_root *tree_root = fs_info->tree_root;
4770	struct btrfs_root *uuid_root;
4771	struct task_struct *task;
4772	int ret;
4773
4774	/*
4775	 * 1 - root node
4776	 * 1 - root item
4777	 */
4778	trans = btrfs_start_transaction(tree_root, 2);
4779	if (IS_ERR(trans))
4780		return PTR_ERR(trans);
4781
4782	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
 
4783	if (IS_ERR(uuid_root)) {
4784		ret = PTR_ERR(uuid_root);
4785		btrfs_abort_transaction(trans, ret);
4786		btrfs_end_transaction(trans);
4787		return ret;
4788	}
4789
4790	fs_info->uuid_root = uuid_root;
4791
4792	ret = btrfs_commit_transaction(trans);
4793	if (ret)
4794		return ret;
4795
4796	down(&fs_info->uuid_tree_rescan_sem);
4797	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4798	if (IS_ERR(task)) {
4799		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4800		btrfs_warn(fs_info, "failed to start uuid_scan task");
4801		up(&fs_info->uuid_tree_rescan_sem);
4802		return PTR_ERR(task);
4803	}
4804
4805	return 0;
4806}
4807
4808/*
4809 * Shrinking a device means finding all of the device extents past
4810 * the new size, and then following the back refs to the chunks.
4811 * The chunk relocation code actually frees the device extent.
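 *
 * Device extents are visited from the end of the device backwards.  A
 * first pass may fail with ENOSPC for some chunks because relocating
 * other chunks can free space, so the whole loop is retried once before
 * giving up.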
4812 */
4813int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4814{
4815	struct btrfs_fs_info *fs_info = device->fs_info;
4816	struct btrfs_root *root = fs_info->dev_root;
4817	struct btrfs_trans_handle *trans;
 
4818	struct btrfs_dev_extent *dev_extent = NULL;
4819	struct btrfs_path *path;
4820	u64 length;
 
 
4821	u64 chunk_offset;
4822	int ret;
4823	int slot;
4824	int failed = 0;
4825	bool retried = false;
4826	struct extent_buffer *l;
4827	struct btrfs_key key;
4828	struct btrfs_super_block *super_copy = fs_info->super_copy;
4829	u64 old_total = btrfs_super_total_bytes(super_copy);
4830	u64 old_size = btrfs_device_get_total_bytes(device);
4831	u64 diff;
4832	u64 start;
4833
4834	new_size = round_down(new_size, fs_info->sectorsize);
4835	start = new_size;
4836	diff = round_down(old_size - new_size, fs_info->sectorsize);
4837
4838	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4839		return -EINVAL;
4840
4841	path = btrfs_alloc_path();
4842	if (!path)
4843		return -ENOMEM;
4844
4845	path->reada = READA_BACK;
4846
4847	trans = btrfs_start_transaction(root, 0);
4848	if (IS_ERR(trans)) {
4849		btrfs_free_path(path);
4850		return PTR_ERR(trans);
4851	}
4852
4853	mutex_lock(&fs_info->chunk_mutex);
4854
4855	btrfs_device_set_total_bytes(device, new_size);
4856	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4857		device->fs_devices->total_rw_bytes -= diff;
4858		atomic64_sub(diff, &fs_info->free_chunk_space);
4859	}
4860
4861	/*
4862	 * Once the device's size has been set to the new size, ensure all
4863	 * in-memory chunks are synced to disk so that the loop below sees them
4864	 * and relocates them accordingly.
4865	 */
4866	if (contains_pending_extent(device, &start, diff)) {
4867		mutex_unlock(&fs_info->chunk_mutex);
4868		ret = btrfs_commit_transaction(trans);
4869		if (ret)
4870			goto done;
4871	} else {
4872		mutex_unlock(&fs_info->chunk_mutex);
4873		btrfs_end_transaction(trans);
4874	}
 
4875
4876again:
4877	key.objectid = device->devid;
4878	key.offset = (u64)-1;
4879	key.type = BTRFS_DEV_EXTENT_KEY;
4880
4881	do {
4882		mutex_lock(&fs_info->reclaim_bgs_lock);
4883		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4884		if (ret < 0) {
4885			mutex_unlock(&fs_info->reclaim_bgs_lock);
4886			goto done;
4887		}
4888
4889		ret = btrfs_previous_item(root, path, 0, key.type);
 
 
4890		if (ret) {
4891			mutex_unlock(&fs_info->reclaim_bgs_lock);
4892			if (ret < 0)
4893				goto done;
4894			ret = 0;
4895			btrfs_release_path(path);
4896			break;
4897		}
4898
4899		l = path->nodes[0];
4900		slot = path->slots[0];
4901		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4902
4903		if (key.objectid != device->devid) {
4904			mutex_unlock(&fs_info->reclaim_bgs_lock);
4905			btrfs_release_path(path);
4906			break;
4907		}
4908
4909		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4910		length = btrfs_dev_extent_length(l, dev_extent);
4911
4912		if (key.offset + length <= new_size) {
4913			mutex_unlock(&fs_info->reclaim_bgs_lock);
4914			btrfs_release_path(path);
4915			break;
4916		}
4917
 
 
4918		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4919		btrfs_release_path(path);
4920
4921		/*
4922		 * We may be relocating the only data chunk we have,
4923		 * which could potentially end up with losing data's
4924		 * raid profile, so let's allocate an empty one in
4925		 * advance.
4926		 */
4927		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4928		if (ret < 0) {
4929			mutex_unlock(&fs_info->reclaim_bgs_lock);
4930			goto done;
4931		}
4932
4933		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4934		mutex_unlock(&fs_info->reclaim_bgs_lock);
4935		if (ret == -ENOSPC) {
4936			failed++;
4937		} else if (ret) {
4938			if (ret == -ETXTBSY) {
4939				btrfs_warn(fs_info,
4940		   "could not shrink block group %llu due to active swapfile",
4941					   chunk_offset);
4942			}
4943			goto done;
4944		}
 
4945	} while (key.offset-- > 0);
4946
4947	if (failed && !retried) {
4948		failed = 0;
4949		retried = true;
4950		goto again;
4951	} else if (failed && retried) {
4952		ret = -ENOSPC;
4953		goto done;
4954	}
4955
4956	/* Shrinking succeeded, else we would be at "done". */
4957	trans = btrfs_start_transaction(root, 0);
4958	if (IS_ERR(trans)) {
4959		ret = PTR_ERR(trans);
4960		goto done;
4961	}
4962
4963	mutex_lock(&fs_info->chunk_mutex);
4964	/* Clear all state bits beyond the shrunk device size */
4965	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4966			  CHUNK_STATE_MASK);
4967
4968	btrfs_device_set_disk_total_bytes(device, new_size);
4969	if (list_empty(&device->post_commit_list))
4970		list_add_tail(&device->post_commit_list,
4971			      &trans->transaction->dev_update_list);
4972
4973	WARN_ON(diff > old_total);
4974	btrfs_set_super_total_bytes(super_copy,
4975			round_down(old_total - diff, fs_info->sectorsize));
4976	mutex_unlock(&fs_info->chunk_mutex);
4977
4978	btrfs_reserve_chunk_metadata(trans, false);
4979	/* Now btrfs_update_device() will change the on-disk size. */
4980	ret = btrfs_update_device(trans, device);
4981	btrfs_trans_release_chunk_metadata(trans);
4982	if (ret < 0) {
4983		btrfs_abort_transaction(trans, ret);
4984		btrfs_end_transaction(trans);
4985	} else {
4986		ret = btrfs_commit_transaction(trans);
4987	}
4988done:
4989	btrfs_free_path(path);
4990	if (ret) {
4991		mutex_lock(&fs_info->chunk_mutex);
4992		btrfs_device_set_total_bytes(device, old_size);
4993		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4994			device->fs_devices->total_rw_bytes += diff;
4995		atomic64_add(diff, &fs_info->free_chunk_space);
4996		mutex_unlock(&fs_info->chunk_mutex);
4997	}
4998	return ret;
4999}
5000
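/*
 * Append the chunk item, preceded by its key, to the superblock's
 * sys_chunk_array so that system chunks can be found at mount time
 * before the chunk tree itself is readable.
 */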
5001static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
5002			   struct btrfs_key *key,
5003			   struct btrfs_chunk *chunk, int item_size)
5004{
5005	struct btrfs_super_block *super_copy = fs_info->super_copy;
5006	struct btrfs_disk_key disk_key;
5007	u32 array_size;
5008	u8 *ptr;
5009
5010	lockdep_assert_held(&fs_info->chunk_mutex);
5011
5012	array_size = btrfs_super_sys_array_size(super_copy);
5013	if (array_size + item_size + sizeof(disk_key)
5014			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
5015		return -EFBIG;
5016
5017	ptr = super_copy->sys_chunk_array + array_size;
5018	btrfs_cpu_key_to_disk(&disk_key, key);
5019	memcpy(ptr, &disk_key, sizeof(disk_key));
5020	ptr += sizeof(disk_key);
5021	memcpy(ptr, chunk, item_size);
5022	item_size += sizeof(disk_key);
5023	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
5024
5025	return 0;
5026}
5027
5028/*
5029 * sort the devices in descending order by max_avail, total_avail
5030 */
5031static int btrfs_cmp_device_info(const void *a, const void *b)
5032{
5033	const struct btrfs_device_info *di_a = a;
5034	const struct btrfs_device_info *di_b = b;
5035
5036	if (di_a->max_avail > di_b->max_avail)
5037		return -1;
5038	if (di_a->max_avail < di_b->max_avail)
5039		return 1;
5040	if (di_a->total_avail > di_b->total_avail)
5041		return -1;
5042	if (di_a->total_avail < di_b->total_avail)
5043		return 1;
5044	return 0;
5045}
5046
5047static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
5048{
5049	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5050		return;
5051
5052	btrfs_set_fs_incompat(info, RAID56);
 
 
 
5053}
5054
5055static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
5056{
5057	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
5058		return;
5059
5060	btrfs_set_fs_incompat(info, RAID1C34);
5061}
5062
5063/*
5064 * Structure used internally for btrfs_create_chunk() function.
5065 * Wraps needed parameters.
5066 */
5067struct alloc_chunk_ctl {
5068	u64 start;
5069	u64 type;
5070	/* Total number of stripes to allocate */
5071	int num_stripes;
5072	/* sub_stripes info for map */
5073	int sub_stripes;
5074	/* Stripes per device */
5075	int dev_stripes;
5076	/* Maximum number of devices to use */
5077	int devs_max;
5078	/* Minimum number of devices to use */
5079	int devs_min;
5080	/* ndevs has to be a multiple of this */
5081	int devs_increment;
5082	/* Number of copies */
5083	int ncopies;
5084	/* Number of stripes worth of bytes to store parity information */
5085	int nparity;
5086	u64 max_stripe_size;
5087	u64 max_chunk_size;
5088	u64 dev_extent_min;
5089	u64 stripe_size;
5090	u64 chunk_size;
 
5091	int ndevs;
5092};
5093
5094static void init_alloc_chunk_ctl_policy_regular(
5095				struct btrfs_fs_devices *fs_devices,
5096				struct alloc_chunk_ctl *ctl)
5097{
5098	struct btrfs_space_info *space_info;
5099
5100	space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type);
5101	ASSERT(space_info);
5102
5103	ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
5104	ctl->max_stripe_size = ctl->max_chunk_size;
5105
5106	if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
5107		ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
5108
5109	/* We don't want a chunk larger than 10% of writable space */
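	/*
	 * e.g. with 20GiB of writable space mult_perc(20GiB, 10) = 2GiB,
	 * which caps max_chunk_size unless it was already smaller.
	 */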
5110	ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
5111				  ctl->max_chunk_size);
5112	ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
5113}
5114
5115static void init_alloc_chunk_ctl_policy_zoned(
5116				      struct btrfs_fs_devices *fs_devices,
5117				      struct alloc_chunk_ctl *ctl)
5118{
5119	u64 zone_size = fs_devices->fs_info->zone_size;
5120	u64 limit;
5121	int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5122	int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5123	u64 min_chunk_size = min_data_stripes * zone_size;
5124	u64 type = ctl->type;
5125
5126	ctl->max_stripe_size = zone_size;
5127	if (type & BTRFS_BLOCK_GROUP_DATA) {
5128		ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5129						 zone_size);
5130	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5131		ctl->max_chunk_size = ctl->max_stripe_size;
5132	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5133		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5134		ctl->devs_max = min_t(int, ctl->devs_max,
5135				      BTRFS_MAX_DEVS_SYS_CHUNK);
5136	} else {
5137		BUG();
 
 
5138	}
5139
5140	/* We don't want a chunk larger than 10% of writable space */
5141	limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10),
5142			       zone_size),
5143		    min_chunk_size);
5144	ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5145	ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5146}
5147
5148static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5149				 struct alloc_chunk_ctl *ctl)
5150{
5151	int index = btrfs_bg_flags_to_raid_index(ctl->type);
5152
5153	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5154	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5155	ctl->devs_max = btrfs_raid_array[index].devs_max;
5156	if (!ctl->devs_max)
5157		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5158	ctl->devs_min = btrfs_raid_array[index].devs_min;
5159	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5160	ctl->ncopies = btrfs_raid_array[index].ncopies;
5161	ctl->nparity = btrfs_raid_array[index].nparity;
5162	ctl->ndevs = 0;
5163
5164	switch (fs_devices->chunk_alloc_policy) {
5165	case BTRFS_CHUNK_ALLOC_REGULAR:
5166		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5167		break;
5168	case BTRFS_CHUNK_ALLOC_ZONED:
5169		init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5170		break;
5171	default:
5172		BUG();
5173	}
5174}
5175
5176static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5177			      struct alloc_chunk_ctl *ctl,
5178			      struct btrfs_device_info *devices_info)
5179{
5180	struct btrfs_fs_info *info = fs_devices->fs_info;
5181	struct btrfs_device *device;
5182	u64 total_avail;
5183	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5184	int ret;
5185	int ndevs = 0;
5186	u64 max_avail;
5187	u64 dev_offset;
5188
5189	/*
5190	 * in the first pass through the devices list, we gather information
5191	 * about the available holes on each device.
5192	 */
5193	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5194		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5195			WARN(1, KERN_ERR
5196			       "BTRFS: read-only device in alloc_list\n");
5197			continue;
5198		}
5199
5200		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5201					&device->dev_state) ||
5202		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5203			continue;
5204
5205		if (device->total_bytes > device->bytes_used)
5206			total_avail = device->total_bytes - device->bytes_used;
5207		else
5208			total_avail = 0;
5209
5210		/* If there is no space on this device, skip it. */
5211		if (total_avail < ctl->dev_extent_min)
5212			continue;
5213
5214		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5215					   &max_avail);
 
5216		if (ret && ret != -ENOSPC)
5217			return ret;
5218
5219		if (ret == 0)
5220			max_avail = dev_extent_want;
5221
5222		if (max_avail < ctl->dev_extent_min) {
5223			if (btrfs_test_opt(info, ENOSPC_DEBUG))
5224				btrfs_debug(info,
5225			"%s: devid %llu has no free space, have=%llu want=%llu",
5226					    __func__, device->devid, max_avail,
5227					    ctl->dev_extent_min);
5228			continue;
5229		}
5230
5231		if (ndevs == fs_devices->rw_devices) {
5232			WARN(1, "%s: found more than %llu devices\n",
5233			     __func__, fs_devices->rw_devices);
5234			break;
5235		}
5236		devices_info[ndevs].dev_offset = dev_offset;
5237		devices_info[ndevs].max_avail = max_avail;
5238		devices_info[ndevs].total_avail = total_avail;
5239		devices_info[ndevs].dev = device;
5240		++ndevs;
5241	}
5242	ctl->ndevs = ndevs;
5243
5244	/*
5245	 * now sort the devices by hole size / available space
5246	 */
5247	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5248	     btrfs_cmp_device_info, NULL);
5249
5250	return 0;
5251}
5252
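/*
 * Example for the regular policy: RAID1 (ncopies=2) over ndevs=2 with the
 * smallest usable hole of 4GiB and max_chunk_size of 1GiB starts with
 * stripe_size = 4GiB and data_stripes = 1; 4GiB exceeds the 1GiB limit,
 * so stripe_size is reduced to 1GiB and chunk_size ends up as 1GiB.
 */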
5253static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5254				      struct btrfs_device_info *devices_info)
5255{
5256	/* Number of stripes that count for block group size */
5257	int data_stripes;
5258
5259	/*
5260	 * The primary goal is to maximize the number of stripes, so use as
5261	 * many devices as possible, even if the stripes are not maximum sized.
5262	 *
5263	 * The DUP profile stores more than one stripe per device; the
5264	 * max_avail is the total size, so we have to adjust.
5265	 */
5266	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5267				   ctl->dev_stripes);
5268	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5269
5270	/* This will have to be fixed for RAID1 and RAID10 over more drives */
5271	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
 
 
5272
 
 
5273	/*
5274	 * Use the number of data stripes to figure out how big this chunk is
5275	 * really going to be in terms of logical address space, and compare
5276	 * that answer with the max chunk size. If it's higher, we try to
5277	 * reduce stripe_size.
5278	 */
5279	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5280		/*
5281		 * Reduce stripe_size, round it up to a 16MB boundary again and
5282		 * then use it, unless it ends up being even bigger than the
5283		 * previous value we had already.
5284		 */
5285		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5286							data_stripes), SZ_16M),
5287				       ctl->stripe_size);
5288	}
5289
5290	/* Stripe size should not go beyond 1G. */
5291	ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
5292
5293	/* Align to BTRFS_STRIPE_LEN */
5294	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5295	ctl->chunk_size = ctl->stripe_size * data_stripes;
5296
5297	return 0;
5298}
5299
5300static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5301				    struct btrfs_device_info *devices_info)
5302{
5303	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5304	/* Number of stripes that count for block group size */
5305	int data_stripes;
5306
5307	/*
5308	 * It should hold because:
5309	 *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
5310	 */
5311	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5312
5313	ctl->stripe_size = zone_size;
5314	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5315	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5316
5317	/* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
5318	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5319		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5320					     ctl->stripe_size) + ctl->nparity,
5321				     ctl->dev_stripes);
5322		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5323		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5324		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5325	}
5326
5327	ctl->chunk_size = ctl->stripe_size * data_stripes;
5328
5329	return 0;
5330}
5331
5332static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5333			      struct alloc_chunk_ctl *ctl,
5334			      struct btrfs_device_info *devices_info)
5335{
5336	struct btrfs_fs_info *info = fs_devices->fs_info;
5337
5338	/*
5339	 * Round down to number of usable stripes, devs_increment can be any
5340	 * number so we can't use round_down() that requires power of 2, while
5341	 * rounddown is safe.
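	 *
	 * e.g. RAID10 has devs_increment == 2, so 5 usable devices are
	 * rounded down to 4.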
5342	 */
5343	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
 
 
 
5344
5345	if (ctl->ndevs < ctl->devs_min) {
5346		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5347			btrfs_debug(info,
5348	"%s: not enough devices with free space: have=%d minimum required=%d",
5349				    __func__, ctl->ndevs, ctl->devs_min);
5350		}
5351		return -ENOSPC;
5352	}
5353
5354	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5355
5356	switch (fs_devices->chunk_alloc_policy) {
5357	case BTRFS_CHUNK_ALLOC_REGULAR:
5358		return decide_stripe_size_regular(ctl, devices_info);
5359	case BTRFS_CHUNK_ALLOC_ZONED:
5360		return decide_stripe_size_zoned(ctl, devices_info);
5361	default:
5362		BUG();
5363	}
5364}
5365
5366static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5367			struct alloc_chunk_ctl *ctl,
5368			struct btrfs_device_info *devices_info)
5369{
5370	struct btrfs_fs_info *info = trans->fs_info;
5371	struct map_lookup *map = NULL;
5372	struct extent_map_tree *em_tree;
5373	struct btrfs_block_group *block_group;
5374	struct extent_map *em;
5375	u64 start = ctl->start;
5376	u64 type = ctl->type;
5377	int ret;
5378	int i;
5379	int j;
5380
5381	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5382	if (!map)
5383		return ERR_PTR(-ENOMEM);
5384	map->num_stripes = ctl->num_stripes;
5385
5386	for (i = 0; i < ctl->ndevs; ++i) {
5387		for (j = 0; j < ctl->dev_stripes; ++j) {
5388			int s = i * ctl->dev_stripes + j;
5389			map->stripes[s].dev = devices_info[i].dev;
5390			map->stripes[s].physical = devices_info[i].dev_offset +
5391						   j * ctl->stripe_size;
5392		}
5393	}
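	/*
	 * Example of the placement above: for DUP (ndevs = 1, dev_stripes = 2)
	 * stripes 0 and 1 both live on devices_info[0].dev, at dev_offset and
	 * dev_offset + stripe_size respectively.
	 */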
5394	map->stripe_len = BTRFS_STRIPE_LEN;
5395	map->io_align = BTRFS_STRIPE_LEN;
5396	map->io_width = BTRFS_STRIPE_LEN;
5397	map->type = type;
5398	map->sub_stripes = ctl->sub_stripes;
5399
5400	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5401
5402	em = alloc_extent_map();
5403	if (!em) {
5404		kfree(map);
5405		return ERR_PTR(-ENOMEM);
5406	}
5407	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5408	em->map_lookup = map;
5409	em->start = start;
5410	em->len = ctl->chunk_size;
5411	em->block_start = 0;
5412	em->block_len = em->len;
5413	em->orig_block_len = ctl->stripe_size;
5414
5415	em_tree = &info->mapping_tree;
5416	write_lock(&em_tree->lock);
5417	ret = add_extent_mapping(em_tree, em, 0);
5418	if (ret) {
5419		write_unlock(&em_tree->lock);
5420		free_extent_map(em);
5421		return ERR_PTR(ret);
5422	}
5423	write_unlock(&em_tree->lock);
5424
5425	block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5426	if (IS_ERR(block_group))
5427		goto error_del_extent;
5428
5429	for (i = 0; i < map->num_stripes; i++) {
5430		struct btrfs_device *dev = map->stripes[i].dev;
5431
5432		btrfs_device_set_bytes_used(dev,
5433					    dev->bytes_used + ctl->stripe_size);
5434		if (list_empty(&dev->post_commit_list))
5435			list_add_tail(&dev->post_commit_list,
5436				      &trans->transaction->dev_update_list);
5437	}
5438
5439	atomic64_sub(ctl->stripe_size * map->num_stripes,
5440		     &info->free_chunk_space);
5441
5442	free_extent_map(em);
5443	check_raid56_incompat_flag(info, type);
5444	check_raid1c34_incompat_flag(info, type);
5445
5446	return block_group;
5447
5448error_del_extent:
5449	write_lock(&em_tree->lock);
5450	remove_extent_mapping(em_tree, em);
5451	write_unlock(&em_tree->lock);
5452
5453	/* One for our allocation */
5454	free_extent_map(em);
5455	/* One for the tree reference */
5456	free_extent_map(em);
5457
5458	return block_group;
5459}
5460
5461struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
5462					    u64 type)
5463{
5464	struct btrfs_fs_info *info = trans->fs_info;
5465	struct btrfs_fs_devices *fs_devices = info->fs_devices;
5466	struct btrfs_device_info *devices_info = NULL;
5467	struct alloc_chunk_ctl ctl;
5468	struct btrfs_block_group *block_group;
5469	int ret;
5470
5471	lockdep_assert_held(&info->chunk_mutex);
5472
5473	if (!alloc_profile_is_valid(type, 0)) {
5474		ASSERT(0);
5475		return ERR_PTR(-EINVAL);
5476	}
5477
5478	if (list_empty(&fs_devices->alloc_list)) {
5479		if (btrfs_test_opt(info, ENOSPC_DEBUG))
5480			btrfs_debug(info, "%s: no writable device", __func__);
5481		return ERR_PTR(-ENOSPC);
5482	}
5483
5484	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5485		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5486		ASSERT(0);
5487		return ERR_PTR(-EINVAL);
5488	}
5489
5490	ctl.start = find_next_chunk(info);
5491	ctl.type = type;
5492	init_alloc_chunk_ctl(fs_devices, &ctl);
5493
5494	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5495			       GFP_NOFS);
5496	if (!devices_info)
5497		return ERR_PTR(-ENOMEM);
5498
5499	ret = gather_device_info(fs_devices, &ctl, devices_info);
5500	if (ret < 0) {
5501		block_group = ERR_PTR(ret);
5502		goto out;
5503	}
5504
5505	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5506	if (ret < 0) {
5507		block_group = ERR_PTR(ret);
5508		goto out;
5509	}
5510
5511	block_group = create_chunk(trans, &ctl, devices_info);
5512
5513out:
5514	kfree(devices_info);
5515	return block_group;
5516}
5517
5518/*
5519 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5520 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
5521 * system chunks.
5522 *
5523 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5524 * phases.
5525 */
5526int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5527				     struct btrfs_block_group *bg)
5528{
5529	struct btrfs_fs_info *fs_info = trans->fs_info;
5530	struct btrfs_root *chunk_root = fs_info->chunk_root;
5531	struct btrfs_key key;
5532	struct btrfs_chunk *chunk;
5533	struct btrfs_stripe *stripe;
5534	struct extent_map *em;
5535	struct map_lookup *map;
5536	size_t item_size;
5537	int i;
5538	int ret;
5539
5540	/*
5541	 * We take the chunk_mutex for 2 reasons:
5542	 *
5543	 * 1) Updates and insertions in the chunk btree must be done while holding
5544	 *    the chunk_mutex, as well as updating the system chunk array in the
5545	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
5546	 *    details;
5547	 *
5548	 * 2) To prevent races with the final phase of a device replace operation
5549	 *    that replaces the device object associated with the map's stripes,
5550	 *    because the device object's id can change at any time during that
5551	 *    final phase of the device replace operation
5552	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5553	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5554	 *    which would cause a failure when updating the device item, which does
5555	 *    not exist, or persisting a stripe of the chunk item with such an ID.
5556	 *    Here we can't use the device_list_mutex because our caller already
5557	 *    has locked the chunk_mutex, and the final phase of device replace
5558	 *    acquires both mutexes - first the device_list_mutex and then the
5559	 *    chunk_mutex. Using any of those two mutexes protects us from a
5560	 *    concurrent device replace.
5561	 */
5562	lockdep_assert_held(&fs_info->chunk_mutex);
5563
5564	em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5565	if (IS_ERR(em)) {
5566		ret = PTR_ERR(em);
5567		btrfs_abort_transaction(trans, ret);
5568		return ret;
5569	}
5570
5571	map = em->map_lookup;
5572	item_size = btrfs_chunk_item_size(map->num_stripes);
5573
5574	chunk = kzalloc(item_size, GFP_NOFS);
5575	if (!chunk) {
5576		ret = -ENOMEM;
5577		btrfs_abort_transaction(trans, ret);
5578		goto out;
5579	}
5580
5581	for (i = 0; i < map->num_stripes; i++) {
5582		struct btrfs_device *device = map->stripes[i].dev;
5583
5584		ret = btrfs_update_device(trans, device);
5585		if (ret)
5586			goto out;
5587	}
5588
5589	stripe = &chunk->stripe;
5590	for (i = 0; i < map->num_stripes; i++) {
5591		struct btrfs_device *device = map->stripes[i].dev;
5592		const u64 dev_offset = map->stripes[i].physical;
5593
5594		btrfs_set_stack_stripe_devid(stripe, device->devid);
5595		btrfs_set_stack_stripe_offset(stripe, dev_offset);
5596		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5597		stripe++;
5598	}
5599
5600	btrfs_set_stack_chunk_length(chunk, bg->length);
5601	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
5602	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5603	btrfs_set_stack_chunk_type(chunk, map->type);
5604	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5605	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5606	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5607	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5608	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5609
5610	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5611	key.type = BTRFS_CHUNK_ITEM_KEY;
5612	key.offset = bg->start;
5613
5614	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5615	if (ret)
5616		goto out;
5617
5618	set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);
5619
5620	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5621		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5622		if (ret)
5623			goto out;
5624	}
5625
5626out:
5627	kfree(chunk);
5628	free_extent_map(em);
5629	return ret;
5630}
5631
5632static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5633{
5634	struct btrfs_fs_info *fs_info = trans->fs_info;
5635	u64 alloc_profile;
5636	struct btrfs_block_group *meta_bg;
5637	struct btrfs_block_group *sys_bg;
5638
5639	/*
5640	 * When adding a new device for sprouting, the seed device is read-only
5641	 * so we must first allocate a metadata and a system chunk. But before
5642	 * adding the block group items to the extent, device and chunk btrees,
5643	 * we must first:
5644	 *
5645	 * 1) Create both chunks without doing any changes to the btrees, as
5646	 *    otherwise we would get -ENOSPC since the block groups from the
5647	 *    seed device are read-only;
5648	 *
5649	 * 2) Add the device item for the new sprout device - finishing the setup
5650	 *    of a new block group requires updating the device item in the chunk
5651	 *    btree, so it must exist when we attempt to do it. The previous step
5652	 *    ensures this does not fail with -ENOSPC.
5653	 *
5654	 * After that we can add the block group items to their btrees:
5655	 * update existing device item in the chunk btree, add a new block group
5656	 * item to the extent btree, add a new chunk item to the chunk btree and
5657	 * finally add the new device extent items to the devices btree.
5658	 */
5659
5660	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5661	meta_bg = btrfs_create_chunk(trans, alloc_profile);
5662	if (IS_ERR(meta_bg))
5663		return PTR_ERR(meta_bg);
5664
5665	alloc_profile = btrfs_system_alloc_profile(fs_info);
5666	sys_bg = btrfs_create_chunk(trans, alloc_profile);
5667	if (IS_ERR(sys_bg))
5668		return PTR_ERR(sys_bg);
5669
5670	return 0;
5671}
5672
5673static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5674{
5675	const int index = btrfs_bg_flags_to_raid_index(map->type);
5676
5677	return btrfs_raid_array[index].tolerated_failures;
5678}
5679
5680bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5681{
5682	struct extent_map *em;
5683	struct map_lookup *map;
5684	int miss_ndevs = 0;
5685	int i;
5686	bool ret = true;
5687
5688	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5689	if (IS_ERR(em))
5690		return false;
5691
5692	map = em->map_lookup;
5693	for (i = 0; i < map->num_stripes; i++) {
5694		if (test_bit(BTRFS_DEV_STATE_MISSING,
5695					&map->stripes[i].dev->dev_state)) {
5696			miss_ndevs++;
5697			continue;
5698		}
5699		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5700					&map->stripes[i].dev->dev_state)) {
5701			ret = false;
5702			goto end;
5703		}
5704	}
5705
5706	/*
5707	 * If the number of missing devices is larger than max errors, we cannot
5708	 * write the data into that chunk successfully.
5709	 */
5710	if (miss_ndevs > btrfs_chunk_max_errors(map))
5711		ret = false;
5712end:
5713	free_extent_map(em);
5714	return ret;
5715}
5716
5717void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5718{
5719	struct extent_map *em;
5720
5721	while (1) {
5722		write_lock(&tree->lock);
5723		em = lookup_extent_mapping(tree, 0, (u64)-1);
5724		if (em)
5725			remove_extent_mapping(tree, em);
5726		write_unlock(&tree->lock);
5727		if (!em)
5728			break;
5729		/* once for us */
5730		free_extent_map(em);
5731		/* once for the tree */
5732		free_extent_map(em);
5733	}
5734}
5735
5736int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5737{
5738	struct extent_map *em;
5739	struct map_lookup *map;
5740	enum btrfs_raid_types index;
5741	int ret = 1;
5742
5743	em = btrfs_get_chunk_map(fs_info, logical, len);
5744	if (IS_ERR(em))
5745		/*
5746		 * We could return errors for these cases, but that could get
5747		 * ugly and we'd probably end up doing the same thing anyway (do
5748		 * nothing else and exit), so return 1 so the callers don't try
5749		 * to use other copies.
5750		 */
5751		return 1;
5752
5753	map = em->map_lookup;
5754	index = btrfs_bg_flags_to_raid_index(map->type);
5755
5756	/* Non-RAID56, use their ncopies from btrfs_raid_array. */
5757	if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5758		ret = btrfs_raid_array[index].ncopies;
5759	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5760		ret = 2;
5761	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5762		/*
5763		 * There could be two corrupted data stripes, so we need
5764		 * to retry in a loop in order to rebuild the correct data.
5765		 *
5766		 * Fail a stripe at a time on every retry except the
5767		 * stripe under reconstruction.
5768		 */
5769		ret = map->num_stripes;
5770	free_extent_map(em);
5771
5772	down_read(&fs_info->dev_replace.rwsem);
5773	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5774	    fs_info->dev_replace.tgtdev)
5775		ret++;
5776	up_read(&fs_info->dev_replace.rwsem);
5777
5778	return ret;
5779}
5780
5781unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5782				    u64 logical)
5783{
5784	struct extent_map *em;
5785	struct map_lookup *map;
5786	unsigned long len = fs_info->sectorsize;
5787
5788	if (!btrfs_fs_incompat(fs_info, RAID56))
5789		return len;
5790
5791	em = btrfs_get_chunk_map(fs_info, logical, len);
5792
5793	if (!WARN_ON(IS_ERR(em))) {
5794		map = em->map_lookup;
5795		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5796			len = map->stripe_len * nr_data_stripes(map);
5797		free_extent_map(em);
5798	}
5799	return len;
5800}
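/*
 * For example, on a RAID5 chunk with 4 stripes (3 data + 1 parity) the value
 * returned above is nr_data_stripes(map) * stripe_len = 3 * BTRFS_STRIPE_LEN,
 * i.e. the amount of logical data covered by one full stripe.
 */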
5801
5802int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5803{
5804	struct extent_map *em;
5805	struct map_lookup *map;
5806	int ret = 0;
5807
5808	if (!btrfs_fs_incompat(fs_info, RAID56))
5809		return 0;
5810
5811	em = btrfs_get_chunk_map(fs_info, logical, len);
5812
5813	if (!WARN_ON(IS_ERR(em))) {
5814		map = em->map_lookup;
5815		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5816			ret = 1;
5817		free_extent_map(em);
5818	}
5819	return ret;
5820}
5821
5822static int find_live_mirror(struct btrfs_fs_info *fs_info,
5823			    struct map_lookup *map, int first,
5824			    int dev_replace_is_ongoing)
5825{
5826	int i;
5827	int num_stripes;
5828	int preferred_mirror;
5829	int tolerance;
5830	struct btrfs_device *srcdev;
5831
5832	ASSERT((map->type &
5833		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5834
5835	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5836		num_stripes = map->sub_stripes;
5837	else
5838		num_stripes = map->num_stripes;
5839
5840	switch (fs_info->fs_devices->read_policy) {
5841	default:
5842		/* Shouldn't happen, just warn and use pid instead of failing */
5843		btrfs_warn_rl(fs_info,
5844			      "unknown read_policy type %u, reset to pid",
5845			      fs_info->fs_devices->read_policy);
5846		fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5847		fallthrough;
5848	case BTRFS_READ_POLICY_PID:
5849		preferred_mirror = first + (current->pid % num_stripes);
5850		break;
5851	}
5852
5853	if (dev_replace_is_ongoing &&
5854	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5855	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5856		srcdev = fs_info->dev_replace.srcdev;
5857	else
5858		srcdev = NULL;
5859
5860	/*
5861	 * Try to avoid the drive that is the source drive for a
5862	 * dev-replace procedure; only choose it if no other non-missing
5863	 * mirror is available.
5864	 */
5865	for (tolerance = 0; tolerance < 2; tolerance++) {
5866		if (map->stripes[preferred_mirror].dev->bdev &&
5867		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5868			return preferred_mirror;
5869		for (i = first; i < first + num_stripes; i++) {
5870			if (map->stripes[i].dev->bdev &&
5871			    (tolerance || map->stripes[i].dev != srcdev))
5872				return i;
5873		}
5874	}
5875
5876	/* We couldn't find one that doesn't fail.  Just return something
5877	 * and the IO error handling code will clean up eventually.
5878	 */
5879	return preferred_mirror;
5880}
5881
5882/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5883static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
5884{
5885	int i;
5886	int again = 1;
5887
5888	while (again) {
5889		again = 0;
5890		for (i = 0; i < num_stripes - 1; i++) {
5891			/* Swap if parity is on a smaller index */
5892			if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
5893				swap(bioc->stripes[i], bioc->stripes[i + 1]);
5894				swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
5895				again = 1;
5896			}
5897		}
5898	}
5899}
5900
5901static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
5902						       int total_stripes,
5903						       int real_stripes)
5904{
5905	struct btrfs_io_context *bioc = kzalloc(
5906		 /* The size of btrfs_io_context */
5907		sizeof(struct btrfs_io_context) +
5908		/* Plus the variable array for the stripes */
5909		sizeof(struct btrfs_io_stripe) * (total_stripes) +
5910		/* Plus the variable array for the tgt dev */
5911		sizeof(int) * (real_stripes) +
5912		/*
5913		 * Plus the raid_map, which includes both the tgt dev
5914		 * and the stripes.
5915		 */
5916		sizeof(u64) * (total_stripes),
5917		GFP_NOFS);
5918
5919	if (!bioc)
5920		return NULL;
5921
5922	refcount_set(&bioc->refs, 1);
5923
5924	bioc->fs_info = fs_info;
5925	bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
5926	bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);
5927
5928	return bioc;
5929}
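/*
 * Layout of the single allocation made above, in order:
 *
 *   [ struct btrfs_io_context ]
 *   [ struct btrfs_io_stripe  ] * total_stripes    <- bioc->stripes
 *   [ int                     ] * real_stripes     <- bioc->tgtdev_map
 *   [ u64                     ] * total_stripes    <- bioc->raid_map
 */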
5930
5931void btrfs_get_bioc(struct btrfs_io_context *bioc)
5932{
5933	WARN_ON(!refcount_read(&bioc->refs));
5934	refcount_inc(&bioc->refs);
5935}
5936
5937void btrfs_put_bioc(struct btrfs_io_context *bioc)
5938{
5939	if (!bioc)
5940		return;
5941	if (refcount_dec_and_test(&bioc->refs))
5942		kfree(bioc);
5943}
5944
5945/*
5946 * Please note that discard won't be sent to the target device of device
5947 * replace.
5948 */
5949struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
5950					       u64 logical, u64 *length_ret,
5951					       u32 *num_stripes)
5952{
5953	struct extent_map *em;
5954	struct map_lookup *map;
5955	struct btrfs_discard_stripe *stripes;
5956	u64 length = *length_ret;
5957	u64 offset;
5958	u64 stripe_nr;
5959	u64 stripe_nr_end;
5960	u64 stripe_end_offset;
5961	u64 stripe_cnt;
5962	u64 stripe_len;
5963	u64 stripe_offset;
5964	u32 stripe_index;
5965	u32 factor = 0;
5966	u32 sub_stripes = 0;
5967	u64 stripes_per_dev = 0;
5968	u32 remaining_stripes = 0;
5969	u32 last_stripe = 0;
5970	int ret;
5971	int i;
5972
5973	em = btrfs_get_chunk_map(fs_info, logical, length);
5974	if (IS_ERR(em))
5975		return ERR_CAST(em);
5976
5977	map = em->map_lookup;
5978
5979	/* we don't discard raid56 yet */
5980	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5981		ret = -EOPNOTSUPP;
5982		goto out_free_map;
5983	}
5984
5985	offset = logical - em->start;
5986	length = min_t(u64, em->start + em->len - logical, length);
5987	*length_ret = length;
5988
5989	stripe_len = map->stripe_len;
5990	/*
5991	 * stripe_nr counts the total number of stripes we have to stride
5992	 * to get to this block
5993	 */
5994	stripe_nr = div64_u64(offset, stripe_len);
5995
5996	/* stripe_offset is the offset of this block in its stripe */
5997	stripe_offset = offset - stripe_nr * stripe_len;
5998
5999	stripe_nr_end = round_up(offset + length, map->stripe_len);
6000	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
6001	stripe_cnt = stripe_nr_end - stripe_nr;
6002	stripe_end_offset = stripe_nr_end * map->stripe_len -
6003			    (offset + length);
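	/*
	 * Worked example with hypothetical values: offset = 300KiB,
	 * length = 200KiB, stripe_len = 64KiB.  Then stripe_nr = 4,
	 * stripe_offset = 300KiB - 4 * 64KiB = 44KiB, stripe_nr_end =
	 * round_up(500KiB, 64KiB) / 64KiB = 8, stripe_cnt = 4 and
	 * stripe_end_offset = 8 * 64KiB - 500KiB = 12KiB.
	 */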
6004	/*
6005	 * after this, stripe_nr is the number of stripes on this
6006	 * device we have to walk to find the data, and stripe_index is
6007	 * the number of our device in the stripe array
6008	 */
6009	*num_stripes = 1;
6010	stripe_index = 0;
6011	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6012			 BTRFS_BLOCK_GROUP_RAID10)) {
6013		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6014			sub_stripes = 1;
6015		else
6016			sub_stripes = map->sub_stripes;
6017
6018		factor = map->num_stripes / sub_stripes;
6019		*num_stripes = min_t(u64, map->num_stripes,
6020				    sub_stripes * stripe_cnt);
6021		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6022		stripe_index *= sub_stripes;
6023		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
6024					      &remaining_stripes);
6025		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
6026		last_stripe *= sub_stripes;
6027	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6028				BTRFS_BLOCK_GROUP_DUP)) {
6029		*num_stripes = map->num_stripes;
6030	} else {
6031		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6032					&stripe_index);
6033	}
6034
6035	stripes = kcalloc(*num_stripes, sizeof(*stripes), GFP_NOFS);
6036	if (!stripes) {
6037		ret = -ENOMEM;
6038		goto out_free_map;
6039	}
6040
6041	for (i = 0; i < *num_stripes; i++) {
6042		stripes[i].physical =
6043			map->stripes[stripe_index].physical +
6044			stripe_offset + stripe_nr * map->stripe_len;
6045		stripes[i].dev = map->stripes[stripe_index].dev;
6046
6047		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6048				 BTRFS_BLOCK_GROUP_RAID10)) {
6049			stripes[i].length = stripes_per_dev * map->stripe_len;
6050
6051			if (i / sub_stripes < remaining_stripes)
6052				stripes[i].length += map->stripe_len;
6053
6054			/*
6055			 * Special for the first stripe and
6056			 * the last stripe:
6057			 *
6058			 * |-------|...|-------|
6059			 *     |----------|
6060			 *    off     end_off
6061			 */
6062			if (i < sub_stripes)
6063				stripes[i].length -= stripe_offset;
6064
6065			if (stripe_index >= last_stripe &&
6066			    stripe_index <= (last_stripe +
6067					     sub_stripes - 1))
6068				stripes[i].length -= stripe_end_offset;
6069
6070			if (i == sub_stripes - 1)
6071				stripe_offset = 0;
6072		} else {
6073			stripes[i].length = length;
6074		}
6075
6076		stripe_index++;
6077		if (stripe_index == map->num_stripes) {
6078			stripe_index = 0;
6079			stripe_nr++;
6080		}
6081	}
6082
6083	free_extent_map(em);
6084	return stripes;
6085out_free_map:
6086	free_extent_map(em);
6087	return ERR_PTR(ret);
6088}
6089
6090/*
6091 * In dev-replace case, for repair case (that's the only case where the mirror
6092 * is selected explicitly when calling btrfs_map_block), blocks left of the
6093 * left cursor can also be read from the target drive.
6094 *
6095 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
6096 * array of stripes.
6097 * For READ, it also needs to be supported using the same mirror number.
6098 *
6099 * If the requested block is not left of the left cursor, EIO is returned. This
6100 * can happen because btrfs_num_copies() returns one more in the dev-replace
6101 * case.
6102 */
6103static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
6104					 u64 logical, u64 length,
6105					 u64 srcdev_devid, int *mirror_num,
6106					 u64 *physical)
6107{
6108	struct btrfs_io_context *bioc = NULL;
6109	int num_stripes;
6110	int index_srcdev = 0;
6111	int found = 0;
6112	u64 physical_of_found = 0;
6113	int i;
6114	int ret = 0;
6115
6116	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6117				logical, &length, &bioc, NULL, NULL, 0);
6118	if (ret) {
6119		ASSERT(bioc == NULL);
6120		return ret;
6121	}
6122
6123	num_stripes = bioc->num_stripes;
6124	if (*mirror_num > num_stripes) {
6125		/*
6126		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6127		 * that means that the requested area is not left of the left
6128		 * cursor
6129		 */
6130		btrfs_put_bioc(bioc);
6131		return -EIO;
6132	}
6133
6134	/*
6135	 * Process the rest of the function using the mirror_num of the source
6136	 * drive. Therefore look it up first.  At the end, patch the device
6137	 * pointer to that of the target drive.
6138	 */
6139	for (i = 0; i < num_stripes; i++) {
6140		if (bioc->stripes[i].dev->devid != srcdev_devid)
6141			continue;
6142
6143		/*
6144		 * In case of DUP, in order to keep it simple, only add the
6145		 * mirror with the lowest physical address
6146		 */
6147		if (found &&
6148		    physical_of_found <= bioc->stripes[i].physical)
6149			continue;
6150
6151		index_srcdev = i;
6152		found = 1;
6153		physical_of_found = bioc->stripes[i].physical;
6154	}
6155
6156	btrfs_put_bioc(bioc);
6157
6158	ASSERT(found);
6159	if (!found)
6160		return -EIO;
6161
6162	*mirror_num = index_srcdev + 1;
6163	*physical = physical_of_found;
6164	return ret;
6165}
6166
6167static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6168{
6169	struct btrfs_block_group *cache;
6170	bool ret;
6171
6172	/* A non-zoned filesystem does not use the "to_copy" flag */
6173	if (!btrfs_is_zoned(fs_info))
6174		return false;
6175
6176	cache = btrfs_lookup_block_group(fs_info, logical);
6177
6178	ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
6179
6180	btrfs_put_block_group(cache);
6181	return ret;
6182}
6183
6184static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6185				      struct btrfs_io_context **bioc_ret,
6186				      struct btrfs_dev_replace *dev_replace,
6187				      u64 logical,
6188				      int *num_stripes_ret, int *max_errors_ret)
6189{
6190	struct btrfs_io_context *bioc = *bioc_ret;
6191	u64 srcdev_devid = dev_replace->srcdev->devid;
6192	int tgtdev_indexes = 0;
6193	int num_stripes = *num_stripes_ret;
6194	int max_errors = *max_errors_ret;
6195	int i;
6196
6197	if (op == BTRFS_MAP_WRITE) {
6198		int index_where_to_add;
6199
6200		/*
6201		 * A block group which has "to_copy" set will eventually be
6202		 * copied by the dev-replace process. We can avoid cloning the IO here.
6203		 */
6204		if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6205			return;
6206
6207		/*
6208		 * duplicate the write operations while the dev replace
6209		 * procedure is running. Since the copying of the old disk to
6210		 * the new disk takes place at run time while the filesystem is
6211		 * mounted writable, the regular write operations to the old
6212		 * disk have to be duplicated to go to the new disk as well.
6213		 *
6214		 * Note that device->missing is handled by the caller, and that
6215		 * the write to the old disk is already set up in the stripes
6216		 * array.
6217		 */
6218		index_where_to_add = num_stripes;
6219		for (i = 0; i < num_stripes; i++) {
6220			if (bioc->stripes[i].dev->devid == srcdev_devid) {
6221				/* write to new disk, too */
6222				struct btrfs_io_stripe *new =
6223					bioc->stripes + index_where_to_add;
6224				struct btrfs_io_stripe *old =
6225					bioc->stripes + i;
6226
6227				new->physical = old->physical;
6228				new->dev = dev_replace->tgtdev;
6229				bioc->tgtdev_map[i] = index_where_to_add;
6230				index_where_to_add++;
6231				max_errors++;
6232				tgtdev_indexes++;
6233			}
6234		}
6235		num_stripes = index_where_to_add;
6236	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
6237		int index_srcdev = 0;
6238		int found = 0;
6239		u64 physical_of_found = 0;
6240
6241		/*
6242		 * During the dev-replace procedure, the target drive can also
6243		 * be used to read data in case it is needed to repair a corrupt
6244		 * block elsewhere. This is possible if the requested area is
6245		 * left of the left cursor. In this area, the target drive is a
6246		 * full copy of the source drive.
6247		 */
6248		for (i = 0; i < num_stripes; i++) {
6249			if (bioc->stripes[i].dev->devid == srcdev_devid) {
6250				/*
6251				 * In case of DUP, in order to keep it simple,
6252				 * only add the mirror with the lowest physical
6253				 * address
6254				 */
6255				if (found &&
6256				    physical_of_found <= bioc->stripes[i].physical)
6257					continue;
6258				index_srcdev = i;
6259				found = 1;
6260				physical_of_found = bioc->stripes[i].physical;
6261			}
6262		}
6263		if (found) {
6264			struct btrfs_io_stripe *tgtdev_stripe =
6265				bioc->stripes + num_stripes;
6266
6267			tgtdev_stripe->physical = physical_of_found;
6268			tgtdev_stripe->dev = dev_replace->tgtdev;
6269			bioc->tgtdev_map[index_srcdev] = num_stripes;
6270
6271			tgtdev_indexes++;
6272			num_stripes++;
6273		}
6274	}
6275
6276	*num_stripes_ret = num_stripes;
6277	*max_errors_ret = max_errors;
6278	bioc->num_tgtdevs = tgtdev_indexes;
6279	*bioc_ret = bioc;
6280}
6281
6282static bool need_full_stripe(enum btrfs_map_op op)
6283{
6284	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6285}
6286
6287/*
6288 * Calculate the geometry of a particular (address, len) tuple. This
6289 * information is used to calculate how big a particular bio can get before it
6290 * straddles a stripe.
6291 *
6292 * @fs_info: the filesystem
6293 * @em:      mapping containing the logical extent
6294 * @op:      type of operation - write or read
6295 * @logical: address that we want to figure out the geometry of
6296 * @io_geom: pointer used to return values
6297 *
6298 * Returns < 0 in case a chunk for the given logical address cannot be found
6299 * (usually shouldn't happen unless @logical is corrupted), 0 otherwise.
6300 */
6301int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6302			  enum btrfs_map_op op, u64 logical,
6303			  struct btrfs_io_geometry *io_geom)
6304{
6305	struct map_lookup *map;
6306	u64 len;
6307	u64 offset;
6308	u64 stripe_offset;
6309	u64 stripe_nr;
6310	u32 stripe_len;
6311	u64 raid56_full_stripe_start = (u64)-1;
6312	int data_stripes;
6313
6314	ASSERT(op != BTRFS_MAP_DISCARD);
6315
6316	map = em->map_lookup;
6317	/* Offset of this logical address in the chunk */
6318	offset = logical - em->start;
6319	/* Len of a stripe in a chunk */
6320	stripe_len = map->stripe_len;
6321	/*
6322	 * Stripe_nr is where this block falls in;
6323	 * stripe_offset is the offset of this block in its stripe.
6324	 */
6325	stripe_nr = div64_u64_rem(offset, stripe_len, &stripe_offset);
6326	ASSERT(stripe_offset < U32_MAX);
6327
6328	data_stripes = nr_data_stripes(map);
6329
6330	/* Only stripe-based profiles need to check against stripe length. */
6331	if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) {
6332		u64 max_len = stripe_len - stripe_offset;
6333
6334		/*
6335		 * In case of raid56, we need to know the stripe aligned start
6336		 */
6337		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6338			unsigned long full_stripe_len = stripe_len * data_stripes;
6339			raid56_full_stripe_start = offset;
6340
6341			/*
6342			 * Allow a write of a full stripe, but make sure we
6343			 * don't allow straddling of stripes
6344			 */
6345			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6346					full_stripe_len);
6347			raid56_full_stripe_start *= full_stripe_len;
6348
6349			/*
6350			 * For writes to RAID[56], allow a full stripeset across
6351			 * all disks. For other RAID types and for RAID[56]
6352			 * reads, just allow a single stripe (on a single disk).
6353			 */
6354			if (op == BTRFS_MAP_WRITE) {
6355				max_len = stripe_len * data_stripes -
6356					  (offset - raid56_full_stripe_start);
6357			}
6358		}
6359		len = min_t(u64, em->len - offset, max_len);
6360	} else {
6361		len = em->len - offset;
6362	}
6363
6364	io_geom->len = len;
6365	io_geom->offset = offset;
6366	io_geom->stripe_len = stripe_len;
6367	io_geom->stripe_nr = stripe_nr;
6368	io_geom->stripe_offset = stripe_offset;
6369	io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6370
6371	return 0;
6372}
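/*
 * Worked example for the RAID56 path above, with hypothetical values: a RAID5
 * chunk over 3 devices has data_stripes = 2, so a full stripe covers
 * 2 * stripe_len = 128KiB of data (assuming stripe_len = 64KiB).  For
 * offset = 140KiB this gives stripe_nr = 2, stripe_offset = 12KiB and
 * raid56_full_stripe_start = 128KiB.  A write may then span up to
 * max_len = 128KiB - (140KiB - 128KiB) = 116KiB, while a read is limited to a
 * single stripe: 64KiB - 12KiB = 52KiB.
 */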
6373
6374static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map,
6375		          u32 stripe_index, u64 stripe_offset, u64 stripe_nr)
6376{
6377	dst->dev = map->stripes[stripe_index].dev;
6378	dst->physical = map->stripes[stripe_index].physical +
6379			stripe_offset + stripe_nr * map->stripe_len;
6380}
6381
6382int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6383		      u64 logical, u64 *length,
6384		      struct btrfs_io_context **bioc_ret,
6385		      struct btrfs_io_stripe *smap, int *mirror_num_ret,
6386		      int need_raid_map)
6387{
6388	struct extent_map *em;
6389	struct map_lookup *map;
6390	u64 stripe_offset;
6391	u64 stripe_nr;
6392	u64 stripe_len;
6393	u32 stripe_index;
6394	int data_stripes;
6395	int i;
6396	int ret = 0;
6397	int mirror_num = (mirror_num_ret ? *mirror_num_ret : 0);
6398	int num_stripes;
6399	int max_errors = 0;
6400	int tgtdev_indexes = 0;
6401	struct btrfs_io_context *bioc = NULL;
6402	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6403	int dev_replace_is_ongoing = 0;
6404	int num_alloc_stripes;
6405	int patch_the_first_stripe_for_dev_replace = 0;
6406	u64 physical_to_patch_in_first_stripe = 0;
6407	u64 raid56_full_stripe_start = (u64)-1;
6408	struct btrfs_io_geometry geom;
6409
6410	ASSERT(bioc_ret);
6411	ASSERT(op != BTRFS_MAP_DISCARD);
6412
6413	em = btrfs_get_chunk_map(fs_info, logical, *length);
6414	ASSERT(!IS_ERR(em));
6415
6416	ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
6417	if (ret < 0)
6418		return ret;
6419
6420	map = em->map_lookup;
6421
6422	*length = geom.len;
6423	stripe_len = geom.stripe_len;
6424	stripe_nr = geom.stripe_nr;
6425	stripe_offset = geom.stripe_offset;
6426	raid56_full_stripe_start = geom.raid56_stripe_offset;
6427	data_stripes = nr_data_stripes(map);
6428
6429	down_read(&dev_replace->rwsem);
6430	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6431	/*
6432	 * Hold the semaphore for read during the whole operation, write is
6433	 * requested at commit time but must wait.
6434	 */
6435	if (!dev_replace_is_ongoing)
6436		up_read(&dev_replace->rwsem);
6437
6438	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6439	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6440		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6441						    dev_replace->srcdev->devid,
6442						    &mirror_num,
6443					    &physical_to_patch_in_first_stripe);
6444		if (ret)
6445			goto out;
6446		else
6447			patch_the_first_stripe_for_dev_replace = 1;
6448	} else if (mirror_num > map->num_stripes) {
6449		mirror_num = 0;
6450	}
6451
6452	num_stripes = 1;
6453	stripe_index = 0;
6454	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6455		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6456				&stripe_index);
6457		if (!need_full_stripe(op))
6458			mirror_num = 1;
6459	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6460		if (need_full_stripe(op))
6461			num_stripes = map->num_stripes;
6462		else if (mirror_num)
6463			stripe_index = mirror_num - 1;
6464		else {
6465			stripe_index = find_live_mirror(fs_info, map, 0,
6466					    dev_replace_is_ongoing);
6467			mirror_num = stripe_index + 1;
6468		}
6469
6470	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6471		if (need_full_stripe(op)) {
6472			num_stripes = map->num_stripes;
6473		} else if (mirror_num) {
6474			stripe_index = mirror_num - 1;
6475		} else {
6476			mirror_num = 1;
6477		}
6478
6479	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6480		u32 factor = map->num_stripes / map->sub_stripes;
6481
6482		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6483		stripe_index *= map->sub_stripes;
6484
6485		if (need_full_stripe(op))
6486			num_stripes = map->sub_stripes;
6487		else if (mirror_num)
6488			stripe_index += mirror_num - 1;
6489		else {
6490			int old_stripe_index = stripe_index;
6491			stripe_index = find_live_mirror(fs_info, map,
6492					      stripe_index,
6493					      dev_replace_is_ongoing);
6494			mirror_num = stripe_index - old_stripe_index + 1;
6495		}
6496
6497	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6498		ASSERT(map->stripe_len == BTRFS_STRIPE_LEN);
6499		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6500			/* push stripe_nr back to the start of the full stripe */
6501			stripe_nr = div64_u64(raid56_full_stripe_start,
6502					stripe_len * data_stripes);
6503
6504			/* RAID[56] write or recovery. Return all stripes */
6505			num_stripes = map->num_stripes;
6506			max_errors = btrfs_chunk_max_errors(map);
6507
6508			/* Return the length to the full stripe end */
6509			*length = min(logical + *length,
6510				      raid56_full_stripe_start + em->start +
6511				      data_stripes * stripe_len) - logical;
6512			stripe_index = 0;
6513			stripe_offset = 0;
6514		} else {
6515			/*
6516			 * Mirror #0 or #1 means the original data block.
6517			 * Mirror #2 is RAID5 parity block.
6518			 * Mirror #3 is RAID6 Q block.
6519			 */
6520			stripe_nr = div_u64_rem(stripe_nr,
6521					data_stripes, &stripe_index);
6522			if (mirror_num > 1)
6523				stripe_index = data_stripes + mirror_num - 2;
6524
6525			/* We distribute the parity blocks across stripes */
6526			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6527					&stripe_index);
6528			if (!need_full_stripe(op) && mirror_num <= 1)
6529				mirror_num = 1;
6530		}
6531	} else {
6532		/*
6533		 * after this, stripe_nr is the number of stripes on this
6534		 * device we have to walk to find the data, and stripe_index is
6535		 * the number of our device in the stripe array
6536		 */
6537		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6538				&stripe_index);
6539		mirror_num = stripe_index + 1;
6540	}
6541	if (stripe_index >= map->num_stripes) {
6542		btrfs_crit(fs_info,
6543			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6544			   stripe_index, map->num_stripes);
6545		ret = -EINVAL;
6546		goto out;
6547	}
6548
6549	num_alloc_stripes = num_stripes;
6550	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6551		if (op == BTRFS_MAP_WRITE)
6552			num_alloc_stripes <<= 1;
6553		if (op == BTRFS_MAP_GET_READ_MIRRORS)
6554			num_alloc_stripes++;
6555		tgtdev_indexes = num_stripes;
6556	}
6557
6558	/*
6559	 * If this I/O maps to a single device, try to return the device and
6560	 * physical block information on the stack instead of allocating an
6561	 * I/O context structure.
6562	 */
6563	if (smap && num_alloc_stripes == 1 &&
6564	    !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1) &&
6565	    (!need_full_stripe(op) || !dev_replace_is_ongoing ||
6566	     !dev_replace->tgtdev)) {
6567		if (patch_the_first_stripe_for_dev_replace) {
6568			smap->dev = dev_replace->tgtdev;
6569			smap->physical = physical_to_patch_in_first_stripe;
6570			*mirror_num_ret = map->num_stripes + 1;
6571		} else {
6572			set_io_stripe(smap, map, stripe_index, stripe_offset,
6573				      stripe_nr);
6574			*mirror_num_ret = mirror_num;
6575		}
6576		*bioc_ret = NULL;
6577		ret = 0;
6578		goto out;
6579	}
6580
6581	bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes);
6582	if (!bioc) {
6583		ret = -ENOMEM;
6584		goto out;
6585	}
6586
6587	for (i = 0; i < num_stripes; i++) {
6588		set_io_stripe(&bioc->stripes[i], map, stripe_index, stripe_offset,
6589			      stripe_nr);
6590		stripe_index++;
6591	}
6592
6593	/* Build raid_map */
6594	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6595	    (need_full_stripe(op) || mirror_num > 1)) {
6596		u64 tmp;
6597		unsigned rot;
6598
6599		/* Work out the disk rotation on this stripe-set */
6600		div_u64_rem(stripe_nr, num_stripes, &rot);
6601
6602		/* Fill in the logical address of each stripe */
6603		tmp = stripe_nr * data_stripes;
6604		for (i = 0; i < data_stripes; i++)
6605			bioc->raid_map[(i + rot) % num_stripes] =
6606				em->start + (tmp + i) * map->stripe_len;
6607
6608		bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE;
6609		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6610			bioc->raid_map[(i + rot + 1) % num_stripes] =
6611				RAID6_Q_STRIPE;
6612
6613		sort_parity_stripes(bioc, num_stripes);
6614	}
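	/*
	 * Example of the raid_map rotation above, with hypothetical values:
	 * for RAID5 with num_stripes = 3, data_stripes = 2 and stripe_nr = 1
	 * we get rot = 1 and tmp = 2, so raid_map[1] = em->start +
	 * 2 * stripe_len, raid_map[2] = em->start + 3 * stripe_len and
	 * raid_map[0] = RAID5_P_STRIPE, i.e. the parity stripe lands on a
	 * different device for each consecutive full stripe.
	 */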
6615
6616	if (need_full_stripe(op))
6617		max_errors = btrfs_chunk_max_errors(map);
6618
6619	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6620	    need_full_stripe(op)) {
6621		handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
6622					  &num_stripes, &max_errors);
6623	}
6624
6625	*bioc_ret = bioc;
6626	bioc->map_type = map->type;
6627	bioc->num_stripes = num_stripes;
6628	bioc->max_errors = max_errors;
6629	bioc->mirror_num = mirror_num;
6630
6631	/*
6632	 * This is the case that REQ_READ && dev_replace_is_ongoing &&
6633	 * mirror_num == num_stripes + 1 && the dev_replace target drive is
6634	 * available as a mirror.
6635	 */
6636	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6637		WARN_ON(num_stripes > 1);
6638		bioc->stripes[0].dev = dev_replace->tgtdev;
6639		bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
6640		bioc->mirror_num = map->num_stripes + 1;
6641	}
6642out:
6643	if (dev_replace_is_ongoing) {
6644		lockdep_assert_held(&dev_replace->rwsem);
6645		/* Unlock and let waiting writers proceed */
6646		up_read(&dev_replace->rwsem);
6647	}
6648	free_extent_map(em);
6649	return ret;
6650}
6651
6652int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6653		      u64 logical, u64 *length,
6654		      struct btrfs_io_context **bioc_ret, int mirror_num)
6655{
6656	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6657				 NULL, &mirror_num, 0);
6658}
6659
6660/* For Scrub/replace */
6661int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6662		     u64 logical, u64 *length,
6663		     struct btrfs_io_context **bioc_ret)
6664{
6665	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6666				 NULL, NULL, 1);
6667}
6668
6669static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
6670				      const struct btrfs_fs_devices *fs_devices)
6671{
6672	if (args->fsid == NULL)
6673		return true;
6674	if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
6675		return true;
6676	return false;
6677}
6678
6679static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
6680				  const struct btrfs_device *device)
6681{
6682	if (args->missing) {
6683		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
6684		    !device->bdev)
6685			return true;
6686		return false;
6687	}
6688
6689	if (device->devid != args->devid)
6690		return false;
6691	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
6692		return false;
6693	return true;
6694}
6695
6696/*
6697 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6698 * return NULL.
6699 *
6700 * If devid and uuid are both specified, the match must be exact, otherwise
6701 * only devid is used.
6702 */
6703struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
6704				       const struct btrfs_dev_lookup_args *args)
6705{
6706	struct btrfs_device *device;
6707	struct btrfs_fs_devices *seed_devs;
6708
6709	if (dev_args_match_fs_devices(args, fs_devices)) {
6710		list_for_each_entry(device, &fs_devices->devices, dev_list) {
6711			if (dev_args_match_device(args, device))
6712				return device;
6713		}
6714	}
6715
6716	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6717		if (!dev_args_match_fs_devices(args, seed_devs))
6718			continue;
6719		list_for_each_entry(device, &seed_devs->devices, dev_list) {
6720			if (dev_args_match_device(args, device))
6721				return device;
6722		}
6723	}
6724
6725	return NULL;
6726}
6727
6728static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6729					    u64 devid, u8 *dev_uuid)
6730{
6731	struct btrfs_device *device;
6732	unsigned int nofs_flag;
6733
6734	/*
6735	 * We call this under the chunk_mutex, so we want to use NOFS for this
6736	 * allocation; however, we don't want to change btrfs_alloc_device() to
6737	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6738	 * places.
6739	 */
6740
6741	nofs_flag = memalloc_nofs_save();
6742	device = btrfs_alloc_device(NULL, &devid, dev_uuid, NULL);
6743	memalloc_nofs_restore(nofs_flag);
6744	if (IS_ERR(device))
6745		return device;
6746
6747	list_add(&device->dev_list, &fs_devices->devices);
6748	device->fs_devices = fs_devices;
6749	fs_devices->num_devices++;
6750
6751	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6752	fs_devices->missing_devices++;
6753
6754	return device;
6755}
6756
6757/*
6758 * Allocate new device struct, set up devid and UUID.
6759 *
6760 * @fs_info:	used only for generating a new devid, can be NULL if
6761 *		devid is provided (i.e. @devid != NULL).
6762 * @devid:	a pointer to devid for this device.  If NULL a new devid
6763 *		is generated.
6764 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6765 *		is generated.
6766 * @path:	a pointer to device path if available, NULL otherwise.
6767 *
6768 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6769 * on error.  Returned struct is not linked onto any lists and must be
6770 * destroyed with btrfs_free_device.
6771 */
6772struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6773					const u64 *devid, const u8 *uuid,
6774					const char *path)
6775{
6776	struct btrfs_device *dev;
6777	u64 tmp;
6778
6779	if (WARN_ON(!devid && !fs_info))
6780		return ERR_PTR(-EINVAL);
6781
6782	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6783	if (!dev)
6784		return ERR_PTR(-ENOMEM);
6785
6786	INIT_LIST_HEAD(&dev->dev_list);
6787	INIT_LIST_HEAD(&dev->dev_alloc_list);
6788	INIT_LIST_HEAD(&dev->post_commit_list);
6789
6790	atomic_set(&dev->dev_stats_ccnt, 0);
6791	btrfs_device_data_ordered_init(dev);
6792	extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
6793
6794	if (devid)
6795		tmp = *devid;
6796	else {
6797		int ret;
6798
6799		ret = find_next_devid(fs_info, &tmp);
6800		if (ret) {
6801			btrfs_free_device(dev);
6802			return ERR_PTR(ret);
6803		}
6804	}
6805	dev->devid = tmp;
6806
6807	if (uuid)
6808		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6809	else
6810		generate_random_uuid(dev->uuid);
6811
6812	if (path) {
6813		struct rcu_string *name;
6814
6815		name = rcu_string_strdup(path, GFP_KERNEL);
6816		if (!name) {
6817			btrfs_free_device(dev);
6818			return ERR_PTR(-ENOMEM);
6819		}
6820		rcu_assign_pointer(dev->name, name);
6821	}
6822
6823	return dev;
6824}
6825
6826static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6827					u64 devid, u8 *uuid, bool error)
6828{
6829	if (error)
6830		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6831			      devid, uuid);
6832	else
6833		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6834			      devid, uuid);
6835}
6836
6837u64 btrfs_calc_stripe_length(const struct extent_map *em)
6838{
6839	const struct map_lookup *map = em->map_lookup;
6840	const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
6841
6842	return div_u64(em->len, data_stripes);
6843}
6844
6845#if BITS_PER_LONG == 32
6846/*
6847 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
6848 * can't be accessed on 32bit systems.
6849 *
6850 * This function does a mount time check to reject the fs if it already has
6851 * a metadata chunk beyond that limit.
6852 */
6853static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6854				  u64 logical, u64 length, u64 type)
6855{
6856	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6857		return 0;
6858
6859	if (logical + length < MAX_LFS_FILESIZE)
6860		return 0;
6861
6862	btrfs_err_32bit_limit(fs_info);
6863	return -EOVERFLOW;
6864}
6865
6866/*
6867 * This is to give early warning for any metadata chunk reaching
6868 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
6869 * Although we can still access the metadata, it's not going to be possible
6870 * once the limit is reached.
6871 */
6872static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6873				  u64 logical, u64 length, u64 type)
6874{
6875	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6876		return;
6877
6878	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
6879		return;
6880
6881	btrfs_warn_32bit_limit(fs_info);
6882}
6883#endif
6884
6885static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
6886						  u64 devid, u8 *uuid)
6887{
6888	struct btrfs_device *dev;
6889
6890	if (!btrfs_test_opt(fs_info, DEGRADED)) {
6891		btrfs_report_missing_device(fs_info, devid, uuid, true);
6892		return ERR_PTR(-ENOENT);
6893	}
6894
6895	dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
6896	if (IS_ERR(dev)) {
6897		btrfs_err(fs_info, "failed to init missing device %llu: %ld",
6898			  devid, PTR_ERR(dev));
6899		return dev;
6900	}
6901	btrfs_report_missing_device(fs_info, devid, uuid, false);
6902
6903	return dev;
6904}
6905
6906static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6907			  struct btrfs_chunk *chunk)
6908{
6909	BTRFS_DEV_LOOKUP_ARGS(args);
6910	struct btrfs_fs_info *fs_info = leaf->fs_info;
6911	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6912	struct map_lookup *map;
6913	struct extent_map *em;
6914	u64 logical;
6915	u64 length;
6916	u64 devid;
6917	u64 type;
6918	u8 uuid[BTRFS_UUID_SIZE];
6919	int index;
6920	int num_stripes;
6921	int ret;
6922	int i;
6923
6924	logical = key->offset;
6925	length = btrfs_chunk_length(leaf, chunk);
6926	type = btrfs_chunk_type(leaf, chunk);
6927	index = btrfs_bg_flags_to_raid_index(type);
6928	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6929
6930#if BITS_PER_LONG == 32
6931	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
6932	if (ret < 0)
6933		return ret;
6934	warn_32bit_meta_chunk(fs_info, logical, length, type);
6935#endif
6936
6937	/*
6938	 * Only need to verify chunk item if we're reading from sys chunk array,
6939	 * as chunk item in tree block is already verified by tree-checker.
6940	 */
6941	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6942		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6943		if (ret)
6944			return ret;
6945	}
6946
6947	read_lock(&map_tree->lock);
6948	em = lookup_extent_mapping(map_tree, logical, 1);
6949	read_unlock(&map_tree->lock);
6950
6951	/* already mapped? */
6952	if (em && em->start <= logical && em->start + em->len > logical) {
6953		free_extent_map(em);
6954		return 0;
6955	} else if (em) {
6956		free_extent_map(em);
6957	}
6958
6959	em = alloc_extent_map();
6960	if (!em)
6961		return -ENOMEM;
6962	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6963	if (!map) {
6964		free_extent_map(em);
6965		return -ENOMEM;
6966	}
6967
6968	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6969	em->map_lookup = map;
6970	em->start = logical;
6971	em->len = length;
6972	em->orig_start = 0;
6973	em->block_start = 0;
6974	em->block_len = em->len;
6975
6976	map->num_stripes = num_stripes;
6977	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6978	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6979	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6980	map->type = type;
6981	/*
6982	 * We can't use the sub_stripes value, as profiles other than
6983	 * RAID10 may have 0 as sub_stripes on filesystems created by
6984	 * older mkfs (<v5.4).
6985	 * In that case, it can cause divide-by-zero errors later.
6986	 * Since currently sub_stripes is fixed for each profile, let's
6987	 * use the trusted value instead.
6988	 */
6989	map->sub_stripes = btrfs_raid_array[index].sub_stripes;
6990	map->verified_stripes = 0;
6991	em->orig_block_len = btrfs_calc_stripe_length(em);
6992	for (i = 0; i < num_stripes; i++) {
6993		map->stripes[i].physical =
6994			btrfs_stripe_offset_nr(leaf, chunk, i);
6995		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6996		args.devid = devid;
6997		read_extent_buffer(leaf, uuid, (unsigned long)
6998				   btrfs_stripe_dev_uuid_nr(chunk, i),
6999				   BTRFS_UUID_SIZE);
7000		args.uuid = uuid;
7001		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
7002		if (!map->stripes[i].dev) {
7003			map->stripes[i].dev = handle_missing_device(fs_info,
7004								    devid, uuid);
7005			if (IS_ERR(map->stripes[i].dev)) {
7006				ret = PTR_ERR(map->stripes[i].dev);
7007				free_extent_map(em);
7008				return ret;
7009			}
7010		}
7011
7012		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7013				&(map->stripes[i].dev->dev_state));
7014	}
7015
7016	write_lock(&map_tree->lock);
7017	ret = add_extent_mapping(map_tree, em, 0);
7018	write_unlock(&map_tree->lock);
7019	if (ret < 0) {
7020		btrfs_err(fs_info,
7021			  "failed to add chunk map, start=%llu len=%llu: %d",
7022			  em->start, em->len, ret);
7023	}
7024	free_extent_map(em);
7025
7026	return ret;
7027}
7028
7029static void fill_device_from_item(struct extent_buffer *leaf,
7030				 struct btrfs_dev_item *dev_item,
7031				 struct btrfs_device *device)
7032{
7033	unsigned long ptr;
7034
7035	device->devid = btrfs_device_id(leaf, dev_item);
7036	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7037	device->total_bytes = device->disk_total_bytes;
7038	device->commit_total_bytes = device->disk_total_bytes;
7039	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7040	device->commit_bytes_used = device->bytes_used;
7041	device->type = btrfs_device_type(leaf, dev_item);
7042	device->io_align = btrfs_device_io_align(leaf, dev_item);
7043	device->io_width = btrfs_device_io_width(leaf, dev_item);
7044	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7045	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7046	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7047
7048	ptr = btrfs_device_uuid(dev_item);
7049	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7050}
7051
7052static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7053						  u8 *fsid)
7054{
7055	struct btrfs_fs_devices *fs_devices;
7056	int ret;
7057
7058	lockdep_assert_held(&uuid_mutex);
7059	ASSERT(fsid);
7060
7061	/* This will match only for multi-device seed fs */
7062	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7063		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7064			return fs_devices;
7065
7066
7067	fs_devices = find_fsid(fsid, NULL);
7068	if (!fs_devices) {
7069		if (!btrfs_test_opt(fs_info, DEGRADED))
7070			return ERR_PTR(-ENOENT);
7071
7072		fs_devices = alloc_fs_devices(fsid, NULL);
7073		if (IS_ERR(fs_devices))
7074			return fs_devices;
7075
7076		fs_devices->seeding = true;
7077		fs_devices->opened = 1;
7078		return fs_devices;
7079	}
7080
7081	/*
7082	 * Upon first call for a seed fs fsid, just create a private copy of the
7083	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
7084	 */
7085	fs_devices = clone_fs_devices(fs_devices);
7086	if (IS_ERR(fs_devices))
7087		return fs_devices;
7088
7089	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
7090	if (ret) {
7091		free_fs_devices(fs_devices);
7092		return ERR_PTR(ret);
7093	}
7094
7095	if (!fs_devices->seeding) {
7096		close_fs_devices(fs_devices);
7097		free_fs_devices(fs_devices);
7098		return ERR_PTR(-EINVAL);
7099	}
7100
7101	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7102
7103	return fs_devices;
7104}
7105
7106static int read_one_dev(struct extent_buffer *leaf,
7107			struct btrfs_dev_item *dev_item)
7108{
7109	BTRFS_DEV_LOOKUP_ARGS(args);
7110	struct btrfs_fs_info *fs_info = leaf->fs_info;
7111	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7112	struct btrfs_device *device;
7113	u64 devid;
7114	int ret;
7115	u8 fs_uuid[BTRFS_FSID_SIZE];
7116	u8 dev_uuid[BTRFS_UUID_SIZE];
7117
7118	devid = btrfs_device_id(leaf, dev_item);
7119	args.devid = devid;
7120	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7121			   BTRFS_UUID_SIZE);
7122	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7123			   BTRFS_FSID_SIZE);
7124	args.uuid = dev_uuid;
7125	args.fsid = fs_uuid;
7126
7127	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7128		fs_devices = open_seed_devices(fs_info, fs_uuid);
7129		if (IS_ERR(fs_devices))
7130			return PTR_ERR(fs_devices);
7131	}
7132
7133	device = btrfs_find_device(fs_info->fs_devices, &args);
7134	if (!device) {
7135		if (!btrfs_test_opt(fs_info, DEGRADED)) {
7136			btrfs_report_missing_device(fs_info, devid,
7137							dev_uuid, true);
7138			return -ENOENT;
7139		}
7140
7141		device = add_missing_dev(fs_devices, devid, dev_uuid);
7142		if (IS_ERR(device)) {
7143			btrfs_err(fs_info,
7144				"failed to add missing dev %llu: %ld",
7145				devid, PTR_ERR(device));
7146			return PTR_ERR(device);
7147		}
7148		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7149	} else {
7150		if (!device->bdev) {
7151			if (!btrfs_test_opt(fs_info, DEGRADED)) {
7152				btrfs_report_missing_device(fs_info,
7153						devid, dev_uuid, true);
7154				return -ENOENT;
7155			}
7156			btrfs_report_missing_device(fs_info, devid,
7157							dev_uuid, false);
7158		}
7159
7160		if (!device->bdev &&
7161		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7162			/*
7163			 * This happens when a device that was properly set up
7164			 * in the device info lists suddenly goes bad.
7165			 * device->bdev is NULL, so we have to mark the device
7166			 * as missing here.
7167			 */
7168			device->fs_devices->missing_devices++;
7169			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7170		}
7171
7172		/* Move the device to its own fs_devices */
7173		if (device->fs_devices != fs_devices) {
7174			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7175							&device->dev_state));
7176
7177			list_move(&device->dev_list, &fs_devices->devices);
7178			device->fs_devices->num_devices--;
7179			fs_devices->num_devices++;
7180
7181			device->fs_devices->missing_devices--;
7182			fs_devices->missing_devices++;
7183
7184			device->fs_devices = fs_devices;
7185		}
7186	}
7187
7188	if (device->fs_devices != fs_info->fs_devices) {
7189		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7190		if (device->generation !=
7191		    btrfs_device_generation(leaf, dev_item))
7192			return -EINVAL;
7193	}
7194
7195	fill_device_from_item(leaf, dev_item, device);
7196	if (device->bdev) {
7197		u64 max_total_bytes = bdev_nr_bytes(device->bdev);
7198
7199		if (device->total_bytes > max_total_bytes) {
7200			btrfs_err(fs_info,
7201			"device total_bytes should be at most %llu but found %llu",
7202				  max_total_bytes, device->total_bytes);
7203			return -EINVAL;
7204		}
7205	}
7206	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7207	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7208	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7209		device->fs_devices->total_rw_bytes += device->total_bytes;
7210		atomic64_add(device->total_bytes - device->bytes_used,
7211				&fs_info->free_chunk_space);
7212	}
7213	ret = 0;
7214	return ret;
7215}
7216
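/*
 * Parse the system chunk array embedded in the super block and insert a
 * chunk mapping for every entry found.
 *
 * The super block copy is written into a dummy extent buffer only so the
 * regular extent buffer accessors can be used; every key and chunk item is
 * bounds checked against the advertised sys_array size before it is read.
 */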
7217int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7218{
7219	struct btrfs_super_block *super_copy = fs_info->super_copy;
7220	struct extent_buffer *sb;
7221	struct btrfs_disk_key *disk_key;
7222	struct btrfs_chunk *chunk;
7223	u8 *array_ptr;
7224	unsigned long sb_array_offset;
7225	int ret = 0;
7226	u32 num_stripes;
7227	u32 array_size;
7228	u32 len = 0;
7229	u32 cur_offset;
7230	u64 type;
7231	struct btrfs_key key;
7232
7233	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7234
7235	/*
7236	 * We allocated a dummy extent buffer, just to use the extent buffer
7237	 * accessors. There will be unused space after BTRFS_SUPER_INFO_SIZE,
7238	 * but that's fine, we will not go beyond the system chunk array anyway.
7239	 */
7240	sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET);
7241	if (!sb)
7242		return -ENOMEM;
7243	set_extent_buffer_uptodate(sb);
7244
7245	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7246	array_size = btrfs_super_sys_array_size(super_copy);
7247
7248	array_ptr = super_copy->sys_chunk_array;
7249	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7250	cur_offset = 0;
7251
7252	while (cur_offset < array_size) {
7253		disk_key = (struct btrfs_disk_key *)array_ptr;
7254		len = sizeof(*disk_key);
7255		if (cur_offset + len > array_size)
7256			goto out_short_read;
7257
7258		btrfs_disk_key_to_cpu(&key, disk_key);
7259
7260		array_ptr += len;
7261		sb_array_offset += len;
7262		cur_offset += len;
7263
7264		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7265			btrfs_err(fs_info,
7266			    "unexpected item type %u in sys_array at offset %u",
7267				  (u32)key.type, cur_offset);
7268			ret = -EIO;
7269			break;
7270		}
7271
7272		chunk = (struct btrfs_chunk *)sb_array_offset;
7273		/*
7274		 * At least one btrfs_chunk with one stripe must be present;
7275		 * the exact stripe count check comes afterwards.
7276		 */
7277		len = btrfs_chunk_item_size(1);
7278		if (cur_offset + len > array_size)
7279			goto out_short_read;
7280
7281		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7282		if (!num_stripes) {
7283			btrfs_err(fs_info,
7284			"invalid number of stripes %u in sys_array at offset %u",
7285				  num_stripes, cur_offset);
7286			ret = -EIO;
7287			break;
7288		}
7289
7290		type = btrfs_chunk_type(sb, chunk);
7291		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7292			btrfs_err(fs_info,
7293			"invalid chunk type %llu in sys_array at offset %u",
7294				  type, cur_offset);
7295			ret = -EIO;
7296			break;
7297		}
7298
7299		len = btrfs_chunk_item_size(num_stripes);
7300		if (cur_offset + len > array_size)
7301			goto out_short_read;
7302
7303		ret = read_one_chunk(&key, sb, chunk);
7304		if (ret)
7305			break;
7306
7307		array_ptr += len;
7308		sb_array_offset += len;
7309		cur_offset += len;
7310	}
7311	clear_extent_buffer_uptodate(sb);
7312	free_extent_buffer_stale(sb);
7313	return ret;
7314
7315out_short_read:
7316	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7317			len, cur_offset);
7318	clear_extent_buffer_uptodate(sb);
7319	free_extent_buffer_stale(sb);
7320	return -EIO;
7321}
7322
7323/*
7324 * Check if all chunks in the fs are OK for read-write degraded mount
7325 *
7326 * If the @failing_dev is specified, it's accounted as missing.
7327 *
7328 * Return true if all chunks meet the minimal RW mount requirements.
7329 * Return false if any chunk doesn't meet the minimal RW mount requirements.
7330 */
7331bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7332					struct btrfs_device *failing_dev)
7333{
7334	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7335	struct extent_map *em;
7336	u64 next_start = 0;
7337	bool ret = true;
7338
7339	read_lock(&map_tree->lock);
7340	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7341	read_unlock(&map_tree->lock);
7342	/* No chunk at all? Return false anyway */
7343	if (!em) {
7344		ret = false;
7345		goto out;
7346	}
7347	while (em) {
7348		struct map_lookup *map;
7349		int missing = 0;
7350		int max_tolerated;
7351		int i;
7352
7353		map = em->map_lookup;
7354		max_tolerated =
7355			btrfs_get_num_tolerated_disk_barrier_failures(
7356					map->type);
7357		for (i = 0; i < map->num_stripes; i++) {
7358			struct btrfs_device *dev = map->stripes[i].dev;
7359
7360			if (!dev || !dev->bdev ||
7361			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7362			    dev->last_flush_error)
7363				missing++;
7364			else if (failing_dev && failing_dev == dev)
7365				missing++;
7366		}
7367		if (missing > max_tolerated) {
7368			if (!failing_dev)
7369				btrfs_warn(fs_info,
7370	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
7371				   em->start, missing, max_tolerated);
7372			free_extent_map(em);
7373			ret = false;
7374			goto out;
7375		}
7376		next_start = extent_map_end(em);
7377		free_extent_map(em);
7378
7379		read_lock(&map_tree->lock);
7380		em = lookup_extent_mapping(map_tree, next_start,
7381					   (u64)(-1) - next_start);
7382		read_unlock(&map_tree->lock);
7383	}
7384out:
7385	return ret;
7386}
7387
7388static void readahead_tree_node_children(struct extent_buffer *node)
7389{
7390	int i;
7391	const int nr_items = btrfs_header_nritems(node);
7392
7393	for (i = 0; i < nr_items; i++)
7394		btrfs_readahead_node_child(node, i);
7395}
7396
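/*
 * Read the whole chunk tree at mount time: first every DEV_ITEM, then every
 * CHUNK_ITEM, building the device list and the logical->physical chunk
 * mapping.  Afterwards the device count and total_rw_bytes are validated
 * against the super block.
 */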
7397int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7398{
7399	struct btrfs_root *root = fs_info->chunk_root;
7400	struct btrfs_path *path;
7401	struct extent_buffer *leaf;
7402	struct btrfs_key key;
7403	struct btrfs_key found_key;
7404	int ret;
7405	int slot;
7406	int iter_ret = 0;
7407	u64 total_dev = 0;
7408	u64 last_ra_node = 0;
7409
7410	path = btrfs_alloc_path();
7411	if (!path)
7412		return -ENOMEM;
7413
7414	/*
7415	 * The uuid_mutex is needed only if we are mounting a sprout FS;
7416	 * otherwise it is not required.
7417	 */
7418	mutex_lock(&uuid_mutex);
7419
7420	/*
7421	 * It is possible for mount and umount to race in such a way that
7422	 * we execute this code path, but open_fs_devices failed to clear
7423	 * total_rw_bytes. We certainly want it cleared before reading the
7424	 * device items, so clear it here.
7425	 */
7426	fs_info->fs_devices->total_rw_bytes = 0;
7427
7428	/*
7429	 * Lockdep complains about possible circular locking dependency between
7430	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7431	 * used for freeze protection of a fs (struct super_block.s_writers),
7432	 * which we take when starting a transaction, and extent buffers of the
7433	 * chunk tree if we call read_one_dev() while holding a lock on an
7434	 * extent buffer of the chunk tree. Since we are mounting the filesystem
7435	 * and at this point there can't be any concurrent task modifying the
7436	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7437	 */
7438	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7439	path->skip_locking = 1;
7440
7441	/*
7442	 * Read all device items, and then all the chunk items. All
7443	 * device items are found before any chunk item (their object id
7444	 * is smaller than the lowest possible object id for a chunk
7445	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7446	 */
7447	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7448	key.offset = 0;
7449	key.type = 0;
7450	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
7451		struct extent_buffer *node = path->nodes[1];
7452
7453		leaf = path->nodes[0];
7454		slot = path->slots[0];
7455
7456		if (node) {
7457			if (last_ra_node != node->start) {
7458				readahead_tree_node_children(node);
7459				last_ra_node = node->start;
7460			}
7461		}
7462		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7463			struct btrfs_dev_item *dev_item;
7464			dev_item = btrfs_item_ptr(leaf, slot,
7465						  struct btrfs_dev_item);
7466			ret = read_one_dev(leaf, dev_item);
7467			if (ret)
7468				goto error;
7469			total_dev++;
7470		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7471			struct btrfs_chunk *chunk;
7472
7473			/*
7474			 * We are only called at mount time, so no need to take
7475			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7476			 * we always take fs_info->chunk_mutex first, before
7477			 * acquiring any locks on the chunk tree. This is a
7478			 * requirement for chunk allocation, see the comment on
7479			 * top of btrfs_chunk_alloc() for details.
7480			 */
7481			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7482			ret = read_one_chunk(&found_key, leaf, chunk);
7483			if (ret)
7484				goto error;
7485		}
7486	}
7487	/* Catch error found during iteration */
7488	if (iter_ret < 0) {
7489		ret = iter_ret;
7490		goto error;
7491	}
7492
7493	/*
7494	 * After loading chunk tree, we've got all device information,
7495	 * do another round of validation checks.
7496	 */
7497	if (total_dev != fs_info->fs_devices->total_devices) {
7498		btrfs_warn(fs_info,
7499"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7500			  btrfs_super_num_devices(fs_info->super_copy),
7501			  total_dev);
7502		fs_info->fs_devices->total_devices = total_dev;
7503		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7504	}
7505	if (btrfs_super_total_bytes(fs_info->super_copy) <
7506	    fs_info->fs_devices->total_rw_bytes) {
7507		btrfs_err(fs_info,
7508	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7509			  btrfs_super_total_bytes(fs_info->super_copy),
7510			  fs_info->fs_devices->total_rw_bytes);
7511		ret = -EINVAL;
7512		goto error;
7513	}
7514	ret = 0;
7515error:
7516	mutex_unlock(&uuid_mutex);
7517
7518	btrfs_free_path(path);
7519	return ret;
7520}
7521
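/*
 * Late device initialization: point every device (including seed devices)
 * at the fs_info and load zone information for the seed devices.
 */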
7522int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7523{
7524	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7525	struct btrfs_device *device;
7526	int ret = 0;
7527
7528	fs_devices->fs_info = fs_info;
7529
7530	mutex_lock(&fs_devices->device_list_mutex);
7531	list_for_each_entry(device, &fs_devices->devices, dev_list)
7532		device->fs_info = fs_info;
7533
7534	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7535		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7536			device->fs_info = fs_info;
7537			ret = btrfs_get_dev_zone_info(device, false);
7538			if (ret)
7539				break;
7540		}
7541
7542		seed_devs->fs_info = fs_info;
7543	}
7544	mutex_unlock(&fs_devices->device_list_mutex);
7545
7546	return ret;
7547}
7548
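/*
 * Raw accessors for the values[] array of a btrfs_dev_stats_item that lives
 * inside an extent buffer; @index selects which error counter is read or
 * written.
 */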
7549static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7550				 const struct btrfs_dev_stats_item *ptr,
7551				 int index)
7552{
7553	u64 val;
7554
7555	read_extent_buffer(eb, &val,
7556			   offsetof(struct btrfs_dev_stats_item, values) +
7557			    ((unsigned long)ptr) + (index * sizeof(u64)),
7558			   sizeof(val));
7559	return val;
7560}
7561
7562static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7563				      struct btrfs_dev_stats_item *ptr,
7564				      int index, u64 val)
7565{
7566	write_extent_buffer(eb, &val,
7567			    offsetof(struct btrfs_dev_stats_item, values) +
7568			     ((unsigned long)ptr) + (index * sizeof(u64)),
7569			    sizeof(val));
7570}
7571
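/*
 * Load the persistent error counters of one device from its dev_stats item
 * in the device tree.  If the item does not exist the counters are reset to
 * zero; when the dev root is not available the function is a no-op.
 */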
7572static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7573				       struct btrfs_path *path)
7574{
7575	struct btrfs_dev_stats_item *ptr;
7576	struct extent_buffer *eb;
7577	struct btrfs_key key;
7578	int item_size;
7579	int i, ret, slot;
7580
7581	if (!device->fs_info->dev_root)
7582		return 0;
7583
7584	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7585	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7586	key.offset = device->devid;
7587	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7588	if (ret) {
7589		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7590			btrfs_dev_stat_set(device, i, 0);
7591		device->dev_stats_valid = 1;
7592		btrfs_release_path(path);
7593		return ret < 0 ? ret : 0;
7594	}
7595	slot = path->slots[0];
7596	eb = path->nodes[0];
7597	item_size = btrfs_item_size(eb, slot);
7598
7599	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7600
7601	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7602		if (item_size >= (1 + i) * sizeof(__le64))
7603			btrfs_dev_stat_set(device, i,
7604					   btrfs_dev_stats_value(eb, ptr, i));
7605		else
7606			btrfs_dev_stat_set(device, i, 0);
7607	}
7608
7609	device->dev_stats_valid = 1;
7610	btrfs_dev_stat_print_on_load(device);
7611	btrfs_release_path(path);
7612
7613	return 0;
7614}
7615
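/*
 * Load persistent device statistics for every device of the filesystem,
 * including the devices of all seed filesystems.
 */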
7616int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7617{
7618	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7619	struct btrfs_device *device;
7620	struct btrfs_path *path = NULL;
7621	int ret = 0;
7622
7623	path = btrfs_alloc_path();
7624	if (!path)
7625		return -ENOMEM;
7626
7627	mutex_lock(&fs_devices->device_list_mutex);
7628	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7629		ret = btrfs_device_init_dev_stats(device, path);
7630		if (ret)
7631			goto out;
7632	}
7633	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7634		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7635			ret = btrfs_device_init_dev_stats(device, path);
7636			if (ret)
7637				goto out;
7638		}
7639	}
7640out:
7641	mutex_unlock(&fs_devices->device_list_mutex);
7642
7643	btrfs_free_path(path);
7644	return ret;
7645}
7646
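/*
 * Write the in-memory error counters of @device into its dev_stats item,
 * replacing an existing item that is too small and creating the item if it
 * does not exist yet.
 */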
7647static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7648				struct btrfs_device *device)
7649{
7650	struct btrfs_fs_info *fs_info = trans->fs_info;
7651	struct btrfs_root *dev_root = fs_info->dev_root;
7652	struct btrfs_path *path;
7653	struct btrfs_key key;
7654	struct extent_buffer *eb;
7655	struct btrfs_dev_stats_item *ptr;
7656	int ret;
7657	int i;
7658
7659	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7660	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7661	key.offset = device->devid;
7662
7663	path = btrfs_alloc_path();
7664	if (!path)
7665		return -ENOMEM;
7666	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7667	if (ret < 0) {
7668		btrfs_warn_in_rcu(fs_info,
7669			"error %d while searching for dev_stats item for device %s",
7670				  ret, btrfs_dev_name(device));
7671		goto out;
7672	}
7673
7674	if (ret == 0 &&
7675	    btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7676		/* need to delete old one and insert a new one */
7677		ret = btrfs_del_item(trans, dev_root, path);
7678		if (ret != 0) {
7679			btrfs_warn_in_rcu(fs_info,
7680				"delete too small dev_stats item for device %s failed %d",
7681					  btrfs_dev_name(device), ret);
7682			goto out;
7683		}
7684		ret = 1;
7685	}
7686
7687	if (ret == 1) {
7688		/* need to insert a new item */
7689		btrfs_release_path(path);
7690		ret = btrfs_insert_empty_item(trans, dev_root, path,
7691					      &key, sizeof(*ptr));
7692		if (ret < 0) {
7693			btrfs_warn_in_rcu(fs_info,
7694				"insert dev_stats item for device %s failed %d",
7695				btrfs_dev_name(device), ret);
7696			goto out;
7697		}
7698	}
7699
7700	eb = path->nodes[0];
7701	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7702	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7703		btrfs_set_dev_stats_value(eb, ptr, i,
7704					  btrfs_dev_stat_read(device, i));
7705	btrfs_mark_buffer_dirty(eb);
7706
7707out:
7708	btrfs_free_path(path);
7709	return ret;
7710}
7711
7712/*
7713 * Called from commit_transaction(). Writes all changed device stats to disk.
7714 */
7715int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7716{
7717	struct btrfs_fs_info *fs_info = trans->fs_info;
7718	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7719	struct btrfs_device *device;
7720	int stats_cnt;
7721	int ret = 0;
7722
7723	mutex_lock(&fs_devices->device_list_mutex);
7724	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7725		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7726		if (!device->dev_stats_valid || stats_cnt == 0)
7727			continue;
7728
7729
7730		/*
7731		 * There is a LOAD-LOAD control dependency between the value of
7732		 * dev_stats_ccnt and updating the on-disk values which requires
7733		 * reading the in-memory counters. Such control dependencies
7734		 * require explicit read memory barriers.
7735		 *
7736		 * This memory barrier pairs with smp_mb__before_atomic in
7737		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7738		 * barrier implied by atomic_xchg in
7739		 * btrfs_dev_stats_read_and_reset
7740		 */
7741		smp_rmb();
7742
7743		ret = update_dev_stat_item(trans, device);
7744		if (!ret)
7745			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7746	}
7747	mutex_unlock(&fs_devices->device_list_mutex);
7748
7749	return ret;
7750}
7751
7752void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7753{
7754	btrfs_dev_stat_inc(dev, index);
7755
7756	if (!dev->dev_stats_valid)
7757		return;
7758	btrfs_err_rl_in_rcu(dev->fs_info,
7759		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7760			   btrfs_dev_name(dev),
7761			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7762			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7763			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7764			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7765			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7766}
7767
7768static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7769{
7770	int i;
7771
7772	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7773		if (btrfs_dev_stat_read(dev, i) != 0)
7774			break;
7775	if (i == BTRFS_DEV_STAT_VALUES_MAX)
7776		return; /* all values == 0, suppress message */
7777
7778	btrfs_info_in_rcu(dev->fs_info,
7779		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7780	       btrfs_dev_name(dev),
7781	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7782	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7783	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7784	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7785	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7786}
7787
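/*
 * Copy the error counters of the device named in @stats into the ioctl
 * argument, optionally resetting them when BTRFS_DEV_STATS_RESET is set in
 * stats->flags.
 */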
7788int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7789			struct btrfs_ioctl_get_dev_stats *stats)
7790{
7791	BTRFS_DEV_LOOKUP_ARGS(args);
7792	struct btrfs_device *dev;
7793	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7794	int i;
7795
7796	mutex_lock(&fs_devices->device_list_mutex);
7797	args.devid = stats->devid;
7798	dev = btrfs_find_device(fs_info->fs_devices, &args);
7799	mutex_unlock(&fs_devices->device_list_mutex);
7800
7801	if (!dev) {
7802		btrfs_warn(fs_info, "get dev_stats failed, device not found");
7803		return -ENODEV;
7804	} else if (!dev->dev_stats_valid) {
7805		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7806		return -ENODEV;
7807	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7808		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7809			if (stats->nr_items > i)
7810				stats->values[i] =
7811					btrfs_dev_stat_read_and_reset(dev, i);
7812			else
7813				btrfs_dev_stat_set(dev, i, 0);
7814		}
7815		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7816			   current->comm, task_pid_nr(current));
7817	} else {
7818		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7819			if (stats->nr_items > i)
7820				stats->values[i] = btrfs_dev_stat_read(dev, i);
7821	}
7822	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7823		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7824	return 0;
7825}
7826
7827/*
7828 * Update the size and bytes used for each device where it changed.  This is
7829 * delayed since we would otherwise get errors while writing out the
7830 * superblocks.
7831 *
7832 * Must be invoked during transaction commit.
7833 */
7834void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7835{
7836	struct btrfs_device *curr, *next;
7837
7838	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7839
7840	if (list_empty(&trans->dev_update_list))
7841		return;
7842
7843	/*
7844	 * We don't need the device_list_mutex here.  This list is owned by the
7845	 * transaction and the transaction must complete before the device is
7846	 * released.
7847	 */
7848	mutex_lock(&trans->fs_info->chunk_mutex);
7849	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7850				 post_commit_list) {
7851		list_del_init(&curr->post_commit_list);
7852		curr->commit_total_bytes = curr->disk_total_bytes;
7853		curr->commit_bytes_used = curr->bytes_used;
7854	}
7855	mutex_unlock(&trans->fs_info->chunk_mutex);
7856}
7857
7858/*
7859 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7860 */
7861int btrfs_bg_type_to_factor(u64 flags)
7862{
7863	const int index = btrfs_bg_flags_to_raid_index(flags);
7864
7865	return btrfs_raid_array[index].ncopies;
7866}
7867
7868
7869
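/*
 * Cross check a single dev extent against the chunk it claims to belong to:
 * the chunk mapping must exist, the length must match the stripe length,
 * exactly one stripe of the chunk must point back at this devid/physical
 * offset, and the extent must fit inside the device (and its zones, on
 * zoned devices).
 */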
7870static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7871				 u64 chunk_offset, u64 devid,
7872				 u64 physical_offset, u64 physical_len)
7873{
7874	struct btrfs_dev_lookup_args args = { .devid = devid };
7875	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7876	struct extent_map *em;
7877	struct map_lookup *map;
7878	struct btrfs_device *dev;
7879	u64 stripe_len;
7880	bool found = false;
7881	int ret = 0;
7882	int i;
7883
7884	read_lock(&em_tree->lock);
7885	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7886	read_unlock(&em_tree->lock);
7887
7888	if (!em) {
7889		btrfs_err(fs_info,
7890"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7891			  physical_offset, devid);
7892		ret = -EUCLEAN;
7893		goto out;
7894	}
7895
7896	map = em->map_lookup;
7897	stripe_len = btrfs_calc_stripe_length(em);
7898	if (physical_len != stripe_len) {
7899		btrfs_err(fs_info,
7900"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7901			  physical_offset, devid, em->start, physical_len,
7902			  stripe_len);
7903		ret = -EUCLEAN;
7904		goto out;
7905	}
7906
7907	/*
7908	 * Very old mkfs.btrfs (before v4.1) will not respect the reserved
7909	 * space. Although the kernel can handle it without problems, it is
7910	 * better to warn the user.
7911	 */
7912	if (physical_offset < BTRFS_DEVICE_RANGE_RESERVED)
7913		btrfs_warn(fs_info,
7914		"devid %llu physical %llu len %llu inside the reserved space",
7915			   devid, physical_offset, physical_len);
7916
7917	for (i = 0; i < map->num_stripes; i++) {
7918		if (map->stripes[i].dev->devid == devid &&
7919		    map->stripes[i].physical == physical_offset) {
7920			found = true;
7921			if (map->verified_stripes >= map->num_stripes) {
7922				btrfs_err(fs_info,
7923				"too many dev extents for chunk %llu found",
7924					  em->start);
7925				ret = -EUCLEAN;
7926				goto out;
7927			}
7928			map->verified_stripes++;
7929			break;
7930		}
7931	}
7932	if (!found) {
7933		btrfs_err(fs_info,
7934	"dev extent physical offset %llu devid %llu has no corresponding chunk",
7935			physical_offset, devid);
7936		ret = -EUCLEAN;
7937	}
7938
7939	/* Make sure no dev extent is beyond device boundary */
7940	dev = btrfs_find_device(fs_info->fs_devices, &args);
7941	if (!dev) {
7942		btrfs_err(fs_info, "failed to find devid %llu", devid);
7943		ret = -EUCLEAN;
7944		goto out;
7945	}
7946
7947	if (physical_offset + physical_len > dev->disk_total_bytes) {
7948		btrfs_err(fs_info,
7949"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7950			  devid, physical_offset, physical_len,
7951			  dev->disk_total_bytes);
7952		ret = -EUCLEAN;
7953		goto out;
7954	}
7955
7956	if (dev->zone_info) {
7957		u64 zone_size = dev->zone_info->zone_size;
7958
7959		if (!IS_ALIGNED(physical_offset, zone_size) ||
7960		    !IS_ALIGNED(physical_len, zone_size)) {
7961			btrfs_err(fs_info,
7962"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
7963				  devid, physical_offset, physical_len);
7964			ret = -EUCLEAN;
7965			goto out;
7966		}
7967	}
7968
7969out:
7970	free_extent_map(em);
7971	return ret;
7972}
7973
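/*
 * After all dev extents have been checked, make sure every chunk mapping
 * saw exactly the number of dev extents (stripes) it expects.
 */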
7974static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7975{
7976	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7977	struct extent_map *em;
7978	struct rb_node *node;
7979	int ret = 0;
7980
7981	read_lock(&em_tree->lock);
7982	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7983		em = rb_entry(node, struct extent_map, rb_node);
7984		if (em->map_lookup->num_stripes !=
7985		    em->map_lookup->verified_stripes) {
7986			btrfs_err(fs_info,
7987			"chunk %llu has missing dev extent, have %d expect %d",
7988				  em->start, em->map_lookup->verified_stripes,
7989				  em->map_lookup->num_stripes);
7990			ret = -EUCLEAN;
7991			goto out;
7992		}
7993	}
7994out:
7995	read_unlock(&em_tree->lock);
7996	return ret;
7997}
7998
7999/*
8000 * Ensure that all dev extents are mapped to correct chunk, otherwise
8001 * later chunk allocation/free would cause unexpected behavior.
8002 *
8003 * NOTE: This will iterate through the whole device tree, which should be
8004 * about the same size as the chunk tree.  This slightly increases mount time.
8005 */
8006int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8007{
8008	struct btrfs_path *path;
8009	struct btrfs_root *root = fs_info->dev_root;
8010	struct btrfs_key key;
8011	u64 prev_devid = 0;
8012	u64 prev_dev_ext_end = 0;
8013	int ret = 0;
8014
8015	/*
8016	 * We don't have a dev_root because we mounted with ignorebadroots and
8017	 * failed to load the root, so we want to skip the verification in this
8018	 * case for sure.
8019	 *
8020	 * However if the dev root is fine, but the tree itself is corrupted
8021	 * we'd still fail to mount.  This verification is only to make sure
8022	 * writes can happen safely, so instead just bypass this check
8023	 * completely in the case of IGNOREBADROOTS.
8024	 */
8025	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8026		return 0;
8027
8028	key.objectid = 1;
8029	key.type = BTRFS_DEV_EXTENT_KEY;
8030	key.offset = 0;
8031
8032	path = btrfs_alloc_path();
8033	if (!path)
8034		return -ENOMEM;
8035
8036	path->reada = READA_FORWARD;
8037	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8038	if (ret < 0)
8039		goto out;
8040
8041	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8042		ret = btrfs_next_leaf(root, path);
8043		if (ret < 0)
8044			goto out;
8045		/* No dev extents at all? Not good */
8046		if (ret > 0) {
8047			ret = -EUCLEAN;
8048			goto out;
8049		}
8050	}
8051	while (1) {
8052		struct extent_buffer *leaf = path->nodes[0];
8053		struct btrfs_dev_extent *dext;
8054		int slot = path->slots[0];
8055		u64 chunk_offset;
8056		u64 physical_offset;
8057		u64 physical_len;
8058		u64 devid;
8059
8060		btrfs_item_key_to_cpu(leaf, &key, slot);
8061		if (key.type != BTRFS_DEV_EXTENT_KEY)
8062			break;
8063		devid = key.objectid;
8064		physical_offset = key.offset;
8065
8066		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8067		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8068		physical_len = btrfs_dev_extent_length(leaf, dext);
8069
8070		/* Check if this dev extent overlaps with the previous one */
8071		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8072			btrfs_err(fs_info,
8073"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8074				  devid, physical_offset, prev_dev_ext_end);
8075			ret = -EUCLEAN;
8076			goto out;
8077		}
8078
8079		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8080					    physical_offset, physical_len);
8081		if (ret < 0)
8082			goto out;
8083		prev_devid = devid;
8084		prev_dev_ext_end = physical_offset + physical_len;
8085
8086		ret = btrfs_next_item(root, path);
8087		if (ret < 0)
8088			goto out;
8089		if (ret > 0) {
8090			ret = 0;
8091			break;
8092		}
8093	}
8094
8095	/* Ensure all chunks have corresponding dev extents */
8096	ret = verify_chunk_dev_extent_mapping(fs_info);
8097out:
8098	btrfs_free_path(path);
8099	return ret;
8100}
8101
8102/*
8103 * Check whether the given block group or device is pinned by any inode being
8104 * used as a swapfile.
8105 */
8106bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8107{
8108	struct btrfs_swapfile_pin *sp;
8109	struct rb_node *node;
8110
8111	spin_lock(&fs_info->swapfile_pins_lock);
8112	node = fs_info->swapfile_pins.rb_node;
8113	while (node) {
8114		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8115		if (ptr < sp->ptr)
8116			node = node->rb_left;
8117		else if (ptr > sp->ptr)
8118			node = node->rb_right;
8119		else
8120			break;
8121	}
8122	spin_unlock(&fs_info->swapfile_pins_lock);
8123	return node != NULL;
8124}
8125
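/*
 * Kthread that relocates a block group in order to repair write errors on a
 * zoned filesystem.  It grabs the BALANCE exclusive operation, re-checks
 * that the block group still exists and is still flagged for relocating
 * repair, and then relocates the chunk.
 */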
8126static int relocating_repair_kthread(void *data)
8127{
8128	struct btrfs_block_group *cache = data;
8129	struct btrfs_fs_info *fs_info = cache->fs_info;
8130	u64 target;
8131	int ret = 0;
8132
8133	target = cache->start;
8134	btrfs_put_block_group(cache);
8135
8136	sb_start_write(fs_info->sb);
8137	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8138		btrfs_info(fs_info,
8139			   "zoned: skip relocating block group %llu to repair: EBUSY",
8140			   target);
8141		sb_end_write(fs_info->sb);
8142		return -EBUSY;
8143	}
8144
8145	mutex_lock(&fs_info->reclaim_bgs_lock);
8146
8147	/* Ensure block group still exists */
8148	cache = btrfs_lookup_block_group(fs_info, target);
8149	if (!cache)
8150		goto out;
8151
8152	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
8153		goto out;
8154
8155	ret = btrfs_may_alloc_data_chunk(fs_info, target);
8156	if (ret < 0)
8157		goto out;
8158
8159	btrfs_info(fs_info,
8160		   "zoned: relocating block group %llu to repair IO failure",
8161		   target);
8162	ret = btrfs_relocate_chunk(fs_info, target);
8163
8164out:
8165	if (cache)
8166		btrfs_put_block_group(cache);
8167	mutex_unlock(&fs_info->reclaim_bgs_lock);
8168	btrfs_exclop_finish(fs_info);
8169	sb_end_write(fs_info->sb);
8170
8171	return ret;
8172}
8173
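/*
 * Kick off a background relocation of the block group containing @logical
 * to repair an I/O failure on a zoned filesystem.  Returns false only when
 * the filesystem is not zoned; degraded mounts and already-running repairs
 * are silently skipped.
 */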
8174bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8175{
8176	struct btrfs_block_group *cache;
8177
8178	if (!btrfs_is_zoned(fs_info))
8179		return false;
8180
8181	/* Do not attempt to repair in degraded state */
8182	if (btrfs_test_opt(fs_info, DEGRADED))
8183		return true;
8184
8185	cache = btrfs_lookup_block_group(fs_info, logical);
8186	if (!cache)
8187		return true;
8188
8189	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
8190		btrfs_put_block_group(cache);
8191		return true;
8192	}
8193
8194	kthread_run(relocating_repair_kthread, cache,
8195		    "btrfs-relocating-repair");
8196
8197	return true;
8198}
v3.15
 
   1/*
   2 * Copyright (C) 2007 Oracle.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
 
  18#include <linux/sched.h>
  19#include <linux/bio.h>
  20#include <linux/slab.h>
  21#include <linux/buffer_head.h>
  22#include <linux/blkdev.h>
  23#include <linux/random.h>
  24#include <linux/iocontext.h>
  25#include <linux/capability.h>
  26#include <linux/ratelimit.h>
  27#include <linux/kthread.h>
  28#include <linux/raid/pq.h>
  29#include <linux/semaphore.h>
  30#include <asm/div64.h>
 
 
 
  31#include "ctree.h"
  32#include "extent_map.h"
  33#include "disk-io.h"
  34#include "transaction.h"
  35#include "print-tree.h"
  36#include "volumes.h"
  37#include "raid56.h"
  38#include "async-thread.h"
  39#include "check-integrity.h"
  40#include "rcu-string.h"
  41#include "math.h"
  42#include "dev-replace.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  43
  44static int init_first_rw_device(struct btrfs_trans_handle *trans,
  45				struct btrfs_root *root,
  46				struct btrfs_device *device);
  47static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
  48static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
  49static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
  50static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
  51
  52static DEFINE_MUTEX(uuid_mutex);
  53static LIST_HEAD(fs_uuids);
  54
  55static void lock_chunks(struct btrfs_root *root)
  56{
  57	mutex_lock(&root->fs_info->chunk_mutex);
 
 
 
 
 
  58}
  59
  60static void unlock_chunks(struct btrfs_root *root)
  61{
  62	mutex_unlock(&root->fs_info->chunk_mutex);
 
 
  63}
  64
  65static struct btrfs_fs_devices *__alloc_fs_devices(void)
 
 
 
 
  66{
  67	struct btrfs_fs_devices *fs_devs;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  68
  69	fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
  70	if (!fs_devs)
  71		return ERR_PTR(-ENOMEM);
 
 
 
  72
  73	mutex_init(&fs_devs->device_list_mutex);
 
 
  74
  75	INIT_LIST_HEAD(&fs_devs->devices);
  76	INIT_LIST_HEAD(&fs_devs->alloc_list);
  77	INIT_LIST_HEAD(&fs_devs->list);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  78
  79	return fs_devs;
 
 
 
 
  80}
  81
  82/**
  83 * alloc_fs_devices - allocate struct btrfs_fs_devices
  84 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
  85 *		generated.
  86 *
  87 * Return: a pointer to a new &struct btrfs_fs_devices on success;
  88 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
  89 * can be destroyed with kfree() right away.
  90 */
  91static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
 
  92{
  93	struct btrfs_fs_devices *fs_devs;
  94
  95	fs_devs = __alloc_fs_devices();
  96	if (IS_ERR(fs_devs))
  97		return fs_devs;
  98
 
 
 
 
 
 
  99	if (fsid)
 100		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
 101	else
 102		generate_random_uuid(fs_devs->fsid);
 
 
 
 103
 104	return fs_devs;
 105}
 106
 
 
 
 
 
 
 
 
 
 107static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
 108{
 109	struct btrfs_device *device;
 
 110	WARN_ON(fs_devices->opened);
 111	while (!list_empty(&fs_devices->devices)) {
 112		device = list_entry(fs_devices->devices.next,
 113				    struct btrfs_device, dev_list);
 114		list_del(&device->dev_list);
 115		rcu_string_free(device->name);
 116		kfree(device);
 117	}
 118	kfree(fs_devices);
 119}
 120
 121static void btrfs_kobject_uevent(struct block_device *bdev,
 122				 enum kobject_action action)
 123{
 124	int ret;
 125
 126	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
 127	if (ret)
 128		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
 129			action,
 130			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
 131			&disk_to_dev(bdev->bd_disk)->kobj);
 132}
 133
 134void btrfs_cleanup_fs_uuids(void)
 135{
 136	struct btrfs_fs_devices *fs_devices;
 137
 138	while (!list_empty(&fs_uuids)) {
 139		fs_devices = list_entry(fs_uuids.next,
 140					struct btrfs_fs_devices, list);
 141		list_del(&fs_devices->list);
 142		free_fs_devices(fs_devices);
 143	}
 144}
 145
 146static struct btrfs_device *__alloc_device(void)
 
 147{
 148	struct btrfs_device *dev;
 149
 150	dev = kzalloc(sizeof(*dev), GFP_NOFS);
 151	if (!dev)
 152		return ERR_PTR(-ENOMEM);
 153
 154	INIT_LIST_HEAD(&dev->dev_list);
 155	INIT_LIST_HEAD(&dev->dev_alloc_list);
 156
 157	spin_lock_init(&dev->io_lock);
 158
 159	spin_lock_init(&dev->reada_lock);
 160	atomic_set(&dev->reada_in_flight, 0);
 161	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
 162	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);
 163
 164	return dev;
 165}
 166
 167static noinline struct btrfs_device *__find_device(struct list_head *head,
 168						   u64 devid, u8 *uuid)
 169{
 170	struct btrfs_device *dev;
 171
 172	list_for_each_entry(dev, head, dev_list) {
 173		if (dev->devid == devid &&
 174		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
 175			return dev;
 176		}
 177	}
 178	return NULL;
 179}
 180
 181static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 
 182{
 
 183	struct btrfs_fs_devices *fs_devices;
 184
 185	list_for_each_entry(fs_devices, &fs_uuids, list) {
 186		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
 
 
 
 
 
 
 
 
 
 
 187			return fs_devices;
 
 188	}
 189	return NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 190}
 191
 
 192static int
 193btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
 194		      int flush, struct block_device **bdev,
 195		      struct buffer_head **bh)
 196{
 197	int ret;
 198
 199	*bdev = blkdev_get_by_path(device_path, flags, holder);
 200
 201	if (IS_ERR(*bdev)) {
 202		ret = PTR_ERR(*bdev);
 203		printk(KERN_INFO "BTRFS: open %s failed\n", device_path);
 204		goto error;
 205	}
 206
 207	if (flush)
 208		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
 209	ret = set_blocksize(*bdev, 4096);
 210	if (ret) {
 211		blkdev_put(*bdev, flags);
 212		goto error;
 213	}
 214	invalidate_bdev(*bdev);
 215	*bh = btrfs_read_dev_super(*bdev);
 216	if (!*bh) {
 217		ret = -EINVAL;
 218		blkdev_put(*bdev, flags);
 219		goto error;
 220	}
 221
 222	return 0;
 223
 224error:
 225	*bdev = NULL;
 226	*bh = NULL;
 227	return ret;
 228}
 229
 230static void requeue_list(struct btrfs_pending_bios *pending_bios,
 231			struct bio *head, struct bio *tail)
 
 
 
 
 
 
 
 
 
 
 
 
 232{
 
 
 
 
 
 233
 234	struct bio *old_head;
 
 
 
 235
 236	old_head = pending_bios->head;
 237	pending_bios->head = head;
 238	if (pending_bios->tail)
 239		tail->bi_next = old_head;
 240	else
 241		pending_bios->tail = tail;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 242}
 243
 244/*
 245 * we try to collect pending bios for a device so we don't get a large
 246 * number of procs sending bios down to the same device.  This greatly
 247 * improves the schedulers ability to collect and merge the bios.
 248 *
 249 * But, it also turns into a long list of bios to process and that is sure
 250 * to eventually make the worker thread block.  The solution here is to
 251 * make some progress and then put this work struct back at the end of
 252 * the list if the block device is congested.  This way, multiple devices
 253 * can make progress from a single worker thread.
 254 */
 255static noinline void run_scheduled_bios(struct btrfs_device *device)
 256{
 257	struct bio *pending;
 258	struct backing_dev_info *bdi;
 259	struct btrfs_fs_info *fs_info;
 260	struct btrfs_pending_bios *pending_bios;
 261	struct bio *tail;
 262	struct bio *cur;
 263	int again = 0;
 264	unsigned long num_run;
 265	unsigned long batch_run = 0;
 266	unsigned long limit;
 267	unsigned long last_waited = 0;
 268	int force_reg = 0;
 269	int sync_pending = 0;
 270	struct blk_plug plug;
 271
 272	/*
 273	 * this function runs all the bios we've collected for
 274	 * a particular device.  We don't want to wander off to
 275	 * another device without first sending all of these down.
 276	 * So, setup a plug here and finish it off before we return
 277	 */
 278	blk_start_plug(&plug);
 279
 280	bdi = blk_get_backing_dev_info(device->bdev);
 281	fs_info = device->dev_root->fs_info;
 282	limit = btrfs_async_submit_limit(fs_info);
 283	limit = limit * 2 / 3;
 284
 285loop:
 286	spin_lock(&device->io_lock);
 
 
 
 
 
 
 287
 288loop_lock:
 289	num_run = 0;
 
 
 
 
 
 290
 291	/* take all the bios off the list at once and process them
 292	 * later on (without the lock held).  But, remember the
 293	 * tail and other pointers so the bios can be properly reinserted
 294	 * into the list if we hit congestion
 295	 */
 296	if (!force_reg && device->pending_sync_bios.head) {
 297		pending_bios = &device->pending_sync_bios;
 298		force_reg = 1;
 299	} else {
 300		pending_bios = &device->pending_bios;
 301		force_reg = 0;
 
 
 302	}
 303
 304	pending = pending_bios->head;
 305	tail = pending_bios->tail;
 306	WARN_ON(pending && !tail);
 
 
 
 
 
 
 307
 308	/*
 309	 * if pending was null this time around, no bios need processing
 310	 * at all and we can stop.  Otherwise it'll loop back up again
 311	 * and do an additional check so no bios are missed.
 312	 *
 313	 * device->running_pending is used to synchronize with the
 314	 * schedule_bio code.
 315	 */
 316	if (device->pending_sync_bios.head == NULL &&
 317	    device->pending_bios.head == NULL) {
 318		again = 0;
 319		device->running_pending = 0;
 320	} else {
 321		again = 1;
 322		device->running_pending = 1;
 323	}
 
 324
 325	pending_bios->head = NULL;
 326	pending_bios->tail = NULL;
 327
 328	spin_unlock(&device->io_lock);
 
 
 329
 330	while (pending) {
 
 331
 332		rmb();
 333		/* we want to work on both lists, but do more bios on the
 334		 * sync list than the regular list
 335		 */
 336		if ((num_run > 32 &&
 337		    pending_bios != &device->pending_sync_bios &&
 338		    device->pending_sync_bios.head) ||
 339		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
 340		    device->pending_bios.head)) {
 341			spin_lock(&device->io_lock);
 342			requeue_list(pending_bios, pending, tail);
 343			goto loop_lock;
 344		}
 345
 346		cur = pending;
 347		pending = pending->bi_next;
 348		cur->bi_next = NULL;
 349
 350		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
 351		    waitqueue_active(&fs_info->async_submit_wait))
 352			wake_up(&fs_info->async_submit_wait);
 353
 354		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
 
 
 
 
 
 
 355
 356		/*
 357		 * if we're doing the sync list, record that our
 358		 * plug has some sync requests on it
 359		 *
 360		 * If we're doing the regular list and there are
 361		 * sync requests sitting around, unplug before
 362		 * we add more
 363		 */
 364		if (pending_bios == &device->pending_sync_bios) {
 365			sync_pending = 1;
 366		} else if (sync_pending) {
 367			blk_finish_plug(&plug);
 368			blk_start_plug(&plug);
 369			sync_pending = 0;
 370		}
 371
 372		btrfsic_submit_bio(cur->bi_rw, cur);
 373		num_run++;
 374		batch_run++;
 375		if (need_resched())
 376			cond_resched();
 377
 378		/*
 379		 * we made progress, there is more work to do and the bdi
 380		 * is now congested.  Back off and let other work structs
 381		 * run instead
 382		 */
 383		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
 384		    fs_info->fs_devices->open_devices > 1) {
 385			struct io_context *ioc;
 386
 387			ioc = current->io_context;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 388
 389			/*
 390			 * the main goal here is that we don't want to
 391			 * block if we're going to be able to submit
 392			 * more requests without blocking.
 393			 *
 394			 * This code does two great things, it pokes into
 395			 * the elevator code from a filesystem _and_
 396			 * it makes assumptions about how batching works.
 397			 */
 398			if (ioc && ioc->nr_batch_requests > 0 &&
 399			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
 400			    (last_waited == 0 ||
 401			     ioc->last_waited == last_waited)) {
 402				/*
 403				 * we want to go through our batch of
 404				 * requests and stop.  So, we copy out
 405				 * the ioc->last_waited time and test
 406				 * against it before looping
 407				 */
 408				last_waited = ioc->last_waited;
 409				if (need_resched())
 410					cond_resched();
 411				continue;
 412			}
 413			spin_lock(&device->io_lock);
 414			requeue_list(pending_bios, pending, tail);
 415			device->running_pending = 1;
 416
 417			spin_unlock(&device->io_lock);
 418			btrfs_queue_work(fs_info->submit_workers,
 419					 &device->work);
 420			goto done;
 421		}
 422		/* unplug every 64 requests just for good measure */
 423		if (batch_run % 64 == 0) {
 424			blk_finish_plug(&plug);
 425			blk_start_plug(&plug);
 426			sync_pending = 0;
 427		}
 428	}
 429
 430	cond_resched();
 431	if (again)
 432		goto loop;
 433
 434	spin_lock(&device->io_lock);
 435	if (device->pending_bios.head || device->pending_sync_bios.head)
 436		goto loop_lock;
 437	spin_unlock(&device->io_lock);
 438
 439done:
 440	blk_finish_plug(&plug);
 441}
 442
 443static void pending_bios_fn(struct btrfs_work *work)
 
 444{
 445	struct btrfs_device *device;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 446
 447	device = container_of(work, struct btrfs_device, work);
 448	run_scheduled_bios(device);
 449}
 450
 451/*
 452 * Add new device to list of registered devices
 453 *
 454 * Returns:
 455 * 1   - first time device is seen
 456 * 0   - device already known
 457 * < 0 - error
 458 */
 459static noinline int device_list_add(const char *path,
 460			   struct btrfs_super_block *disk_super,
 461			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
 462{
 463	struct btrfs_device *device;
 464	struct btrfs_fs_devices *fs_devices;
 465	struct rcu_string *name;
 466	int ret = 0;
 467	u64 found_transid = btrfs_super_generation(disk_super);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 468
 469	fs_devices = find_fsid(disk_super->fsid);
 470	if (!fs_devices) {
 471		fs_devices = alloc_fs_devices(disk_super->fsid);
 
 
 
 
 
 472		if (IS_ERR(fs_devices))
 473			return PTR_ERR(fs_devices);
 474
 475		list_add(&fs_devices->list, &fs_uuids);
 476		fs_devices->latest_devid = devid;
 477		fs_devices->latest_trans = found_transid;
 
 478
 479		device = NULL;
 480	} else {
 481		device = __find_device(&fs_devices->devices, devid,
 482				       disk_super->dev_item.uuid);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 483	}
 
 484	if (!device) {
 485		if (fs_devices->opened)
 486			return -EBUSY;
 487
 
 
 
 
 
 
 
 
 
 488		device = btrfs_alloc_device(NULL, &devid,
 489					    disk_super->dev_item.uuid);
 
 490		if (IS_ERR(device)) {
 
 491			/* we can safely leave the fs_devices entry around */
 492			return PTR_ERR(device);
 493		}
 494
 495		name = rcu_string_strdup(path, GFP_NOFS);
 496		if (!name) {
 497			kfree(device);
 498			return -ENOMEM;
 499		}
 500		rcu_assign_pointer(device->name, name);
 501
 502		mutex_lock(&fs_devices->device_list_mutex);
 503		list_add_rcu(&device->dev_list, &fs_devices->devices);
 504		fs_devices->num_devices++;
 505		mutex_unlock(&fs_devices->device_list_mutex);
 506
 507		ret = 1;
 508		device->fs_devices = fs_devices;
 
 
 
 
 
 
 
 
 
 
 
 
 
 509	} else if (!device->name || strcmp(device->name->str, path)) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 510		name = rcu_string_strdup(path, GFP_NOFS);
 511		if (!name)
 512			return -ENOMEM;
 
 
 513		rcu_string_free(device->name);
 514		rcu_assign_pointer(device->name, name);
 515		if (device->missing) {
 516			fs_devices->missing_devices--;
 517			device->missing = 0;
 518		}
 
 519	}
 520
 521	if (found_transid > fs_devices->latest_trans) {
 522		fs_devices->latest_devid = devid;
 523		fs_devices->latest_trans = found_transid;
 
 
 
 
 
 
 
 524	}
 525	*fs_devices_ret = fs_devices;
 526
 527	return ret;
 
 
 
 528}
 529
 530static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 531{
 532	struct btrfs_fs_devices *fs_devices;
 533	struct btrfs_device *device;
 534	struct btrfs_device *orig_dev;
 
 
 
 535
 536	fs_devices = alloc_fs_devices(orig->fsid);
 537	if (IS_ERR(fs_devices))
 538		return fs_devices;
 539
 540	fs_devices->latest_devid = orig->latest_devid;
 541	fs_devices->latest_trans = orig->latest_trans;
 542	fs_devices->total_devices = orig->total_devices;
 543
 544	/* We have held the volume lock, it is safe to get the devices. */
 545	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
 546		struct rcu_string *name;
 547
 548		device = btrfs_alloc_device(NULL, &orig_dev->devid,
 549					    orig_dev->uuid);
 550		if (IS_ERR(device))
 551			goto error;
 552
 553		/*
 554		 * This is ok to do without rcu read locked because we hold the
 555		 * uuid mutex so nothing we touch in here is going to disappear.
 556		 */
 557		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
 558		if (!name) {
 559			kfree(device);
 
 
 
 
 560			goto error;
 561		}
 562		rcu_assign_pointer(device->name, name);
 
 
 
 
 
 
 
 
 
 
 
 563
 564		list_add(&device->dev_list, &fs_devices->devices);
 565		device->fs_devices = fs_devices;
 566		fs_devices->num_devices++;
 567	}
 568	return fs_devices;
 569error:
 570	free_fs_devices(fs_devices);
 571	return ERR_PTR(-ENOMEM);
 572}
 573
 574void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
 575			       struct btrfs_fs_devices *fs_devices, int step)
 576{
 577	struct btrfs_device *device, *next;
 578
 579	struct block_device *latest_bdev = NULL;
 580	u64 latest_devid = 0;
 581	u64 latest_transid = 0;
 582
 583	mutex_lock(&uuid_mutex);
 584again:
 585	/* This is the initialized path, it is safe to release the devices. */
 586	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
 587		if (device->in_fs_metadata) {
 588			if (!device->is_tgtdev_for_dev_replace &&
 589			    (!latest_transid ||
 590			     device->generation > latest_transid)) {
 591				latest_devid = device->devid;
 592				latest_transid = device->generation;
 593				latest_bdev = device->bdev;
 
 594			}
 595			continue;
 596		}
 597
 598		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
 599			/*
 600			 * In the first step, keep the device which has
 601			 * the correct fsid and the devid that is used
 602			 * for the dev_replace procedure.
 603			 * In the second step, the dev_replace state is
 604			 * read from the device tree and it is known
 605			 * whether the procedure is really active or
 606			 * not, which means whether this device is
 607			 * used or whether it should be removed.
 608			 */
 609			if (step == 0 || device->is_tgtdev_for_dev_replace) {
 610				continue;
 611			}
 612		}
 613		if (device->bdev) {
 614			blkdev_put(device->bdev, device->mode);
 615			device->bdev = NULL;
 616			fs_devices->open_devices--;
 617		}
 618		if (device->writeable) {
 619			list_del_init(&device->dev_alloc_list);
 620			device->writeable = 0;
 621			if (!device->is_tgtdev_for_dev_replace)
 622				fs_devices->rw_devices--;
 623		}
 624		list_del_init(&device->dev_list);
 625		fs_devices->num_devices--;
 626		rcu_string_free(device->name);
 627		kfree(device);
 628	}
 629
 630	if (fs_devices->seed) {
 631		fs_devices = fs_devices->seed;
 632		goto again;
 633	}
 634
 635	fs_devices->latest_bdev = latest_bdev;
 636	fs_devices->latest_devid = latest_devid;
 637	fs_devices->latest_trans = latest_transid;
 638
 639	mutex_unlock(&uuid_mutex);
 640}
 641
 642static void __free_device(struct work_struct *work)
 
 
 
 
 643{
 644	struct btrfs_device *device;
 
 
 
 
 645
 646	device = container_of(work, struct btrfs_device, rcu_work);
 
 647
 648	if (device->bdev)
 649		blkdev_put(device->bdev, device->mode);
 650
 651	rcu_string_free(device->name);
 652	kfree(device);
 653}
 654
 655static void free_device(struct rcu_head *head)
 656{
 657	struct btrfs_device *device;
 
 658
 659	device = container_of(head, struct btrfs_device, rcu);
 
 
 
 660
 661	INIT_WORK(&device->rcu_work, __free_device);
 662	schedule_work(&device->rcu_work);
 663}
 664
 665static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 666{
 667	struct btrfs_device *device;
 
 
 
 
 
 
 
 
 
 668
 669	if (--fs_devices->opened > 0)
 670		return 0;
 
 
 671
 672	mutex_lock(&fs_devices->device_list_mutex);
 673	list_for_each_entry(device, &fs_devices->devices, dev_list) {
 674		struct btrfs_device *new_device;
 675		struct rcu_string *name;
 
 
 
 676
 677		if (device->bdev)
 678			fs_devices->open_devices--;
 
 679
 680		if (device->writeable &&
 681		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
 682			list_del_init(&device->dev_alloc_list);
 683			fs_devices->rw_devices--;
 684		}
 
 
 
 
 
 
 
 685
 686		if (device->can_discard)
 687			fs_devices->num_can_discard--;
 688		if (device->missing)
 689			fs_devices->missing_devices--;
 
 
 690
 691		new_device = btrfs_alloc_device(NULL, &device->devid,
 692						device->uuid);
 693		BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
 694
 695		/* Safe because we are under uuid_mutex */
 696		if (device->name) {
 697			name = rcu_string_strdup(device->name->str, GFP_NOFS);
 698			BUG_ON(!name); /* -ENOMEM */
 699			rcu_assign_pointer(new_device->name, name);
 700		}
 701
 702		list_replace_rcu(&device->dev_list, &new_device->dev_list);
 703		new_device->fs_devices = device->fs_devices;
 704
 705		call_rcu(&device->rcu, free_device);
 706	}
 707	mutex_unlock(&fs_devices->device_list_mutex);
 708
 709	WARN_ON(fs_devices->open_devices);
 710	WARN_ON(fs_devices->rw_devices);
 711	fs_devices->opened = 0;
 712	fs_devices->seeding = 0;
 713
 714	return 0;
 715}
 716
 717int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 718{
 719	struct btrfs_fs_devices *seed_devices = NULL;
 720	int ret;
 721
 722	mutex_lock(&uuid_mutex);
 723	ret = __btrfs_close_devices(fs_devices);
 724	if (!fs_devices->opened) {
 725		seed_devices = fs_devices->seed;
 726		fs_devices->seed = NULL;
 727	}
 728	mutex_unlock(&uuid_mutex);
 729
 730	while (seed_devices) {
 731		fs_devices = seed_devices;
 732		seed_devices = fs_devices->seed;
 733		__btrfs_close_devices(fs_devices);
 734		free_fs_devices(fs_devices);
 735	}
 736	/*
 737	 * Wait for rcu kworkers under __btrfs_close_devices
 738	 * to finish all blkdev_puts so device is really
 739	 * free when umount is done.
 740	 */
 741	rcu_barrier();
 742	return ret;
 743}
 744
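/*
 * Open every device on the list, verify devid and uuid against the on-disk
 * superblock, and remember the device with the highest generation as
 * latest_bdev. Devices that fail to open are simply skipped.
 */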
 745static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 746				fmode_t flags, void *holder)
 747{
 748	struct request_queue *q;
 749	struct block_device *bdev;
 750	struct list_head *head = &fs_devices->devices;
 751	struct btrfs_device *device;
 752	struct block_device *latest_bdev = NULL;
 753	struct buffer_head *bh;
 754	struct btrfs_super_block *disk_super;
 755	u64 latest_devid = 0;
 756	u64 latest_transid = 0;
 757	u64 devid;
 758	int seeding = 1;
 759	int ret = 0;
 760
 761	flags |= FMODE_EXCL;
 762
 763	list_for_each_entry(device, head, dev_list) {
 764		if (device->bdev)
 765			continue;
 766		if (!device->name)
 767			continue;
 768
 769		/* Just open everything we can; ignore failures here */
 770		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
 771					    &bdev, &bh))
 772			continue;
 773
 774		disk_super = (struct btrfs_super_block *)bh->b_data;
 775		devid = btrfs_stack_device_id(&disk_super->dev_item);
 776		if (devid != device->devid)
 777			goto error_brelse;
 778
 779		if (memcmp(device->uuid, disk_super->dev_item.uuid,
 780			   BTRFS_UUID_SIZE))
 781			goto error_brelse;
 782
 783		device->generation = btrfs_super_generation(disk_super);
 784		if (!latest_transid || device->generation > latest_transid) {
 785			latest_devid = devid;
 786			latest_transid = device->generation;
 787			latest_bdev = bdev;
 788		}
 789
 790		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
 791			device->writeable = 0;
 792		} else {
 793			device->writeable = !bdev_read_only(bdev);
 794			seeding = 0;
 795		}
 796
 797		q = bdev_get_queue(bdev);
 798		if (blk_queue_discard(q)) {
 799			device->can_discard = 1;
 800			fs_devices->num_can_discard++;
 801		}
 802
 803		device->bdev = bdev;
 804		device->in_fs_metadata = 0;
 805		device->mode = flags;
 806
 807		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
 808			fs_devices->rotating = 1;
 809
 810		fs_devices->open_devices++;
 811		if (device->writeable &&
 812		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
 813			fs_devices->rw_devices++;
 814			list_add(&device->dev_alloc_list,
 815				 &fs_devices->alloc_list);
 816		}
 817		brelse(bh);
 818		continue;
 819
 820error_brelse:
 821		brelse(bh);
 822		blkdev_put(bdev, flags);
 823		continue;
 824	}
 825	if (fs_devices->open_devices == 0) {
 826		ret = -EINVAL;
 827		goto out;
 828	}
 829	fs_devices->seeding = seeding;
 830	fs_devices->opened = 1;
 831	fs_devices->latest_bdev = latest_bdev;
 832	fs_devices->latest_devid = latest_devid;
 833	fs_devices->latest_trans = latest_transid;
 834	fs_devices->total_rw_bytes = 0;
 835out:
 836	return ret;
 837}
 838
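/*
 * Open all devices of @fs_devices, or just bump the open count if they are
 * already open.
 */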
 839int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 840		       fmode_t flags, void *holder)
 841{
 842	int ret;
 843
 844	mutex_lock(&uuid_mutex);
 845	if (fs_devices->opened) {
 846		fs_devices->opened++;
 847		ret = 0;
 848	} else {
 849		ret = __btrfs_open_devices(fs_devices, flags, holder);
 850	}
 851	mutex_unlock(&uuid_mutex);
 852	return ret;
 853}
 854
 855/*
 856 * Look for a btrfs signature on a device. This may be called out of the
 857 * mount path and we are not allowed to call set_blocksize during the scan.
 858 * The superblock is read via the pagecache.
 859 */
 860int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 861			  struct btrfs_fs_devices **fs_devices_ret)
 862{
 863	struct btrfs_super_block *disk_super;
 864	struct block_device *bdev;
 865	struct page *page;
 866	void *p;
 867	int ret = -EINVAL;
 868	u64 devid;
 869	u64 transid;
 870	u64 total_devices;
 871	u64 bytenr;
 872	pgoff_t index;
 873
 874	/*
 875	 * we would like to check all the supers, but that would make
 876	 * a btrfs mount succeed after a mkfs from a different FS.
 877	 * So, we need to add a special mount option to scan for
 878	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
 879	 */
 880	bytenr = btrfs_sb_offset(0);
 881	flags |= FMODE_EXCL;
 882	mutex_lock(&uuid_mutex);
 883
 884	bdev = blkdev_get_by_path(path, flags, holder);
 885
 886	if (IS_ERR(bdev)) {
 887		ret = PTR_ERR(bdev);
 888		goto error;
 889	}
 890
 891	/* make sure our super fits in the device */
 892	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
 893		goto error_bdev_put;
 894
 895	/* make sure our super fits in the page */
 896	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
 897		goto error_bdev_put;
 898
 899	/* make sure our super doesn't straddle pages on disk */
 900	index = bytenr >> PAGE_CACHE_SHIFT;
 901	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
 902		goto error_bdev_put;
 903
 904	/* pull in the page with our super */
 905	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
 906				   index, GFP_NOFS);
 907
 908	if (IS_ERR_OR_NULL(page))
 909		goto error_bdev_put;
 910
 911	p = kmap(page);
 912
 913	/* align our pointer to the offset of the super block */
 914	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
 915
 916	if (btrfs_super_bytenr(disk_super) != bytenr ||
 917	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
 918		goto error_unmap;
 919
 920	devid = btrfs_stack_device_id(&disk_super->dev_item);
 921	transid = btrfs_super_generation(disk_super);
 922	total_devices = btrfs_super_num_devices(disk_super);
 923
 924	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
 925	if (ret > 0) {
 926		if (disk_super->label[0]) {
 927			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
 928				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
 929			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
 930		} else {
 931			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
 932		}
 933
 934		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
 935		ret = 0;
 936	}
 937	if (!ret && fs_devices_ret)
 938		(*fs_devices_ret)->total_devices = total_devices;
 939
 940error_unmap:
 941	kunmap(page);
 942	page_cache_release(page);
 943
 944error_bdev_put:
 945	blkdev_put(bdev, flags);
 946error:
 947	mutex_unlock(&uuid_mutex);
 948	return ret;
 949}
 950
 951/* helper to account the used device space in the range */
 952int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 953				   u64 end, u64 *length)
 954{
 955	struct btrfs_key key;
 956	struct btrfs_root *root = device->dev_root;
 957	struct btrfs_dev_extent *dev_extent;
 958	struct btrfs_path *path;
 959	u64 extent_end;
 960	int ret;
 961	int slot;
 962	struct extent_buffer *l;
 963
 964	*length = 0;
 965
 966	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
 967		return 0;
 968
 969	path = btrfs_alloc_path();
 970	if (!path)
 971		return -ENOMEM;
 972	path->reada = 2;
 973
 974	key.objectid = device->devid;
 975	key.offset = start;
 976	key.type = BTRFS_DEV_EXTENT_KEY;
 977
 978	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 979	if (ret < 0)
 980		goto out;
 981	if (ret > 0) {
 982		ret = btrfs_previous_item(root, path, key.objectid, key.type);
 983		if (ret < 0)
 984			goto out;
 985	}
 986
 987	while (1) {
 988		l = path->nodes[0];
 989		slot = path->slots[0];
 990		if (slot >= btrfs_header_nritems(l)) {
 991			ret = btrfs_next_leaf(root, path);
 992			if (ret == 0)
 993				continue;
 994			if (ret < 0)
 995				goto out;
 996
 997			break;
 998		}
 999		btrfs_item_key_to_cpu(l, &key, slot);
1000
1001		if (key.objectid < device->devid)
1002			goto next;
1003
1004		if (key.objectid > device->devid)
1005			break;
1006
1007		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
1008			goto next;
1009
1010		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1011		extent_end = key.offset + btrfs_dev_extent_length(l,
1012								  dev_extent);
1013		if (key.offset <= start && extent_end > end) {
1014			*length = end - start + 1;
1015			break;
1016		} else if (key.offset <= start && extent_end > start)
1017			*length += extent_end - start;
1018		else if (key.offset > start && extent_end <= end)
1019			*length += extent_end - key.offset;
1020		else if (key.offset > start && key.offset <= end) {
1021			*length += end - key.offset + 1;
1022			break;
1023		} else if (key.offset > end)
1024			break;
1025
1026next:
1027		path->slots[0]++;
1028	}
1029	ret = 0;
1030out:
1031	btrfs_free_path(path);
1032	return ret;
1033}
1034
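/*
 * Check whether the device range [*start, *start + len) overlaps a chunk
 * that is still pending in this transaction. If so, advance *start past the
 * conflicting stripe and return 1 so the caller can retry its search.
 */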
1035static int contains_pending_extent(struct btrfs_trans_handle *trans,
1036				   struct btrfs_device *device,
1037				   u64 *start, u64 len)
1038{
1039	struct extent_map *em;
1040	int ret = 0;
1041
1042	list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
1043		struct map_lookup *map;
1044		int i;
1045
1046		map = (struct map_lookup *)em->bdev;
1047		for (i = 0; i < map->num_stripes; i++) {
1048			if (map->stripes[i].dev != device)
1049				continue;
1050			if (map->stripes[i].physical >= *start + len ||
1051			    map->stripes[i].physical + em->orig_block_len <=
1052			    *start)
1053				continue;
1054			*start = map->stripes[i].physical +
1055				em->orig_block_len;
1056			ret = 1;
1057		}
1058	}
1059
1060	return ret;
1061}
1062
1063
1064	/*
1065	 * find_free_dev_extent - find free space on the specified device
1066	 * @device:	the device on which we search for free space
1067	 * @num_bytes:	the size of the free space that we need
1068	 * @start:	store the start of the free space
1069	 * @len:	the size of the free space that we find, or the size of
1070	 * 		the largest free space if we don't find anything suitable
1071	 *
1072	 * This uses a pretty simple search; the expectation is that it is
1073	 * called very infrequently and that a given device has a small number
1074	 * of extents.
1075	 *
1076	 * @start is used to store the start of the free space if we find any.
1077	 * If we don't find suitable free space, it is used to store the start
1078	 * position of the largest free space instead.
1079	 *
1080	 * @len is used to store the size of the free space that we find.
1081	 * But if we don't find suitable free space, it is used to store the
1082	 * size of the largest free space.
1083	 */
1084int find_free_dev_extent(struct btrfs_trans_handle *trans,
1085			 struct btrfs_device *device, u64 num_bytes,
1086			 u64 *start, u64 *len)
1087{
1088	struct btrfs_key key;
1089	struct btrfs_root *root = device->dev_root;
1090	struct btrfs_dev_extent *dev_extent;
1091	struct btrfs_path *path;
1092	u64 hole_size;
1093	u64 max_hole_start;
1094	u64 max_hole_size;
1095	u64 extent_end;
1096	u64 search_start;
1097	u64 search_end = device->total_bytes;
1098	int ret;
1099	int slot;
1100	struct extent_buffer *l;
1101
1102	/* FIXME use last free of some kind */
1103
1104	/* we don't want to overwrite the superblock on the drive,
1105	 * so we make sure to start at an offset of at least 1MB
1106	 */
1107	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1108
1109	path = btrfs_alloc_path();
1110	if (!path)
1111		return -ENOMEM;
1112again:
1113	max_hole_start = search_start;
1114	max_hole_size = 0;
1115	hole_size = 0;
1116
1117	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1118		ret = -ENOSPC;
1119		goto out;
1120	}
1121
1122	path->reada = 2;
1123	path->search_commit_root = 1;
1124	path->skip_locking = 1;
1125
1126	key.objectid = device->devid;
1127	key.offset = search_start;
1128	key.type = BTRFS_DEV_EXTENT_KEY;
1129
1130	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1131	if (ret < 0)
1132		goto out;
1133	if (ret > 0) {
1134		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1135		if (ret < 0)
1136			goto out;
1137	}
1138
1139	while (1) {
1140		l = path->nodes[0];
1141		slot = path->slots[0];
1142		if (slot >= btrfs_header_nritems(l)) {
1143			ret = btrfs_next_leaf(root, path);
1144			if (ret == 0)
1145				continue;
1146			if (ret < 0)
1147				goto out;
1148
1149			break;
1150		}
1151		btrfs_item_key_to_cpu(l, &key, slot);
1152
1153		if (key.objectid < device->devid)
1154			goto next;
1155
1156		if (key.objectid > device->devid)
1157			break;
1158
1159		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
1160			goto next;
1161
1162		if (key.offset > search_start) {
1163			hole_size = key.offset - search_start;
1164
1165			/*
1166			 * Have to check before we set max_hole_start, otherwise
1167			 * we could end up sending back this offset anyway.
1168			 */
1169			if (contains_pending_extent(trans, device,
1170						    &search_start,
1171						    hole_size))
1172				hole_size = 0;
1173
1174			if (hole_size > max_hole_size) {
1175				max_hole_start = search_start;
1176				max_hole_size = hole_size;
1177			}
1178
1179			/*
1180			 * If this free space is greater than what we need,
1181			 * it must be the max free space that we have found
1182			 * until now, so max_hole_start must point to the start
1183			 * of this free space and the length of this free space
1184			 * is stored in max_hole_size. Thus, we return
1185			 * max_hole_start and max_hole_size and go back to the
1186			 * caller.
1187			 */
1188			if (hole_size >= num_bytes) {
1189				ret = 0;
1190				goto out;
1191			}
1192		}
1193
1194		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1195		extent_end = key.offset + btrfs_dev_extent_length(l,
1196								  dev_extent);
1197		if (extent_end > search_start)
1198			search_start = extent_end;
1199next:
1200		path->slots[0]++;
1201		cond_resched();
1202	}
1203
1204	/*
1205	 * At this point, search_start should be the end of
1206	 * allocated dev extents, and when shrinking the device,
1207	 * search_end may be smaller than search_start.
1208	 */
1209	if (search_end > search_start)
1210		hole_size = search_end - search_start;
1211
1212	if (hole_size > max_hole_size) {
1213		max_hole_start = search_start;
1214		max_hole_size = hole_size;
1215	}
1216
1217	if (contains_pending_extent(trans, device, &search_start, hole_size)) {
1218		btrfs_release_path(path);
1219		goto again;
1220	}
1221
1222	/* See above. */
1223	if (hole_size < num_bytes)
1224		ret = -ENOSPC;
1225	else
1226		ret = 0;
1227
1228out:
1229	btrfs_free_path(path);
1230	*start = max_hole_start;
1231	if (len)
1232		*len = max_hole_size;
1233	return ret;
1234}
1235
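/*
 * Remove the dev extent item that covers @start on @device and return the
 * freed bytes to the free_chunk_space counter.
 */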
1236static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1237			  struct btrfs_device *device,
1238			  u64 start)
1239{
1240	int ret;
1241	struct btrfs_path *path;
1242	struct btrfs_root *root = device->dev_root;
1243	struct btrfs_key key;
1244	struct btrfs_key found_key;
1245	struct extent_buffer *leaf = NULL;
1246	struct btrfs_dev_extent *extent = NULL;
1247
1248	path = btrfs_alloc_path();
1249	if (!path)
1250		return -ENOMEM;
1251
1252	key.objectid = device->devid;
1253	key.offset = start;
1254	key.type = BTRFS_DEV_EXTENT_KEY;
1255again:
1256	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1257	if (ret > 0) {
1258		ret = btrfs_previous_item(root, path, key.objectid,
1259					  BTRFS_DEV_EXTENT_KEY);
1260		if (ret)
1261			goto out;
1262		leaf = path->nodes[0];
1263		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1264		extent = btrfs_item_ptr(leaf, path->slots[0],
1265					struct btrfs_dev_extent);
1266		BUG_ON(found_key.offset > start || found_key.offset +
1267		       btrfs_dev_extent_length(leaf, extent) < start);
1268		key = found_key;
1269		btrfs_release_path(path);
1270		goto again;
1271	} else if (ret == 0) {
1272		leaf = path->nodes[0];
1273		extent = btrfs_item_ptr(leaf, path->slots[0],
1274					struct btrfs_dev_extent);
1275	} else {
1276		btrfs_error(root->fs_info, ret, "Slot search failed");
1277		goto out;
1278	}
1279
1280	if (device->bytes_used > 0) {
1281		u64 len = btrfs_dev_extent_length(leaf, extent);
1282		device->bytes_used -= len;
1283		spin_lock(&root->fs_info->free_chunk_lock);
1284		root->fs_info->free_chunk_space += len;
1285		spin_unlock(&root->fs_info->free_chunk_lock);
1286	}
1287	ret = btrfs_del_item(trans, root, path);
1288	if (ret) {
1289		btrfs_error(root->fs_info, ret,
1290			    "Failed to remove dev extent item");
1291	}
1292out:
1293	btrfs_free_path(path);
1294	return ret;
1295}
1296
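/*
 * Insert a dev extent item describing the stripe of the chunk at
 * @chunk_offset that lives at @start on @device.
 */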
1297static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1298				  struct btrfs_device *device,
1299				  u64 chunk_tree, u64 chunk_objectid,
1300				  u64 chunk_offset, u64 start, u64 num_bytes)
1301{
1302	int ret;
1303	struct btrfs_path *path;
1304	struct btrfs_root *root = device->dev_root;
1305	struct btrfs_dev_extent *extent;
1306	struct extent_buffer *leaf;
1307	struct btrfs_key key;
1308
1309	WARN_ON(!device->in_fs_metadata);
1310	WARN_ON(device->is_tgtdev_for_dev_replace);
1311	path = btrfs_alloc_path();
1312	if (!path)
1313		return -ENOMEM;
1314
1315	key.objectid = device->devid;
1316	key.offset = start;
1317	key.type = BTRFS_DEV_EXTENT_KEY;
1318	ret = btrfs_insert_empty_item(trans, root, path, &key,
1319				      sizeof(*extent));
1320	if (ret)
1321		goto out;
1322
1323	leaf = path->nodes[0];
1324	extent = btrfs_item_ptr(leaf, path->slots[0],
1325				struct btrfs_dev_extent);
1326	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1327	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1328	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1329
1330	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1331		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
1332
1333	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1334	btrfs_mark_buffer_dirty(leaf);
1335out:
1336	btrfs_free_path(path);
1337	return ret;
1338}
1339
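/* Return the first logical address beyond all currently mapped chunks. */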
1340static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1341{
1342	struct extent_map_tree *em_tree;
1343	struct extent_map *em;
1344	struct rb_node *n;
1345	u64 ret = 0;
1346
1347	em_tree = &fs_info->mapping_tree.map_tree;
1348	read_lock(&em_tree->lock);
1349	n = rb_last(&em_tree->map);
1350	if (n) {
1351		em = rb_entry(n, struct extent_map, rb_node);
1352		ret = em->start + em->len;
1353	}
1354	read_unlock(&em_tree->lock);
1355
1356	return ret;
1357}
1358
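/*
 * Find the highest devid present in the chunk tree and return the next free
 * one in @devid_ret.
 */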
1359static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1360				    u64 *devid_ret)
1361{
1362	int ret;
1363	struct btrfs_key key;
1364	struct btrfs_key found_key;
1365	struct btrfs_path *path;
1366
1367	path = btrfs_alloc_path();
1368	if (!path)
1369		return -ENOMEM;
1370
1371	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1372	key.type = BTRFS_DEV_ITEM_KEY;
1373	key.offset = (u64)-1;
1374
1375	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1376	if (ret < 0)
1377		goto error;
1378
1379	BUG_ON(ret == 0); /* Corruption */
1380
1381	ret = btrfs_previous_item(fs_info->chunk_root, path,
1382				  BTRFS_DEV_ITEMS_OBJECTID,
1383				  BTRFS_DEV_ITEM_KEY);
1384	if (ret) {
1385		*devid_ret = 1;
1386	} else {
1387		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1388				      path->slots[0]);
1389		*devid_ret = found_key.offset + 1;
1390	}
1391	ret = 0;
1392error:
1393	btrfs_free_path(path);
1394	return ret;
1395}
1396
1397/*
1398 * the device information is stored in the chunk root
1399 * the btrfs_device struct should be fully filled in
1400 */
1401static int btrfs_add_device(struct btrfs_trans_handle *trans,
1402			    struct btrfs_root *root,
1403			    struct btrfs_device *device)
1404{
1405	int ret;
1406	struct btrfs_path *path;
1407	struct btrfs_dev_item *dev_item;
1408	struct extent_buffer *leaf;
1409	struct btrfs_key key;
1410	unsigned long ptr;
1411
1412	root = root->fs_info->chunk_root;
1413
1414	path = btrfs_alloc_path();
1415	if (!path)
1416		return -ENOMEM;
1417
1418	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1419	key.type = BTRFS_DEV_ITEM_KEY;
1420	key.offset = device->devid;
1421
1422	ret = btrfs_insert_empty_item(trans, root, path, &key,
1423				      sizeof(*dev_item));
1424	if (ret)
1425		goto out;
1426
1427	leaf = path->nodes[0];
1428	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1429
1430	btrfs_set_device_id(leaf, dev_item, device->devid);
1431	btrfs_set_device_generation(leaf, dev_item, 0);
1432	btrfs_set_device_type(leaf, dev_item, device->type);
1433	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1434	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1435	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1436	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1437	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1438	btrfs_set_device_group(leaf, dev_item, 0);
1439	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1440	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1441	btrfs_set_device_start_offset(leaf, dev_item, 0);
1442
1443	ptr = btrfs_device_uuid(dev_item);
1444	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1445	ptr = btrfs_device_fsid(dev_item);
1446	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1447	btrfs_mark_buffer_dirty(leaf);
1448
1449	ret = 0;
1450out:
1451	btrfs_free_path(path);
1452	return ret;
1453}
1454
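/*
 * Delete the dev item of @device from the chunk tree, in a transaction of
 * its own.
 */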
1455static int btrfs_rm_dev_item(struct btrfs_root *root,
1456			     struct btrfs_device *device)
1457{
1458	int ret;
1459	struct btrfs_path *path;
1460	struct btrfs_key key;
1461	struct btrfs_trans_handle *trans;
1462
1463	root = root->fs_info->chunk_root;
1464
1465	path = btrfs_alloc_path();
1466	if (!path)
1467		return -ENOMEM;
1468
1469	trans = btrfs_start_transaction(root, 0);
1470	if (IS_ERR(trans)) {
1471		btrfs_free_path(path);
1472		return PTR_ERR(trans);
1473	}
1474	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1475	key.type = BTRFS_DEV_ITEM_KEY;
1476	key.offset = device->devid;
1477	lock_chunks(root);
1478
1479	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1480	if (ret < 0)
1481		goto out;
1482
1483	if (ret > 0) {
1484		ret = -ENOENT;
1485		goto out;
1486	}
1487
1488	ret = btrfs_del_item(trans, root, path);
1489	if (ret)
1490		goto out;
1491out:
1492	btrfs_free_path(path);
1493	unlock_chunks(root);
1494	btrfs_commit_transaction(trans, root);
1495	return ret;
1496}
1497
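/*
 * Remove a device from a mounted filesystem: verify the redundancy
 * constraints of the allocation profiles still hold, migrate all chunks off
 * the device with btrfs_shrink_device(), delete its dev item and list
 * entries, and finally wipe the superblock magic so the disk is no longer
 * recognized as part of the filesystem.
 */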
1498int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1499{
1500	struct btrfs_device *device;
1501	struct btrfs_device *next_device;
1502	struct block_device *bdev;
1503	struct buffer_head *bh = NULL;
1504	struct btrfs_super_block *disk_super;
1505	struct btrfs_fs_devices *cur_devices;
1506	u64 all_avail;
1507	u64 devid;
1508	u64 num_devices;
1509	u8 *dev_uuid;
1510	unsigned seq;
1511	int ret = 0;
1512	bool clear_super = false;
1513
1514	mutex_lock(&uuid_mutex);
1515
1516	do {
1517		seq = read_seqbegin(&root->fs_info->profiles_lock);
1518
1519		all_avail = root->fs_info->avail_data_alloc_bits |
1520			    root->fs_info->avail_system_alloc_bits |
1521			    root->fs_info->avail_metadata_alloc_bits;
1522	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
1523
1524	num_devices = root->fs_info->fs_devices->num_devices;
1525	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1526	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1527		WARN_ON(num_devices < 1);
1528		num_devices--;
1529	}
1530	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1531
1532	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1533		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1534		goto out;
1535	}
1536
1537	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1538		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1539		goto out;
1540	}
1541
1542	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1543	    root->fs_info->fs_devices->rw_devices <= 2) {
1544		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1545		goto out;
1546	}
1547	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1548	    root->fs_info->fs_devices->rw_devices <= 3) {
1549		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1550		goto out;
1551	}
1552
1553	if (strcmp(device_path, "missing") == 0) {
1554		struct list_head *devices;
1555		struct btrfs_device *tmp;
1556
1557		device = NULL;
1558		devices = &root->fs_info->fs_devices->devices;
1559		/*
1560		 * It is safe to read the devices since the volume_mutex
1561		 * is held.
1562		 */
1563		list_for_each_entry(tmp, devices, dev_list) {
1564			if (tmp->in_fs_metadata &&
1565			    !tmp->is_tgtdev_for_dev_replace &&
1566			    !tmp->bdev) {
1567				device = tmp;
1568				break;
1569			}
1570		}
1571		bdev = NULL;
1572		bh = NULL;
1573		disk_super = NULL;
1574		if (!device) {
1575			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1576			goto out;
1577		}
1578	} else {
1579		ret = btrfs_get_bdev_and_sb(device_path,
1580					    FMODE_WRITE | FMODE_EXCL,
1581					    root->fs_info->bdev_holder, 0,
1582					    &bdev, &bh);
1583		if (ret)
1584			goto out;
1585		disk_super = (struct btrfs_super_block *)bh->b_data;
1586		devid = btrfs_stack_device_id(&disk_super->dev_item);
1587		dev_uuid = disk_super->dev_item.uuid;
1588		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1589					   disk_super->fsid);
1590		if (!device) {
1591			ret = -ENOENT;
1592			goto error_brelse;
1593		}
1594	}
1595
1596	if (device->is_tgtdev_for_dev_replace) {
1597		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1598		goto error_brelse;
1599	}
1600
1601	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1602		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1603		goto error_brelse;
1604	}
1605
1606	if (device->writeable) {
1607		lock_chunks(root);
1608		list_del_init(&device->dev_alloc_list);
1609		unlock_chunks(root);
1610		root->fs_info->fs_devices->rw_devices--;
1611		clear_super = true;
1612	}
1613
1614	mutex_unlock(&uuid_mutex);
1615	ret = btrfs_shrink_device(device, 0);
1616	mutex_lock(&uuid_mutex);
1617	if (ret)
1618		goto error_undo;
1619
1620	/*
1621	 * TODO: the superblock still includes this device in its num_devices
1622	 * counter although write_all_supers() is not locked out. This
1623	 * could give a filesystem state which requires a degraded mount.
1624	 */
1625	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1626	if (ret)
1627		goto error_undo;
1628
1629	spin_lock(&root->fs_info->free_chunk_lock);
1630	root->fs_info->free_chunk_space = device->total_bytes -
1631		device->bytes_used;
1632	spin_unlock(&root->fs_info->free_chunk_lock);
1633
1634	device->in_fs_metadata = 0;
1635	btrfs_scrub_cancel_dev(root->fs_info, device);
1636
1637	/*
1638	 * the device list mutex makes sure that we don't change
1639	 * the device list while someone else is writing out all
1640	 * the device supers. Whoever is writing all supers should
1641	 * lock the device list mutex before getting the number of
1642	 * devices in the super block (super_copy). Conversely,
1643	 * whoever updates the number of devices in the super block
1644	 * (super_copy) should hold the device list mutex.
1645	 */
1646
1647	cur_devices = device->fs_devices;
1648	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1649	list_del_rcu(&device->dev_list);
1650
1651	device->fs_devices->num_devices--;
1652	device->fs_devices->total_devices--;
1653
1654	if (device->missing)
1655		root->fs_info->fs_devices->missing_devices--;
1656
1657	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1658				 struct btrfs_device, dev_list);
1659	if (device->bdev == root->fs_info->sb->s_bdev)
1660		root->fs_info->sb->s_bdev = next_device->bdev;
1661	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1662		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1663
1664	if (device->bdev)
1665		device->fs_devices->open_devices--;
1666
1667	call_rcu(&device->rcu, free_device);
1668
1669	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1670	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1671	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1672
1673	if (cur_devices->open_devices == 0) {
1674		struct btrfs_fs_devices *fs_devices;
1675		fs_devices = root->fs_info->fs_devices;
1676		while (fs_devices) {
1677			if (fs_devices->seed == cur_devices)
1678				break;
1679			fs_devices = fs_devices->seed;
1680		}
1681		fs_devices->seed = cur_devices->seed;
1682		cur_devices->seed = NULL;
1683		lock_chunks(root);
1684		__btrfs_close_devices(cur_devices);
1685		unlock_chunks(root);
1686		free_fs_devices(cur_devices);
1687	}
1688
1689	root->fs_info->num_tolerated_disk_barrier_failures =
1690		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1691
1692	/*
1693	 * at this point, the device is zero sized.  We want to
1694	 * remove it from the devices list and zero out the old super
1695	 */
1696	if (clear_super && disk_super) {
1697		/* make sure this device isn't detected as part of
1698		 * the FS anymore
1699		 */
1700		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1701		set_buffer_dirty(bh);
1702		sync_dirty_buffer(bh);
1703	}
1704
1705	ret = 0;
1706
1707	/* Notify udev that device has changed */
1708	if (bdev)
1709		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1710
1711error_brelse:
1712	brelse(bh);
1713	if (bdev)
1714		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1715out:
1716	mutex_unlock(&uuid_mutex);
1717	return ret;
1718error_undo:
1719	if (device->writeable) {
1720		lock_chunks(root);
1721		list_add(&device->dev_alloc_list,
1722			 &root->fs_info->fs_devices->alloc_list);
1723		unlock_chunks(root);
1724		root->fs_info->fs_devices->rw_devices++;
1725	}
1726	goto error_brelse;
1727}
1728
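/*
 * Drop the source device of a dev-replace operation from the device lists;
 * the caller must hold the device_list_mutex.
 */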
1729void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1730				 struct btrfs_device *srcdev)
1731{
1732	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1733
1734	list_del_rcu(&srcdev->dev_list);
1735	list_del_rcu(&srcdev->dev_alloc_list);
1736	fs_info->fs_devices->num_devices--;
1737	if (srcdev->missing) {
1738		fs_info->fs_devices->missing_devices--;
1739		fs_info->fs_devices->rw_devices++;
1740	}
1741	if (srcdev->can_discard)
1742		fs_info->fs_devices->num_can_discard--;
1743	if (srcdev->bdev) {
1744		fs_info->fs_devices->open_devices--;
1745
1746		/* zero out the old super */
1747		btrfs_scratch_superblock(srcdev);
1748	}
1749
1750	call_rcu(&srcdev->rcu, free_device);
1751}
1752
1753void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1754				      struct btrfs_device *tgtdev)
1755{
1756	struct btrfs_device *next_device;
1757
1758	WARN_ON(!tgtdev);
1759	mutex_lock(&fs_info->fs_devices->device_list_mutex);
1760	if (tgtdev->bdev) {
1761		btrfs_scratch_superblock(tgtdev);
1762		fs_info->fs_devices->open_devices--;
1763	}
1764	fs_info->fs_devices->num_devices--;
1765	if (tgtdev->can_discard)
1766		fs_info->fs_devices->num_can_discard--;
1767
1768	next_device = list_entry(fs_info->fs_devices->devices.next,
1769				 struct btrfs_device, dev_list);
1770	if (tgtdev->bdev == fs_info->sb->s_bdev)
1771		fs_info->sb->s_bdev = next_device->bdev;
1772	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1773		fs_info->fs_devices->latest_bdev = next_device->bdev;
1774	list_del_rcu(&tgtdev->dev_list);
1775
1776	call_rcu(&tgtdev->rcu, free_device);
1777
1778	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1779}
1780
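/*
 * Read the superblock at @device_path and look up the corresponding
 * btrfs_device by devid and uuid.
 */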
1781static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1782				     struct btrfs_device **device)
1783{
1784	int ret = 0;
1785	struct btrfs_super_block *disk_super;
1786	u64 devid;
1787	u8 *dev_uuid;
1788	struct block_device *bdev;
1789	struct buffer_head *bh;
1790
1791	*device = NULL;
1792	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1793				    root->fs_info->bdev_holder, 0, &bdev, &bh);
1794	if (ret)
1795		return ret;
1796	disk_super = (struct btrfs_super_block *)bh->b_data;
1797	devid = btrfs_stack_device_id(&disk_super->dev_item);
1798	dev_uuid = disk_super->dev_item.uuid;
1799	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1800				    disk_super->fsid);
1801	brelse(bh);
1802	if (!*device)
1803		ret = -ENOENT;
1804	blkdev_put(bdev, FMODE_READ);
1805	return ret;
1806}
1807
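/*
 * Resolve the "missing" keyword to the first device that is recorded in the
 * metadata but has no block device, otherwise fall back to a lookup by path.
 */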
1808int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1809					 char *device_path,
1810					 struct btrfs_device **device)
1811{
1812	*device = NULL;
1813	if (strcmp(device_path, "missing") == 0) {
1814		struct list_head *devices;
1815		struct btrfs_device *tmp;
1816
1817		devices = &root->fs_info->fs_devices->devices;
1818		/*
1819		 * It is safe to read the devices since the volume_mutex
1820		 * is held by the caller.
1821		 */
1822		list_for_each_entry(tmp, devices, dev_list) {
1823			if (tmp->in_fs_metadata && !tmp->bdev) {
1824				*device = tmp;
1825				break;
1826			}
1827		}
1828
1829		if (!*device) {
1830			btrfs_err(root->fs_info, "no missing device found");
1831			return -ENOENT;
1832		}
1833
1834		return 0;
1835	} else {
1836		return btrfs_find_device_by_path(root, device_path, device);
1837	}
1838}
1839
1840/*
1841 * Does all the dirty work required for changing the file system's UUID.
1842 */
1843static int btrfs_prepare_sprout(struct btrfs_root *root)
1844{
1845	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1846	struct btrfs_fs_devices *old_devices;
1847	struct btrfs_fs_devices *seed_devices;
1848	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1849	struct btrfs_device *device;
1850	u64 super_flags;
1851
1852	BUG_ON(!mutex_is_locked(&uuid_mutex));
1853	if (!fs_devices->seeding)
1854		return -EINVAL;
1855
1856	seed_devices = __alloc_fs_devices();
1857	if (IS_ERR(seed_devices))
1858		return PTR_ERR(seed_devices);
1859
1860	old_devices = clone_fs_devices(fs_devices);
1861	if (IS_ERR(old_devices)) {
1862		kfree(seed_devices);
1863		return PTR_ERR(old_devices);
1864	}
1865
1866	list_add(&old_devices->list, &fs_uuids);
1867
1868	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1869	seed_devices->opened = 1;
1870	INIT_LIST_HEAD(&seed_devices->devices);
1871	INIT_LIST_HEAD(&seed_devices->alloc_list);
1872	mutex_init(&seed_devices->device_list_mutex);
1873
1874	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1875	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1876			      synchronize_rcu);
1877
1878	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1879	list_for_each_entry(device, &seed_devices->devices, dev_list) {
1880		device->fs_devices = seed_devices;
1881	}
1882
1883	fs_devices->seeding = 0;
1884	fs_devices->num_devices = 0;
1885	fs_devices->open_devices = 0;
1886	fs_devices->total_devices = 0;
1887	fs_devices->seed = seed_devices;
1888
1889	generate_random_uuid(fs_devices->fsid);
1890	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1891	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1892	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1893
1894	super_flags = btrfs_super_flags(disk_super) &
1895		      ~BTRFS_SUPER_FLAG_SEEDING;
1896	btrfs_set_super_flags(disk_super, super_flags);
1897
1898	return 0;
1899}
1900
1901/*
1902 * Store the expected generation for seed devices in device items.
1903 */
1904static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1905			       struct btrfs_root *root)
1906{
1907	struct btrfs_path *path;
1908	struct extent_buffer *leaf;
1909	struct btrfs_dev_item *dev_item;
1910	struct btrfs_device *device;
1911	struct btrfs_key key;
1912	u8 fs_uuid[BTRFS_UUID_SIZE];
1913	u8 dev_uuid[BTRFS_UUID_SIZE];
1914	u64 devid;
1915	int ret;
1916
1917	path = btrfs_alloc_path();
1918	if (!path)
1919		return -ENOMEM;
1920
1921	root = root->fs_info->chunk_root;
1922	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1923	key.offset = 0;
1924	key.type = BTRFS_DEV_ITEM_KEY;
1925
1926	while (1) {
1927		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1928		if (ret < 0)
1929			goto error;
1930
1931		leaf = path->nodes[0];
1932next_slot:
1933		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1934			ret = btrfs_next_leaf(root, path);
1935			if (ret > 0)
1936				break;
1937			if (ret < 0)
1938				goto error;
1939			leaf = path->nodes[0];
1940			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1941			btrfs_release_path(path);
1942			continue;
1943		}
1944
1945		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1946		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1947		    key.type != BTRFS_DEV_ITEM_KEY)
1948			break;
1949
1950		dev_item = btrfs_item_ptr(leaf, path->slots[0],
1951					  struct btrfs_dev_item);
1952		devid = btrfs_device_id(leaf, dev_item);
1953		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
1954				   BTRFS_UUID_SIZE);
1955		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
1956				   BTRFS_UUID_SIZE);
1957		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1958					   fs_uuid);
1959		BUG_ON(!device); /* Logic error */
1960
1961		if (device->fs_devices->seeding) {
1962			btrfs_set_device_generation(leaf, dev_item,
1963						    device->generation);
1964			btrfs_mark_buffer_dirty(leaf);
1965		}
1966
1967		path->slots[0]++;
1968		goto next_slot;
1969	}
1970	ret = 0;
1971error:
1972	btrfs_free_path(path);
1973	return ret;
1974}
1975
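/*
 * Add a new device to a mounted filesystem (device add). If the filesystem
 * is a seed filesystem this also sprouts a new fsid and relocates the
 * system chunks afterwards.
 */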
1976int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1977{
1978	struct request_queue *q;
1979	struct btrfs_trans_handle *trans;
1980	struct btrfs_device *device;
1981	struct block_device *bdev;
1982	struct list_head *devices;
1983	struct super_block *sb = root->fs_info->sb;
1984	struct rcu_string *name;
1985	u64 total_bytes;
1986	int seeding_dev = 0;
1987	int ret = 0;
1988
1989	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1990		return -EROFS;
1991
1992	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1993				  root->fs_info->bdev_holder);
1994	if (IS_ERR(bdev))
1995		return PTR_ERR(bdev);
1996
1997	if (root->fs_info->fs_devices->seeding) {
1998		seeding_dev = 1;
1999		down_write(&sb->s_umount);
2000		mutex_lock(&uuid_mutex);
2001	}
2002
2003	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2004
2005	devices = &root->fs_info->fs_devices->devices;
2006
2007	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2008	list_for_each_entry(device, devices, dev_list) {
2009		if (device->bdev == bdev) {
2010			ret = -EEXIST;
2011			mutex_unlock(
2012				&root->fs_info->fs_devices->device_list_mutex);
2013			goto error;
2014		}
2015	}
2016	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2017
2018	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2019	if (IS_ERR(device)) {
2020		/* we can safely leave the fs_devices entry around */
2021		ret = PTR_ERR(device);
2022		goto error;
2023	}
2024
2025	name = rcu_string_strdup(device_path, GFP_NOFS);
2026	if (!name) {
2027		kfree(device);
2028		ret = -ENOMEM;
2029		goto error;
2030	}
2031	rcu_assign_pointer(device->name, name);
2032
2033	trans = btrfs_start_transaction(root, 0);
2034	if (IS_ERR(trans)) {
2035		rcu_string_free(device->name);
2036		kfree(device);
2037		ret = PTR_ERR(trans);
2038		goto error;
2039	}
2040
2041	lock_chunks(root);
2042
2043	q = bdev_get_queue(bdev);
2044	if (blk_queue_discard(q))
2045		device->can_discard = 1;
2046	device->writeable = 1;
2047	device->generation = trans->transid;
2048	device->io_width = root->sectorsize;
2049	device->io_align = root->sectorsize;
2050	device->sector_size = root->sectorsize;
2051	device->total_bytes = i_size_read(bdev->bd_inode);
2052	device->disk_total_bytes = device->total_bytes;
2053	device->dev_root = root->fs_info->dev_root;
2054	device->bdev = bdev;
2055	device->in_fs_metadata = 1;
2056	device->is_tgtdev_for_dev_replace = 0;
2057	device->mode = FMODE_EXCL;
2058	device->dev_stats_valid = 1;
2059	set_blocksize(device->bdev, 4096);
2060
2061	if (seeding_dev) {
2062		sb->s_flags &= ~MS_RDONLY;
2063		ret = btrfs_prepare_sprout(root);
2064		BUG_ON(ret); /* -ENOMEM */
2065	}
2066
2067	device->fs_devices = root->fs_info->fs_devices;
2068
2069	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2070	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2071	list_add(&device->dev_alloc_list,
2072		 &root->fs_info->fs_devices->alloc_list);
2073	root->fs_info->fs_devices->num_devices++;
2074	root->fs_info->fs_devices->open_devices++;
2075	root->fs_info->fs_devices->rw_devices++;
2076	root->fs_info->fs_devices->total_devices++;
2077	if (device->can_discard)
2078		root->fs_info->fs_devices->num_can_discard++;
2079	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2080
2081	spin_lock(&root->fs_info->free_chunk_lock);
2082	root->fs_info->free_chunk_space += device->total_bytes;
2083	spin_unlock(&root->fs_info->free_chunk_lock);
2084
2085	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2086		root->fs_info->fs_devices->rotating = 1;
2087
2088	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
2089	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2090				    total_bytes + device->total_bytes);
2091
2092	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
2093	btrfs_set_super_num_devices(root->fs_info->super_copy,
2094				    total_bytes + 1);
2095	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2096
2097	if (seeding_dev) {
2098		ret = init_first_rw_device(trans, root, device);
2099		if (ret) {
2100			btrfs_abort_transaction(trans, root, ret);
2101			goto error_trans;
2102		}
2103		ret = btrfs_finish_sprout(trans, root);
2104		if (ret) {
2105			btrfs_abort_transaction(trans, root, ret);
2106			goto error_trans;
2107		}
2108	} else {
2109		ret = btrfs_add_device(trans, root, device);
2110		if (ret) {
2111			btrfs_abort_transaction(trans, root, ret);
2112			goto error_trans;
2113		}
2114	}
2115
2116	/*
2117	 * we've got more storage, clear any full flags on the space
2118	 * infos
2119	 */
2120	btrfs_clear_space_info_full(root->fs_info);
2121
2122	unlock_chunks(root);
2123	root->fs_info->num_tolerated_disk_barrier_failures =
2124		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2125	ret = btrfs_commit_transaction(trans, root);
2126
2127	if (seeding_dev) {
2128		mutex_unlock(&uuid_mutex);
2129		up_write(&sb->s_umount);
2130
2131		if (ret) /* transaction commit */
2132			return ret;
2133
2134		ret = btrfs_relocate_sys_chunks(root);
2135		if (ret < 0)
2136			btrfs_error(root->fs_info, ret,
2137				    "Failed to relocate sys chunks after "
2138				    "device initialization. This can be fixed "
2139				    "using the \"btrfs balance\" command.");
2140		trans = btrfs_attach_transaction(root);
2141		if (IS_ERR(trans)) {
2142			if (PTR_ERR(trans) == -ENOENT)
2143				return 0;
2144			return PTR_ERR(trans);
2145		}
2146		ret = btrfs_commit_transaction(trans, root);
2147	}
2148
2149	return ret;
2150
2151error_trans:
2152	unlock_chunks(root);
2153	btrfs_end_transaction(trans, root);
2154	rcu_string_free(device->name);
2155	kfree(device);
2156error:
2157	blkdev_put(bdev, FMODE_EXCL);
2158	if (seeding_dev) {
2159		mutex_unlock(&uuid_mutex);
2160		up_write(&sb->s_umount);
2161	}
2162	return ret;
2163}
2164
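/*
 * Open and register the target device of a dev-replace operation; it gets
 * the reserved BTRFS_DEV_REPLACE_DEVID and is flagged as a replace target.
 */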
2165int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2166				  struct btrfs_device **device_out)
2167{
2168	struct request_queue *q;
2169	struct btrfs_device *device;
2170	struct block_device *bdev;
2171	struct btrfs_fs_info *fs_info = root->fs_info;
2172	struct list_head *devices;
2173	struct rcu_string *name;
2174	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2175	int ret = 0;
2176
2177	*device_out = NULL;
2178	if (fs_info->fs_devices->seeding)
2179		return -EINVAL;
2180
2181	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2182				  fs_info->bdev_holder);
2183	if (IS_ERR(bdev))
2184		return PTR_ERR(bdev);
2185
2186	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2187
2188	devices = &fs_info->fs_devices->devices;
2189	list_for_each_entry(device, devices, dev_list) {
2190		if (device->bdev == bdev) {
2191			ret = -EEXIST;
2192			goto error;
2193		}
2194	}
2195
2196	device = btrfs_alloc_device(NULL, &devid, NULL);
2197	if (IS_ERR(device)) {
2198		ret = PTR_ERR(device);
2199		goto error;
2200	}
2201
2202	name = rcu_string_strdup(device_path, GFP_NOFS);
2203	if (!name) {
2204		kfree(device);
2205		ret = -ENOMEM;
2206		goto error;
2207	}
2208	rcu_assign_pointer(device->name, name);
2209
2210	q = bdev_get_queue(bdev);
2211	if (blk_queue_discard(q))
2212		device->can_discard = 1;
2213	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2214	device->writeable = 1;
2215	device->generation = 0;
2216	device->io_width = root->sectorsize;
2217	device->io_align = root->sectorsize;
2218	device->sector_size = root->sectorsize;
2219	device->total_bytes = i_size_read(bdev->bd_inode);
2220	device->disk_total_bytes = device->total_bytes;
2221	device->dev_root = fs_info->dev_root;
2222	device->bdev = bdev;
2223	device->in_fs_metadata = 1;
2224	device->is_tgtdev_for_dev_replace = 1;
2225	device->mode = FMODE_EXCL;
2226	device->dev_stats_valid = 1;
2227	set_blocksize(device->bdev, 4096);
2228	device->fs_devices = fs_info->fs_devices;
2229	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2230	fs_info->fs_devices->num_devices++;
2231	fs_info->fs_devices->open_devices++;
2232	if (device->can_discard)
2233		fs_info->fs_devices->num_can_discard++;
2234	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2235
2236	*device_out = device;
2237	return ret;
2238
2239error:
2240	blkdev_put(bdev, FMODE_EXCL);
2241	return ret;
2242}
2243
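/*
 * Re-initialize the in-memory fields of a replace target device when
 * resuming a previously started dev-replace operation.
 */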
2244void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2245					      struct btrfs_device *tgtdev)
2246{
2247	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2248	tgtdev->io_width = fs_info->dev_root->sectorsize;
2249	tgtdev->io_align = fs_info->dev_root->sectorsize;
2250	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2251	tgtdev->dev_root = fs_info->dev_root;
2252	tgtdev->in_fs_metadata = 1;
2253}
2254
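/*
 * Write the in-memory state of @device back into its dev item in the chunk
 * tree.
 */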
2255static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2256					struct btrfs_device *device)
2257{
2258	int ret;
2259	struct btrfs_path *path;
2260	struct btrfs_root *root;
2261	struct btrfs_dev_item *dev_item;
2262	struct extent_buffer *leaf;
2263	struct btrfs_key key;
2264
2265	root = device->dev_root->fs_info->chunk_root;
2266
2267	path = btrfs_alloc_path();
2268	if (!path)
2269		return -ENOMEM;
2270
2271	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2272	key.type = BTRFS_DEV_ITEM_KEY;
2273	key.offset = device->devid;
2274
2275	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2276	if (ret < 0)
2277		goto out;
2278
2279	if (ret > 0) {
2280		ret = -ENOENT;
2281		goto out;
2282	}
2283
2284	leaf = path->nodes[0];
2285	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2286
2287	btrfs_set_device_id(leaf, dev_item, device->devid);
2288	btrfs_set_device_type(leaf, dev_item, device->type);
2289	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2290	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2291	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2292	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2293	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2294	btrfs_mark_buffer_dirty(leaf);
2295
2296out:
2297	btrfs_free_path(path);
2298	return ret;
2299}
2300
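/*
 * Grow @device to @new_size: update the superblock total, the writable byte
 * counters and the on-disk dev item. The caller must hold the chunk mutex.
 */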
2301static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2302		      struct btrfs_device *device, u64 new_size)
2303{
2304	struct btrfs_super_block *super_copy =
2305		device->dev_root->fs_info->super_copy;
2306	u64 old_total = btrfs_super_total_bytes(super_copy);
2307	u64 diff = new_size - device->total_bytes;
2308
2309	if (!device->writeable)
2310		return -EACCES;
2311	if (new_size <= device->total_bytes ||
2312	    device->is_tgtdev_for_dev_replace)
2313		return -EINVAL;
2314
2315	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2316	device->fs_devices->total_rw_bytes += diff;
2317
2318	device->total_bytes = new_size;
2319	device->disk_total_bytes = new_size;
2320	btrfs_clear_space_info_full(device->dev_root->fs_info);
2321
2322	return btrfs_update_device(trans, device);
2323}
2324
2325int btrfs_grow_device(struct btrfs_trans_handle *trans,
2326		      struct btrfs_device *device, u64 new_size)
2327{
2328	int ret;
2329	lock_chunks(device->dev_root);
2330	ret = __btrfs_grow_device(trans, device, new_size);
2331	unlock_chunks(device->dev_root);
2332	return ret;
2333}
2334
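/* Delete the chunk item at @chunk_offset from the chunk tree. */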
2335static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2336			    struct btrfs_root *root,
2337			    u64 chunk_tree, u64 chunk_objectid,
2338			    u64 chunk_offset)
2339{
2340	int ret;
2341	struct btrfs_path *path;
2342	struct btrfs_key key;
2343
2344	root = root->fs_info->chunk_root;
2345	path = btrfs_alloc_path();
2346	if (!path)
2347		return -ENOMEM;
2348
2349	key.objectid = chunk_objectid;
2350	key.offset = chunk_offset;
2351	key.type = BTRFS_CHUNK_ITEM_KEY;
2352
2353	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2354	if (ret < 0)
2355		goto out;
2356	else if (ret > 0) { /* Logic error or corruption */
2357		btrfs_error(root->fs_info, -ENOENT,
2358			    "Failed lookup while freeing chunk.");
2359		ret = -ENOENT;
2360		goto out;
2361	}
2362
2363	ret = btrfs_del_item(trans, root, path);
2364	if (ret < 0)
2365		btrfs_error(root->fs_info, ret,
2366			    "Failed to delete chunk item.");
2367out:
2368	btrfs_free_path(path);
2369	return ret;
2370}
2371
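/*
 * Remove the copy of the chunk at @chunk_offset from the sys_chunk_array
 * embedded in the superblock.
 */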
2372static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2373			chunk_offset)
2374{
2375	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2376	struct btrfs_disk_key *disk_key;
2377	struct btrfs_chunk *chunk;
2378	u8 *ptr;
2379	int ret = 0;
2380	u32 num_stripes;
2381	u32 array_size;
2382	u32 len = 0;
2383	u32 cur;
2384	struct btrfs_key key;
2385
2386	array_size = btrfs_super_sys_array_size(super_copy);
2387
2388	ptr = super_copy->sys_chunk_array;
2389	cur = 0;
2390
2391	while (cur < array_size) {
2392		disk_key = (struct btrfs_disk_key *)ptr;
2393		btrfs_disk_key_to_cpu(&key, disk_key);
2394
2395		len = sizeof(*disk_key);
2396
2397		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2398			chunk = (struct btrfs_chunk *)(ptr + len);
2399			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2400			len += btrfs_chunk_item_size(num_stripes);
2401		} else {
2402			ret = -EIO;
2403			break;
2404		}
2405		if (key.objectid == chunk_objectid &&
2406		    key.offset == chunk_offset) {
2407			memmove(ptr, ptr + len, array_size - (cur + len));
2408			array_size -= len;
2409			btrfs_set_super_sys_array_size(super_copy, array_size);
2410		} else {
2411			ptr += len;
2412			cur += len;
2413		}
2414	}
2415	return ret;
2416}
2417
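/*
 * Relocate all extents out of the chunk at @chunk_offset and then delete it:
 * free the dev extents of every stripe, remove the chunk item (and its
 * sys_chunk_array copy for system chunks), the block group and the extent
 * mapping.
 */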
2418static int btrfs_relocate_chunk(struct btrfs_root *root,
2419			 u64 chunk_tree, u64 chunk_objectid,
2420			 u64 chunk_offset)
2421{
2422	struct extent_map_tree *em_tree;
2423	struct btrfs_root *extent_root;
2424	struct btrfs_trans_handle *trans;
2425	struct extent_map *em;
2426	struct map_lookup *map;
2427	int ret;
2428	int i;
2429
2430	root = root->fs_info->chunk_root;
2431	extent_root = root->fs_info->extent_root;
2432	em_tree = &root->fs_info->mapping_tree.map_tree;
2433
2434	ret = btrfs_can_relocate(extent_root, chunk_offset);
2435	if (ret)
2436		return -ENOSPC;
2437
2438	/* step one, relocate all the extents inside this chunk */
2439	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2440	if (ret)
2441		return ret;
2442
2443	trans = btrfs_start_transaction(root, 0);
2444	if (IS_ERR(trans)) {
2445		ret = PTR_ERR(trans);
2446		btrfs_std_error(root->fs_info, ret);
2447		return ret;
2448	}
2449
2450	lock_chunks(root);
2451
2452	/*
2453	 * step two, delete the device extents and the
2454	 * chunk tree entries
2455	 */
2456	read_lock(&em_tree->lock);
2457	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2458	read_unlock(&em_tree->lock);
2459
2460	BUG_ON(!em || em->start > chunk_offset ||
2461	       em->start + em->len < chunk_offset);
2462	map = (struct map_lookup *)em->bdev;
2463
2464	for (i = 0; i < map->num_stripes; i++) {
2465		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2466					    map->stripes[i].physical);
2467		BUG_ON(ret);
2468
2469		if (map->stripes[i].dev) {
2470			ret = btrfs_update_device(trans, map->stripes[i].dev);
2471			BUG_ON(ret);
2472		}
2473	}
2474	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2475			       chunk_offset);
2476
2477	BUG_ON(ret);
2478
2479	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2480
2481	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2482		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2483		BUG_ON(ret);
2484	}
2485
2486	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2487	BUG_ON(ret);
2488
2489	write_lock(&em_tree->lock);
2490	remove_extent_mapping(em_tree, em);
2491	write_unlock(&em_tree->lock);
2492
2493	kfree(map);
2494	em->bdev = NULL;
2495
2496	/* once for the tree */
2497	free_extent_map(em);
2498	/* once for us */
2499	free_extent_map(em);
2500
2501	unlock_chunks(root);
2502	btrfs_end_transaction(trans, root);
2503	return 0;
2504}
2505
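/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk, retrying
 * once for chunks that fail with -ENOSPC.
 */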
2506static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2507{
2508	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2509	struct btrfs_path *path;
2510	struct extent_buffer *leaf;
2511	struct btrfs_chunk *chunk;
2512	struct btrfs_key key;
2513	struct btrfs_key found_key;
2514	u64 chunk_tree = chunk_root->root_key.objectid;
2515	u64 chunk_type;
2516	bool retried = false;
2517	int failed = 0;
2518	int ret;
2519
2520	path = btrfs_alloc_path();
2521	if (!path)
2522		return -ENOMEM;
2523
2524again:
2525	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2526	key.offset = (u64)-1;
2527	key.type = BTRFS_CHUNK_ITEM_KEY;
2528
2529	while (1) {
2530		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2531		if (ret < 0)
2532			goto error;
2533		BUG_ON(ret == 0); /* Corruption */
2534
2535		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2536					  key.type);
2537		if (ret < 0)
2538			goto error;
2539		if (ret > 0)
2540			break;
2541
2542		leaf = path->nodes[0];
2543		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2544
2545		chunk = btrfs_item_ptr(leaf, path->slots[0],
2546				       struct btrfs_chunk);
2547		chunk_type = btrfs_chunk_type(leaf, chunk);
2548		btrfs_release_path(path);
2549
2550		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2551			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2552						   found_key.objectid,
2553						   found_key.offset);
2554			if (ret == -ENOSPC)
2555				failed++;
2556			else if (ret)
2557				BUG();
2558		}
2559
2560		if (found_key.offset == 0)
2561			break;
2562		key.offset = found_key.offset - 1;
2563	}
2564	ret = 0;
2565	if (failed && !retried) {
2566		failed = 0;
2567		retried = true;
2568		goto again;
2569	} else if (WARN_ON(failed && retried)) {
2570		ret = -ENOSPC;
2571	}
2572error:
2573	btrfs_free_path(path);
2574	return ret;
2575}
2576
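/*
 * Write the balance item that records the current balance settings so that
 * an interrupted balance can be resumed later.
 */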
2577static int insert_balance_item(struct btrfs_root *root,
2578			       struct btrfs_balance_control *bctl)
2579{
2580	struct btrfs_trans_handle *trans;
2581	struct btrfs_balance_item *item;
2582	struct btrfs_disk_balance_args disk_bargs;
2583	struct btrfs_path *path;
2584	struct extent_buffer *leaf;
2585	struct btrfs_key key;
2586	int ret, err;
2587
2588	path = btrfs_alloc_path();
2589	if (!path)
2590		return -ENOMEM;
2591
2592	trans = btrfs_start_transaction(root, 0);
2593	if (IS_ERR(trans)) {
2594		btrfs_free_path(path);
2595		return PTR_ERR(trans);
2596	}
2597
2598	key.objectid = BTRFS_BALANCE_OBJECTID;
2599	key.type = BTRFS_BALANCE_ITEM_KEY;
2600	key.offset = 0;
2601
2602	ret = btrfs_insert_empty_item(trans, root, path, &key,
2603				      sizeof(*item));
2604	if (ret)
2605		goto out;
2606
2607	leaf = path->nodes[0];
2608	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2609
2610	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2611
2612	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2613	btrfs_set_balance_data(leaf, item, &disk_bargs);
2614	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2615	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2616	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2617	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2618
2619	btrfs_set_balance_flags(leaf, item, bctl->flags);
2620
2621	btrfs_mark_buffer_dirty(leaf);
2622out:
2623	btrfs_free_path(path);
2624	err = btrfs_commit_transaction(trans, root);
2625	if (err && !ret)
2626		ret = err;
2627	return ret;
2628}
2629
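/*
 * Remove the persistent balance item again once the balance has completed
 * or has been canceled.
 */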
2630static int del_balance_item(struct btrfs_root *root)
2631{
 
2632	struct btrfs_trans_handle *trans;
2633	struct btrfs_path *path;
2634	struct btrfs_key key;
2635	int ret, err;
2636
2637	path = btrfs_alloc_path();
2638	if (!path)
2639		return -ENOMEM;
2640
2641	trans = btrfs_start_transaction(root, 0);
2642	if (IS_ERR(trans)) {
2643		btrfs_free_path(path);
2644		return PTR_ERR(trans);
2645	}
2646
2647	key.objectid = BTRFS_BALANCE_OBJECTID;
2648	key.type = BTRFS_BALANCE_ITEM_KEY;
2649	key.offset = 0;
2650
2651	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2652	if (ret < 0)
2653		goto out;
2654	if (ret > 0) {
2655		ret = -ENOENT;
2656		goto out;
2657	}
2658
2659	ret = btrfs_del_item(trans, root, path);
2660out:
2661	btrfs_free_path(path);
2662	err = btrfs_commit_transaction(trans, root);
2663	if (err && !ret)
2664		ret = err;
2665	return ret;
2666}
2667
2668/*
2669 * This is a heuristic used to reduce the number of chunks balanced on
2670 * resume after balance was interrupted.
2671 */
2672static void update_balance_args(struct btrfs_balance_control *bctl)
2673{
2674	/*
2675	 * Turn on soft mode for chunk types that were being converted.
2676	 */
2677	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2678		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2679	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2680		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2681	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2682		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2683
2684	/*
2685	 * Turn on usage filter if it is not already used.  The idea is
2686	 * that chunks that we have already balanced should be
2687	 * reasonably full.  Don't do it for chunks that are being
2688	 * converted - that will keep us from relocating unconverted
2689	 * (albeit full) chunks.
2690	 */
2691	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
 
2692	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2693		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2694		bctl->data.usage = 90;
2695	}
2696	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
 
2697	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2698		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2699		bctl->sys.usage = 90;
2700	}
2701	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
 
2702	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2703		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2704		bctl->meta.usage = 90;
2705	}
2706}
2707
2708/*
2709 * Should be called with both balance and volume mutexes held to
2710 * serialize other volume operations (add_dev/rm_dev/resize) with
2711 * restriper.  Same goes for unset_balance_control.
2712 */
2713static void set_balance_control(struct btrfs_balance_control *bctl)
2714{
2715	struct btrfs_fs_info *fs_info = bctl->fs_info;
2716
2717	BUG_ON(fs_info->balance_ctl);
2718
2719	spin_lock(&fs_info->balance_lock);
2720	fs_info->balance_ctl = bctl;
2721	spin_unlock(&fs_info->balance_lock);
2722}
2723
2724static void unset_balance_control(struct btrfs_fs_info *fs_info)
2725{
2726	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
 
2727
2728	BUG_ON(!fs_info->balance_ctl);
2729
2730	spin_lock(&fs_info->balance_lock);
2731	fs_info->balance_ctl = NULL;
2732	spin_unlock(&fs_info->balance_lock);
2733
2734	kfree(bctl);
 
 
 
2735}
2736
2737/*
2738 * Balance filters.  Return 1 if chunk should be filtered out
2739 * (should not be balanced).
2740 */
2741static int chunk_profiles_filter(u64 chunk_type,
2742				 struct btrfs_balance_args *bargs)
2743{
2744	chunk_type = chunk_to_extended(chunk_type) &
2745				BTRFS_EXTENDED_PROFILE_MASK;
2746
2747	if (bargs->profiles & chunk_type)
2748		return 0;
2749
2750	return 1;
2751}
2752
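/*
 * Usage filter: bargs->usage is a percentage of the block group size.
 * A chunk is kept for balancing only when its used bytes are below that
 * threshold (usage == 0 matches only completely empty chunks, values
 * above 100 match any chunk that is not completely full).
 */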
2753static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2754			      struct btrfs_balance_args *bargs)
2755{
2756	struct btrfs_block_group_cache *cache;
2757	u64 chunk_used, user_thresh;
2758	int ret = 1;
2759
2760	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2761	chunk_used = btrfs_block_group_used(&cache->item);
2762
2763	if (bargs->usage == 0)
2764		user_thresh = 1;
2765	else if (bargs->usage > 100)
2766		user_thresh = cache->key.offset;
2767	else
2768		user_thresh = div_factor_fine(cache->key.offset,
2769					      bargs->usage);
2770
2771	if (chunk_used < user_thresh)
2772		ret = 0;
2773
2774	btrfs_put_block_group(cache);
2775	return ret;
2776}
2777
2778static int chunk_devid_filter(struct extent_buffer *leaf,
2779			      struct btrfs_chunk *chunk,
2780			      struct btrfs_balance_args *bargs)
2781{
2782	struct btrfs_stripe *stripe;
2783	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2784	int i;
2785
2786	for (i = 0; i < num_stripes; i++) {
2787		stripe = btrfs_stripe_nr(chunk, i);
2788		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2789			return 0;
2790	}
2791
2792	return 1;
2793}
2794
2795/* [pstart, pend) */
2796static int chunk_drange_filter(struct extent_buffer *leaf,
2797			       struct btrfs_chunk *chunk,
2798			       u64 chunk_offset,
2799			       struct btrfs_balance_args *bargs)
2800{
2801	struct btrfs_stripe *stripe;
2802	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2803	u64 stripe_offset;
2804	u64 stripe_length;
 
2805	int factor;
2806	int i;
2807
2808	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2809		return 0;
2810
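	/*
	 * Work out how many of the stripes hold distinct data for one copy,
	 * so the chunk's logical length can be converted into the length of
	 * a single on-disk stripe below.
	 */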
2811	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2812	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2813		factor = num_stripes / 2;
2814	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2815		factor = num_stripes - 1;
2816	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2817		factor = num_stripes - 2;
2818	} else {
2819		factor = num_stripes;
2820	}
2821
2822	for (i = 0; i < num_stripes; i++) {
2823		stripe = btrfs_stripe_nr(chunk, i);
2824		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2825			continue;
2826
2827		stripe_offset = btrfs_stripe_offset(leaf, stripe);
2828		stripe_length = btrfs_chunk_length(leaf, chunk);
2829		do_div(stripe_length, factor);
2830
2831		if (stripe_offset < bargs->pend &&
2832		    stripe_offset + stripe_length > bargs->pstart)
2833			return 0;
2834	}
2835
2836	return 1;
2837}
2838
2839/* [vstart, vend) */
2840static int chunk_vrange_filter(struct extent_buffer *leaf,
2841			       struct btrfs_chunk *chunk,
2842			       u64 chunk_offset,
2843			       struct btrfs_balance_args *bargs)
2844{
2845	if (chunk_offset < bargs->vend &&
2846	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2847		/* at least part of the chunk is inside this vrange */
2848		return 0;
2849
2850	return 1;
2851}
2852
2853static int chunk_soft_convert_filter(u64 chunk_type,
2854				     struct btrfs_balance_args *bargs)
2855{
2856	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2857		return 0;
2858
2859	chunk_type = chunk_to_extended(chunk_type) &
2860				BTRFS_EXTENDED_PROFILE_MASK;
2861
2862	if (bargs->target == chunk_type)
2863		return 1;
2864
2865	return 0;
2866}
2867
2868static int should_balance_chunk(struct btrfs_root *root,
2869				struct extent_buffer *leaf,
2870				struct btrfs_chunk *chunk, u64 chunk_offset)
2871{
2872	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
 
2873	struct btrfs_balance_args *bargs = NULL;
2874	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2875
2876	/* type filter */
2877	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2878	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2879		return 0;
2880	}
2881
2882	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2883		bargs = &bctl->data;
2884	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2885		bargs = &bctl->sys;
2886	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2887		bargs = &bctl->meta;
2888
2889	/* profiles filter */
2890	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2891	    chunk_profiles_filter(chunk_type, bargs)) {
2892		return 0;
2893	}
2894
2895	/* usage filter */
2896	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2897	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
 
 
 
2898		return 0;
2899	}
2900
2901	/* devid filter */
2902	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2903	    chunk_devid_filter(leaf, chunk, bargs)) {
2904		return 0;
2905	}
2906
2907	/* drange filter, makes sense only with devid filter */
2908	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2909	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2910		return 0;
2911	}
2912
2913	/* vrange filter */
2914	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2915	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2916		return 0;
2917	}
2918
2919	/* soft profile changing mode */
2920	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2921	    chunk_soft_convert_filter(chunk_type, bargs)) {
2922		return 0;
2923	}
2924
2925	return 1;
2926}
2927
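/*
 * Core balance loop.  The chunk tree is walked twice: a first "counting"
 * pass only tallies the chunks that match the balance filters (for the
 * expected statistics), the second pass relocates them one by one,
 * honoring pause and cancel requests between chunks.
 */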
2928static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2929{
2930	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2931	struct btrfs_root *chunk_root = fs_info->chunk_root;
2932	struct btrfs_root *dev_root = fs_info->dev_root;
2933	struct list_head *devices;
2934	struct btrfs_device *device;
2935	u64 old_size;
2936	u64 size_to_free;
2937	struct btrfs_chunk *chunk;
2938	struct btrfs_path *path;
2939	struct btrfs_key key;
2940	struct btrfs_key found_key;
2941	struct btrfs_trans_handle *trans;
2942	struct extent_buffer *leaf;
2943	int slot;
2944	int ret;
2945	int enospc_errors = 0;
2946	bool counting = true;
2947
2948	/* step one, make some room on all the devices */
2949	devices = &fs_info->fs_devices->devices;
2950	list_for_each_entry(device, devices, dev_list) {
2951		old_size = device->total_bytes;
2952		size_to_free = div_factor(old_size, 1);
2953		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2954		if (!device->writeable ||
2955		    device->total_bytes - device->bytes_used > size_to_free ||
2956		    device->is_tgtdev_for_dev_replace)
2957			continue;
2958
2959		ret = btrfs_shrink_device(device, old_size - size_to_free);
2960		if (ret == -ENOSPC)
2961			break;
2962		BUG_ON(ret);
2963
2964		trans = btrfs_start_transaction(dev_root, 0);
2965		BUG_ON(IS_ERR(trans));
2966
2967		ret = btrfs_grow_device(trans, device, old_size);
2968		BUG_ON(ret);
2969
2970		btrfs_end_transaction(trans, dev_root);
2971	}
2972
2973	/* step two, relocate all the chunks */
2974	path = btrfs_alloc_path();
2975	if (!path) {
2976		ret = -ENOMEM;
2977		goto error;
2978	}
2979
2980	/* zero out stat counters */
2981	spin_lock(&fs_info->balance_lock);
2982	memset(&bctl->stat, 0, sizeof(bctl->stat));
2983	spin_unlock(&fs_info->balance_lock);
2984again:
2985	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2986	key.offset = (u64)-1;
2987	key.type = BTRFS_CHUNK_ITEM_KEY;
2988
2989	while (1) {
2990		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2991		    atomic_read(&fs_info->balance_cancel_req)) {
2992			ret = -ECANCELED;
2993			goto error;
2994		}
2995
 
2996		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2997		if (ret < 0)
 
2998			goto error;
 
2999
3000		/*
3001		 * this shouldn't happen, it means the last relocate
3002		 * failed
3003		 */
3004		if (ret == 0)
3005			BUG(); /* FIXME break ? */
3006
3007		ret = btrfs_previous_item(chunk_root, path, 0,
3008					  BTRFS_CHUNK_ITEM_KEY);
3009		if (ret) {
 
3010			ret = 0;
3011			break;
3012		}
3013
3014		leaf = path->nodes[0];
3015		slot = path->slots[0];
3016		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3017
3018		if (found_key.objectid != key.objectid)
 
3019			break;
 
3020
3021		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
 
3022
3023		if (!counting) {
3024			spin_lock(&fs_info->balance_lock);
3025			bctl->stat.considered++;
3026			spin_unlock(&fs_info->balance_lock);
3027		}
3028
3029		ret = should_balance_chunk(chunk_root, leaf, chunk,
3030					   found_key.offset);
3031		btrfs_release_path(path);
3032		if (!ret)
 
3033			goto loop;
 
3034
3035		if (counting) {
 
3036			spin_lock(&fs_info->balance_lock);
3037			bctl->stat.expected++;
3038			spin_unlock(&fs_info->balance_lock);
3039			goto loop;
3040		}
3041
3042		ret = btrfs_relocate_chunk(chunk_root,
3043					   chunk_root->root_key.objectid,
3044					   found_key.objectid,
3045					   found_key.offset);
3046		if (ret && ret != -ENOSPC)
3047			goto error;
3048		if (ret == -ENOSPC) {
3049			enospc_errors++;
3050		} else {
3051			spin_lock(&fs_info->balance_lock);
3052			bctl->stat.completed++;
3053			spin_unlock(&fs_info->balance_lock);
3054		}
3055loop:
3056		if (found_key.offset == 0)
3057			break;
3058		key.offset = found_key.offset - 1;
3059	}
3060
3061	if (counting) {
3062		btrfs_release_path(path);
3063		counting = false;
3064		goto again;
3065	}
3066error:
3067	btrfs_free_path(path);
3068	if (enospc_errors) {
3069		btrfs_info(fs_info, "%d enospc errors during balance",
3070		       enospc_errors);
3071		if (!ret)
3072			ret = -ENOSPC;
3073	}
3074
3075	return ret;
3076}
3077
3078/**
3079 * alloc_profile_is_valid - see if a given profile is valid and reduced
3080 * @flags: profile to validate
3081 * @extended: if true @flags is treated as an extended profile
 
3082 */
3083static int alloc_profile_is_valid(u64 flags, int extended)
3084{
3085	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3086			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3087
3088	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3089
3090	/* 1) check that all other bits are zeroed */
3091	if (flags & ~mask)
3092		return 0;
3093
3094	/* 2) see if profile is reduced */
3095	if (flags == 0)
3096		return !extended; /* "0" is valid for usual profiles */
3097
3098	/* true if exactly one bit set */
3099	return (flags & (flags - 1)) == 0;
3100}
3101
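/*
 * Returns true when the in-memory balance state should be torn down:
 * either a cancel was requested, or the balance ran to completion without
 * a pending pause or cancel.
 */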
3102static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3103{
3104	/* cancel requested || normal exit path */
3105	return atomic_read(&fs_info->balance_cancel_req) ||
3106		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3107		 atomic_read(&fs_info->balance_cancel_req) == 0);
3108}
3109
3110static void __cancel_balance(struct btrfs_fs_info *fs_info)
3111{
 
 
 
 
 
3112	int ret;
 
3113
3114	unset_balance_control(fs_info);
3115	ret = del_balance_item(fs_info->tree_root);
3116	if (ret)
3117		btrfs_std_error(fs_info, ret);
3118
3119	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3120}
3121
3122/*
3123 * Should be called with both balance and volume mutexes held
3124 */
3125int btrfs_balance(struct btrfs_balance_control *bctl,
 
3126		  struct btrfs_ioctl_balance_args *bargs)
3127{
3128	struct btrfs_fs_info *fs_info = bctl->fs_info;
3129	u64 allowed;
3130	int mixed = 0;
3131	int ret;
3132	u64 num_devices;
3133	unsigned seq;
 
 
3134
3135	if (btrfs_fs_closing(fs_info) ||
3136	    atomic_read(&fs_info->balance_pause_req) ||
3137	    atomic_read(&fs_info->balance_cancel_req)) {
3138		ret = -EINVAL;
3139		goto out;
3140	}
3141
3142	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3143	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3144		mixed = 1;
3145
3146	/*
3147	 * In case of mixed groups both data and meta should be picked,
3148	 * and identical options should be given for both of them.
3149	 */
3150	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3151	if (mixed && (bctl->flags & allowed)) {
3152		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3153		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3154		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3155			btrfs_err(fs_info, "with mixed groups data and "
3156				   "metadata balance options must be the same");
3157			ret = -EINVAL;
3158			goto out;
3159		}
3160	}
3161
3162	num_devices = fs_info->fs_devices->num_devices;
3163	btrfs_dev_replace_lock(&fs_info->dev_replace);
3164	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3165		BUG_ON(num_devices < 1);
3166		num_devices--;
3167	}
3168	btrfs_dev_replace_unlock(&fs_info->dev_replace);
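	/*
	 * Build the set of profiles that the current number of devices can
	 * support; requested convert targets outside this set are rejected
	 * below.
	 */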
3169	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3170	if (num_devices == 1)
3171		allowed |= BTRFS_BLOCK_GROUP_DUP;
3172	else if (num_devices > 1)
3173		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3174	if (num_devices > 2)
3175		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3176	if (num_devices > 3)
3177		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3178			    BTRFS_BLOCK_GROUP_RAID6);
3179	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3180	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
3181	     (bctl->data.target & ~allowed))) {
3182		btrfs_err(fs_info, "unable to start balance with target "
3183			   "data profile %llu",
3184		       bctl->data.target);
3185		ret = -EINVAL;
3186		goto out;
3187	}
3188	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3189	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3190	     (bctl->meta.target & ~allowed))) {
3191		btrfs_err(fs_info,
3192			   "unable to start balance with target metadata profile %llu",
3193		       bctl->meta.target);
3194		ret = -EINVAL;
3195		goto out;
3196	}
3197	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3198	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3199	     (bctl->sys.target & ~allowed))) {
3200		btrfs_err(fs_info,
3201			   "unable to start balance with target system profile %llu",
3202		       bctl->sys.target);
3203		ret = -EINVAL;
3204		goto out;
3205	}
3206
3207	/* allow dup'ed data chunks only in mixed mode */
3208	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3209	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3210		btrfs_err(fs_info, "dup for data is not allowed");
3211		ret = -EINVAL;
3212		goto out;
 
 
 
3213	}
3214
3215	/* allow to reduce meta or sys integrity only if force set */
3216	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3217			BTRFS_BLOCK_GROUP_RAID10 |
3218			BTRFS_BLOCK_GROUP_RAID5 |
3219			BTRFS_BLOCK_GROUP_RAID6;
3220	do {
3221		seq = read_seqbegin(&fs_info->profiles_lock);
3222
3223		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3224		     (fs_info->avail_system_alloc_bits & allowed) &&
3225		     !(bctl->sys.target & allowed)) ||
3226		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3227		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3228		     !(bctl->meta.target & allowed))) {
3229			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3230				btrfs_info(fs_info, "force reducing metadata integrity");
3231			} else {
3232				btrfs_err(fs_info, "balance will reduce metadata "
3233					   "integrity, use force if you want this");
3234				ret = -EINVAL;
3235				goto out;
3236			}
3237		}
3238	} while (read_seqretry(&fs_info->profiles_lock, seq));
3239
3240	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3241		int num_tolerated_disk_barrier_failures;
3242		u64 target = bctl->sys.target;
3243
3244		num_tolerated_disk_barrier_failures =
3245			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3246		if (num_tolerated_disk_barrier_failures > 0 &&
3247		    (target &
3248		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3249		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3250			num_tolerated_disk_barrier_failures = 0;
3251		else if (num_tolerated_disk_barrier_failures > 1 &&
3252			 (target &
3253			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3254			num_tolerated_disk_barrier_failures = 1;
3255
3256		fs_info->num_tolerated_disk_barrier_failures =
3257			num_tolerated_disk_barrier_failures;
 
 
 
 
3258	}
3259
3260	ret = insert_balance_item(fs_info->tree_root, bctl);
3261	if (ret && ret != -EEXIST)
3262		goto out;
3263
3264	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3265		BUG_ON(ret == -EEXIST);
3266		set_balance_control(bctl);
 
 
 
3267	} else {
3268		BUG_ON(ret != -EEXIST);
3269		spin_lock(&fs_info->balance_lock);
3270		update_balance_args(bctl);
3271		spin_unlock(&fs_info->balance_lock);
3272	}
3273
3274	atomic_inc(&fs_info->balance_running);
 
 
3275	mutex_unlock(&fs_info->balance_mutex);
3276
3277	ret = __btrfs_balance(fs_info);
3278
3279	mutex_lock(&fs_info->balance_mutex);
3280	atomic_dec(&fs_info->balance_running);
3281
3282	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3283		fs_info->num_tolerated_disk_barrier_failures =
3284			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3285	}
3286
3287	if (bargs) {
3288		memset(bargs, 0, sizeof(*bargs));
3289		update_ioctl_balance_args(fs_info, 0, bargs);
3290	}
3291
3292	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3293	    balance_need_close(fs_info)) {
3294		__cancel_balance(fs_info);
 
3295	}
3296
3297	wake_up(&fs_info->balance_wait_q);
3298
3299	return ret;
3300out:
3301	if (bctl->flags & BTRFS_BALANCE_RESUME)
3302		__cancel_balance(fs_info);
3303	else {
3304		kfree(bctl);
3305		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3306	}
3307	return ret;
3308}
3309
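/*
 * Worker used to resume an interrupted balance in the background: it just
 * re-runs btrfs_balance() with the balance_ctl recovered from disk.
 */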
3310static int balance_kthread(void *data)
3311{
3312	struct btrfs_fs_info *fs_info = data;
3313	int ret = 0;
3314
3315	mutex_lock(&fs_info->volume_mutex);
3316	mutex_lock(&fs_info->balance_mutex);
3317
3318	if (fs_info->balance_ctl) {
3319		btrfs_info(fs_info, "continuing balance");
3320		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3321	}
3322
3323	mutex_unlock(&fs_info->balance_mutex);
3324	mutex_unlock(&fs_info->volume_mutex);
3325
3326	return ret;
3327}
3328
3329int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3330{
3331	struct task_struct *tsk;
3332
3333	spin_lock(&fs_info->balance_lock);
3334	if (!fs_info->balance_ctl) {
3335		spin_unlock(&fs_info->balance_lock);
3336		return 0;
3337	}
3338	spin_unlock(&fs_info->balance_lock);
3339
3340	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3341		btrfs_info(fs_info, "force skipping balance");
3342		return 0;
3343	}
3344
3345	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3346	return PTR_ERR_OR_ZERO(tsk);
3347}
3348
3349int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3350{
3351	struct btrfs_balance_control *bctl;
3352	struct btrfs_balance_item *item;
3353	struct btrfs_disk_balance_args disk_bargs;
3354	struct btrfs_path *path;
3355	struct extent_buffer *leaf;
3356	struct btrfs_key key;
3357	int ret;
3358
3359	path = btrfs_alloc_path();
3360	if (!path)
3361		return -ENOMEM;
3362
3363	key.objectid = BTRFS_BALANCE_OBJECTID;
3364	key.type = BTRFS_BALANCE_ITEM_KEY;
3365	key.offset = 0;
3366
3367	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3368	if (ret < 0)
3369		goto out;
3370	if (ret > 0) { /* ret = -ENOENT; */
3371		ret = 0;
3372		goto out;
3373	}
3374
3375	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3376	if (!bctl) {
3377		ret = -ENOMEM;
3378		goto out;
3379	}
3380
3381	leaf = path->nodes[0];
3382	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3383
3384	bctl->fs_info = fs_info;
3385	bctl->flags = btrfs_balance_flags(leaf, item);
3386	bctl->flags |= BTRFS_BALANCE_RESUME;
3387
3388	btrfs_balance_data(leaf, item, &disk_bargs);
3389	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3390	btrfs_balance_meta(leaf, item, &disk_bargs);
3391	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3392	btrfs_balance_sys(leaf, item, &disk_bargs);
3393	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3394
3395	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3396
3397	mutex_lock(&fs_info->volume_mutex);
3398	mutex_lock(&fs_info->balance_mutex);
3399
3400	set_balance_control(bctl);
3401
 
3402	mutex_unlock(&fs_info->balance_mutex);
3403	mutex_unlock(&fs_info->volume_mutex);
3404out:
3405	btrfs_free_path(path);
3406	return ret;
3407}
3408
3409int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3410{
3411	int ret = 0;
3412
3413	mutex_lock(&fs_info->balance_mutex);
3414	if (!fs_info->balance_ctl) {
3415		mutex_unlock(&fs_info->balance_mutex);
3416		return -ENOTCONN;
3417	}
3418
3419	if (atomic_read(&fs_info->balance_running)) {
3420		atomic_inc(&fs_info->balance_pause_req);
3421		mutex_unlock(&fs_info->balance_mutex);
3422
3423		wait_event(fs_info->balance_wait_q,
3424			   atomic_read(&fs_info->balance_running) == 0);
3425
3426		mutex_lock(&fs_info->balance_mutex);
3427		/* we are good with balance_ctl ripped off from under us */
3428		BUG_ON(atomic_read(&fs_info->balance_running));
3429		atomic_dec(&fs_info->balance_pause_req);
3430	} else {
3431		ret = -ENOTCONN;
3432	}
3433
3434	mutex_unlock(&fs_info->balance_mutex);
3435	return ret;
3436}
3437
3438int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3439{
3440	if (fs_info->sb->s_flags & MS_RDONLY)
3441		return -EROFS;
3442
3443	mutex_lock(&fs_info->balance_mutex);
3444	if (!fs_info->balance_ctl) {
3445		mutex_unlock(&fs_info->balance_mutex);
3446		return -ENOTCONN;
3447	}
3448
3449	atomic_inc(&fs_info->balance_cancel_req);
3450	/*
3451	 * if we are running just wait and return, balance item is
3452	 * deleted in btrfs_balance in this case
3453	 */
3454	if (atomic_read(&fs_info->balance_running)) {
3455		mutex_unlock(&fs_info->balance_mutex);
3456		wait_event(fs_info->balance_wait_q,
3457			   atomic_read(&fs_info->balance_running) == 0);
3458		mutex_lock(&fs_info->balance_mutex);
3459	} else {
3460		/* __cancel_balance needs volume_mutex */
3461		mutex_unlock(&fs_info->balance_mutex);
3462		mutex_lock(&fs_info->volume_mutex);
 
 
 
3463		mutex_lock(&fs_info->balance_mutex);
3464
3465		if (fs_info->balance_ctl)
3466			__cancel_balance(fs_info);
3467
3468		mutex_unlock(&fs_info->volume_mutex);
 
3469	}
3470
3471	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
 
3472	atomic_dec(&fs_info->balance_cancel_req);
3473	mutex_unlock(&fs_info->balance_mutex);
3474	return 0;
3475}
3476
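/*
 * Scan all root items and (re)create the corresponding UUID tree entries
 * for their uuid and received_uuid fields, committing a small transaction
 * per subvolume.  Runs as the "btrfs-uuid" kthread.
 */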
3477static int btrfs_uuid_scan_kthread(void *data)
3478{
3479	struct btrfs_fs_info *fs_info = data;
3480	struct btrfs_root *root = fs_info->tree_root;
3481	struct btrfs_key key;
3482	struct btrfs_key max_key;
3483	struct btrfs_path *path = NULL;
3484	int ret = 0;
3485	struct extent_buffer *eb;
3486	int slot;
3487	struct btrfs_root_item root_item;
3488	u32 item_size;
3489	struct btrfs_trans_handle *trans = NULL;
 
3490
3491	path = btrfs_alloc_path();
3492	if (!path) {
3493		ret = -ENOMEM;
3494		goto out;
3495	}
3496
3497	key.objectid = 0;
3498	key.type = BTRFS_ROOT_ITEM_KEY;
3499	key.offset = 0;
3500
3501	max_key.objectid = (u64)-1;
3502	max_key.type = BTRFS_ROOT_ITEM_KEY;
3503	max_key.offset = (u64)-1;
3504
3505	path->keep_locks = 1;
3506
3507	while (1) {
3508		ret = btrfs_search_forward(root, &key, path, 0);
 
 
 
 
 
3509		if (ret) {
3510			if (ret > 0)
3511				ret = 0;
3512			break;
3513		}
3514
3515		if (key.type != BTRFS_ROOT_ITEM_KEY ||
3516		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
3517		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
3518		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
3519			goto skip;
3520
3521		eb = path->nodes[0];
3522		slot = path->slots[0];
3523		item_size = btrfs_item_size_nr(eb, slot);
3524		if (item_size < sizeof(root_item))
3525			goto skip;
3526
3527		read_extent_buffer(eb, &root_item,
3528				   btrfs_item_ptr_offset(eb, slot),
3529				   (int)sizeof(root_item));
3530		if (btrfs_root_refs(&root_item) == 0)
3531			goto skip;
3532
3533		if (!btrfs_is_empty_uuid(root_item.uuid) ||
3534		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
3535			if (trans)
3536				goto update_tree;
3537
3538			btrfs_release_path(path);
3539			/*
3540			 * 1 - subvol uuid item
3541			 * 1 - received_subvol uuid item
3542			 */
3543			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
3544			if (IS_ERR(trans)) {
3545				ret = PTR_ERR(trans);
3546				break;
3547			}
3548			continue;
3549		} else {
3550			goto skip;
3551		}
3552update_tree:
 
3553		if (!btrfs_is_empty_uuid(root_item.uuid)) {
3554			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3555						  root_item.uuid,
3556						  BTRFS_UUID_KEY_SUBVOL,
3557						  key.objectid);
3558			if (ret < 0) {
3559				btrfs_warn(fs_info, "uuid_tree_add failed %d",
3560					ret);
3561				break;
3562			}
3563		}
3564
3565		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
3566			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3567						  root_item.received_uuid,
3568						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3569						  key.objectid);
3570			if (ret < 0) {
3571				btrfs_warn(fs_info, "uuid_tree_add failed %d",
3572					ret);
3573				break;
3574			}
3575		}
3576
3577skip:
 
3578		if (trans) {
3579			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
3580			trans = NULL;
3581			if (ret)
3582				break;
3583		}
3584
3585		btrfs_release_path(path);
3586		if (key.offset < (u64)-1) {
3587			key.offset++;
3588		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
3589			key.offset = 0;
3590			key.type = BTRFS_ROOT_ITEM_KEY;
3591		} else if (key.objectid < (u64)-1) {
3592			key.offset = 0;
3593			key.type = BTRFS_ROOT_ITEM_KEY;
3594			key.objectid++;
3595		} else {
3596			break;
3597		}
3598		cond_resched();
3599	}
3600
3601out:
3602	btrfs_free_path(path);
3603	if (trans && !IS_ERR(trans))
3604		btrfs_end_transaction(trans, fs_info->uuid_root);
3605	if (ret)
3606		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
3607	else
3608		fs_info->update_uuid_tree_gen = 1;
3609	up(&fs_info->uuid_tree_rescan_sem);
3610	return 0;
3611}
3612
3613/*
3614 * Callback for btrfs_uuid_tree_iterate().
3615 * returns:
3616 * 0	check succeeded, the entry is not outdated.
3617 * < 0	if an error occurred.
3618 * > 0	if the check failed, which means the caller shall remove the entry.
3619 */
3620static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
3621				       u8 *uuid, u8 type, u64 subid)
3622{
3623	struct btrfs_key key;
3624	int ret = 0;
3625	struct btrfs_root *subvol_root;
3626
3627	if (type != BTRFS_UUID_KEY_SUBVOL &&
3628	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
3629		goto out;
3630
3631	key.objectid = subid;
3632	key.type = BTRFS_ROOT_ITEM_KEY;
3633	key.offset = (u64)-1;
3634	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
3635	if (IS_ERR(subvol_root)) {
3636		ret = PTR_ERR(subvol_root);
3637		if (ret == -ENOENT)
3638			ret = 1;
3639		goto out;
3640	}
3641
3642	switch (type) {
3643	case BTRFS_UUID_KEY_SUBVOL:
3644		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
3645			ret = 1;
3646		break;
3647	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
3648		if (memcmp(uuid, subvol_root->root_item.received_uuid,
3649			   BTRFS_UUID_SIZE))
3650			ret = 1;
3651		break;
3652	}
3653
3654out:
3655	return ret;
3656}
3657
3658static int btrfs_uuid_rescan_kthread(void *data)
3659{
3660	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
3661	int ret;
3662
3663	/*
3664	 * 1st step is to iterate through the existing UUID tree and
3665	 * to delete all entries that contain outdated data.
3666	 * 2nd step is to add all missing entries to the UUID tree.
3667	 */
3668	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
3669	if (ret < 0) {
3670		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
3671		up(&fs_info->uuid_tree_rescan_sem);
3672		return ret;
3673	}
3674	return btrfs_uuid_scan_kthread(data);
3675}
3676
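/*
 * Create the UUID tree root, remember it in fs_info and start the
 * btrfs-uuid kthread that fills it with entries for all existing
 * subvolumes.
 */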
3677int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
3678{
3679	struct btrfs_trans_handle *trans;
3680	struct btrfs_root *tree_root = fs_info->tree_root;
3681	struct btrfs_root *uuid_root;
3682	struct task_struct *task;
3683	int ret;
3684
3685	/*
3686	 * 1 - root node
3687	 * 1 - root item
3688	 */
3689	trans = btrfs_start_transaction(tree_root, 2);
3690	if (IS_ERR(trans))
3691		return PTR_ERR(trans);
3692
3693	uuid_root = btrfs_create_tree(trans, fs_info,
3694				      BTRFS_UUID_TREE_OBJECTID);
3695	if (IS_ERR(uuid_root)) {
3696		btrfs_abort_transaction(trans, tree_root,
3697					PTR_ERR(uuid_root));
3698		return PTR_ERR(uuid_root);
 
3699	}
3700
3701	fs_info->uuid_root = uuid_root;
3702
3703	ret = btrfs_commit_transaction(trans, tree_root);
3704	if (ret)
3705		return ret;
3706
3707	down(&fs_info->uuid_tree_rescan_sem);
3708	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
3709	if (IS_ERR(task)) {
3710		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3711		btrfs_warn(fs_info, "failed to start uuid_scan task");
3712		up(&fs_info->uuid_tree_rescan_sem);
3713		return PTR_ERR(task);
3714	}
3715
3716	return 0;
3717}
3718
3719int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
3720{
3721	struct task_struct *task;
3722
3723	down(&fs_info->uuid_tree_rescan_sem);
3724	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
3725	if (IS_ERR(task)) {
3726		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3727		btrfs_warn(fs_info, "failed to start uuid_rescan task");
3728		up(&fs_info->uuid_tree_rescan_sem);
3729		return PTR_ERR(task);
3730	}
3731
3732	return 0;
3733}
3734
3735/*
3736 * shrinking a device means finding all of the device extents past
3737 * the new size, and then following the back refs to the chunks.
3738 * The chunk relocation code actually frees the device extents.
3739 */
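/*
 * Device extents that would fall past the new size are relocated one by
 * one; -ENOSPC failures are retried once, and if shrinking still cannot
 * complete, the old size is restored and -ENOSPC is returned.
 */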
3740int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3741{
 
 
3742	struct btrfs_trans_handle *trans;
3743	struct btrfs_root *root = device->dev_root;
3744	struct btrfs_dev_extent *dev_extent = NULL;
3745	struct btrfs_path *path;
3746	u64 length;
3747	u64 chunk_tree;
3748	u64 chunk_objectid;
3749	u64 chunk_offset;
3750	int ret;
3751	int slot;
3752	int failed = 0;
3753	bool retried = false;
3754	struct extent_buffer *l;
3755	struct btrfs_key key;
3756	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3757	u64 old_total = btrfs_super_total_bytes(super_copy);
3758	u64 old_size = device->total_bytes;
3759	u64 diff = device->total_bytes - new_size;
 
 
 
 
 
3760
3761	if (device->is_tgtdev_for_dev_replace)
3762		return -EINVAL;
3763
3764	path = btrfs_alloc_path();
3765	if (!path)
3766		return -ENOMEM;
3767
3768	path->reada = 2;
3769
3770	lock_chunks(root);
3771
3772	device->total_bytes = new_size;
3773	if (device->writeable) {
3774		device->fs_devices->total_rw_bytes -= diff;
3775		spin_lock(&root->fs_info->free_chunk_lock);
3776		root->fs_info->free_chunk_space -= diff;
3777		spin_unlock(&root->fs_info->free_chunk_lock);
3778	}
3779	unlock_chunks(root);
3780
3781again:
3782	key.objectid = device->devid;
3783	key.offset = (u64)-1;
3784	key.type = BTRFS_DEV_EXTENT_KEY;
3785
3786	do {
 
3787		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3788		if (ret < 0)
 
3789			goto done;
 
3790
3791		ret = btrfs_previous_item(root, path, 0, key.type);
3792		if (ret < 0)
3793			goto done;
3794		if (ret) {
 
 
 
3795			ret = 0;
3796			btrfs_release_path(path);
3797			break;
3798		}
3799
3800		l = path->nodes[0];
3801		slot = path->slots[0];
3802		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3803
3804		if (key.objectid != device->devid) {
 
3805			btrfs_release_path(path);
3806			break;
3807		}
3808
3809		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3810		length = btrfs_dev_extent_length(l, dev_extent);
3811
3812		if (key.offset + length <= new_size) {
 
3813			btrfs_release_path(path);
3814			break;
3815		}
3816
3817		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3818		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3819		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3820		btrfs_release_path(path);
3821
3822		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3823					   chunk_offset);
3824		if (ret && ret != -ENOSPC)
3825			goto done;
3826		if (ret == -ENOSPC)
3827			failed++;
3828	} while (key.offset-- > 0);
3829
3830	if (failed && !retried) {
3831		failed = 0;
3832		retried = true;
3833		goto again;
3834	} else if (failed && retried) {
3835		ret = -ENOSPC;
3836		lock_chunks(root);
3837
3838		device->total_bytes = old_size;
3839		if (device->writeable)
3840			device->fs_devices->total_rw_bytes += diff;
3841		spin_lock(&root->fs_info->free_chunk_lock);
3842		root->fs_info->free_chunk_space += diff;
3843		spin_unlock(&root->fs_info->free_chunk_lock);
3844		unlock_chunks(root);
3845		goto done;
3846	}
3847
3848	/* Shrinking succeeded, else we would be at "done". */
3849	trans = btrfs_start_transaction(root, 0);
3850	if (IS_ERR(trans)) {
3851		ret = PTR_ERR(trans);
3852		goto done;
3853	}
3854
3855	lock_chunks(root);
3856
3857	device->disk_total_bytes = new_size;
3858	/* Now btrfs_update_device() will change the on-disk size. */
3859	ret = btrfs_update_device(trans, device);
3860	if (ret) {
3861		unlock_chunks(root);
3862		btrfs_end_transaction(trans, root);
3863		goto done;
 
 
3864	}
3865	WARN_ON(diff > old_total);
3866	btrfs_set_super_total_bytes(super_copy, old_total - diff);
3867	unlock_chunks(root);
3868	btrfs_end_transaction(trans, root);
3869done:
3870	btrfs_free_path(path);
3871	return ret;
3872}
3873
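/*
 * Append the chunk key plus chunk item to the superblock's sys_chunk_array.
 * SYSTEM chunks are duplicated there so the chunk tree itself can be read
 * at mount time, before any other mapping exists.
 */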
3874static int btrfs_add_system_chunk(struct btrfs_root *root,
3875			   struct btrfs_key *key,
3876			   struct btrfs_chunk *chunk, int item_size)
3877{
3878	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3879	struct btrfs_disk_key disk_key;
3880	u32 array_size;
3881	u8 *ptr;
3882
 
 
3883	array_size = btrfs_super_sys_array_size(super_copy);
3884	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
 
3885		return -EFBIG;
3886
3887	ptr = super_copy->sys_chunk_array + array_size;
3888	btrfs_cpu_key_to_disk(&disk_key, key);
3889	memcpy(ptr, &disk_key, sizeof(disk_key));
3890	ptr += sizeof(disk_key);
3891	memcpy(ptr, chunk, item_size);
3892	item_size += sizeof(disk_key);
3893	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
 
3894	return 0;
3895}
3896
3897/*
3898 * sort the devices in descending order by max_avail, total_avail
3899 */
3900static int btrfs_cmp_device_info(const void *a, const void *b)
3901{
3902	const struct btrfs_device_info *di_a = a;
3903	const struct btrfs_device_info *di_b = b;
3904
3905	if (di_a->max_avail > di_b->max_avail)
3906		return -1;
3907	if (di_a->max_avail < di_b->max_avail)
3908		return 1;
3909	if (di_a->total_avail > di_b->total_avail)
3910		return -1;
3911	if (di_a->total_avail < di_b->total_avail)
3912		return 1;
3913	return 0;
3914}
3915
3916static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3917	[BTRFS_RAID_RAID10] = {
3918		.sub_stripes	= 2,
3919		.dev_stripes	= 1,
3920		.devs_max	= 0,	/* 0 == as many as possible */
3921		.devs_min	= 4,
3922		.devs_increment	= 2,
3923		.ncopies	= 2,
3924	},
3925	[BTRFS_RAID_RAID1] = {
3926		.sub_stripes	= 1,
3927		.dev_stripes	= 1,
3928		.devs_max	= 2,
3929		.devs_min	= 2,
3930		.devs_increment	= 2,
3931		.ncopies	= 2,
3932	},
3933	[BTRFS_RAID_DUP] = {
3934		.sub_stripes	= 1,
3935		.dev_stripes	= 2,
3936		.devs_max	= 1,
3937		.devs_min	= 1,
3938		.devs_increment	= 1,
3939		.ncopies	= 2,
3940	},
3941	[BTRFS_RAID_RAID0] = {
3942		.sub_stripes	= 1,
3943		.dev_stripes	= 1,
3944		.devs_max	= 0,
3945		.devs_min	= 2,
3946		.devs_increment	= 1,
3947		.ncopies	= 1,
3948	},
3949	[BTRFS_RAID_SINGLE] = {
3950		.sub_stripes	= 1,
3951		.dev_stripes	= 1,
3952		.devs_max	= 1,
3953		.devs_min	= 1,
3954		.devs_increment	= 1,
3955		.ncopies	= 1,
3956	},
3957	[BTRFS_RAID_RAID5] = {
3958		.sub_stripes	= 1,
3959		.dev_stripes	= 1,
3960		.devs_max	= 0,
3961		.devs_min	= 2,
3962		.devs_increment	= 1,
3963		.ncopies	= 2,
3964	},
3965	[BTRFS_RAID_RAID6] = {
3966		.sub_stripes	= 1,
3967		.dev_stripes	= 1,
3968		.devs_max	= 0,
3969		.devs_min	= 3,
3970		.devs_increment	= 1,
3971		.ncopies	= 3,
3972	},
3973};
3974
3975static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3976{
3977	/* TODO allow them to set a preferred stripe size */
3978	return 64 * 1024;
3979}
3980
3981static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3982{
3983	if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3984		return;
3985
3986	btrfs_set_fs_incompat(info, RAID56);
3987}
3988
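/*
 * First phase of chunk allocation: pick devices, decide the stripe
 * geometry and record the logical->physical mapping in the in-memory
 * extent map tree and as a new block group.  The chunk tree itself is not
 * touched here; that happens in btrfs_finish_chunk_alloc().
 */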
3989static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3990			       struct btrfs_root *extent_root, u64 start,
3991			       u64 type)
3992{
3993	struct btrfs_fs_info *info = extent_root->fs_info;
3994	struct btrfs_fs_devices *fs_devices = info->fs_devices;
3995	struct list_head *cur;
3996	struct map_lookup *map = NULL;
3997	struct extent_map_tree *em_tree;
3998	struct extent_map *em;
3999	struct btrfs_device_info *devices_info = NULL;
4000	u64 total_avail;
4001	int num_stripes;	/* total number of stripes to allocate */
4002	int data_stripes;	/* number of stripes that count for
4003				   block group size */
4004	int sub_stripes;	/* sub_stripes info for map */
4005	int dev_stripes;	/* stripes per dev */
4006	int devs_max;		/* max devs to use */
4007	int devs_min;		/* min devs needed */
4008	int devs_increment;	/* ndevs has to be a multiple of this */
4009	int ncopies;		/* how many copies of the data there are */
4010	int ret;
 
4011	u64 max_stripe_size;
4012	u64 max_chunk_size;
 
4013	u64 stripe_size;
4014	u64 num_bytes;
4015	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4016	int ndevs;
4017	int i;
4018	int j;
4019	int index;
4020
4021	BUG_ON(!alloc_profile_is_valid(type, 0));
 
4022
4023	if (list_empty(&fs_devices->alloc_list))
4024		return -ENOSPC;
4025
4026	index = __get_raid_index(type);
 
 
 
 
4027
4028	sub_stripes = btrfs_raid_array[index].sub_stripes;
4029	dev_stripes = btrfs_raid_array[index].dev_stripes;
4030	devs_max = btrfs_raid_array[index].devs_max;
4031	devs_min = btrfs_raid_array[index].devs_min;
4032	devs_increment = btrfs_raid_array[index].devs_increment;
4033	ncopies = btrfs_raid_array[index].ncopies;
 
 
 
 
4034
 
4035	if (type & BTRFS_BLOCK_GROUP_DATA) {
4036		max_stripe_size = 1024 * 1024 * 1024;
4037		max_chunk_size = 10 * max_stripe_size;
4038	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4039		/* for larger filesystems, use larger metadata chunks */
4040		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
4041			max_stripe_size = 1024 * 1024 * 1024;
4042		else
4043			max_stripe_size = 256 * 1024 * 1024;
4044		max_chunk_size = max_stripe_size;
4045	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4046		max_stripe_size = 32 * 1024 * 1024;
4047		max_chunk_size = 2 * max_stripe_size;
 
4048	} else {
4049		btrfs_err(info, "invalid chunk type 0x%llx requested\n",
4050		       type);
4051		BUG_ON(1);
4052	}
4053
4054	/* we don't want a chunk larger than 10% of writeable space */
4055	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4056			     max_chunk_size);
4057
4058	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
4059			       GFP_NOFS);
4060	if (!devices_info)
4061		return -ENOMEM;
4062
4063	cur = fs_devices->alloc_list.next;
4064
4065	/*
4066	 * in the first pass through the devices list, we gather information
4067	 * about the available holes on each device.
4068	 */
4069	ndevs = 0;
4070	while (cur != &fs_devices->alloc_list) {
4071		struct btrfs_device *device;
4072		u64 max_avail;
4073		u64 dev_offset;
4074
4075		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4076
4077		cur = cur->next;
4078
4079		if (!device->writeable) {
4080			WARN(1, KERN_ERR
4081			       "BTRFS: read-only device in alloc_list\n");
4082			continue;
4083		}
4084
4085		if (!device->in_fs_metadata ||
4086		    device->is_tgtdev_for_dev_replace)
 
4087			continue;
4088
4089		if (device->total_bytes > device->bytes_used)
4090			total_avail = device->total_bytes - device->bytes_used;
4091		else
4092			total_avail = 0;
4093
4094		/* If there is no space on this device, skip it. */
4095		if (total_avail == 0)
4096			continue;
4097
4098		ret = find_free_dev_extent(trans, device,
4099					   max_stripe_size * dev_stripes,
4100					   &dev_offset, &max_avail);
4101		if (ret && ret != -ENOSPC)
4102			goto error;
4103
4104		if (ret == 0)
4105			max_avail = max_stripe_size * dev_stripes;
4106
4107		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
 
 
 
 
 
4108			continue;
 
4109
4110		if (ndevs == fs_devices->rw_devices) {
4111			WARN(1, "%s: found more than %llu devices\n",
4112			     __func__, fs_devices->rw_devices);
4113			break;
4114		}
4115		devices_info[ndevs].dev_offset = dev_offset;
4116		devices_info[ndevs].max_avail = max_avail;
4117		devices_info[ndevs].total_avail = total_avail;
4118		devices_info[ndevs].dev = device;
4119		++ndevs;
4120	}
 
4121
4122	/*
4123	 * now sort the devices by hole size / available space
4124	 */
4125	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4126	     btrfs_cmp_device_info, NULL);
4127
4128	/* round down to number of usable stripes */
4129	ndevs -= ndevs % devs_increment;
4130
4131	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4132		ret = -ENOSPC;
4133		goto error;
4134	}
4135
4136	if (devs_max && ndevs > devs_max)
4137		ndevs = devs_max;
4138	/*
4139	 * the primary goal is to maximize the number of stripes, so use as many
4140	 * devices as possible, even if the stripes are not maximum sized.
 
 
4141	 */
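	/*
	 * devices_info is sorted by max_avail in descending order, so the
	 * last selected device has the smallest usable hole; limiting the
	 * stripe size to it guarantees every chosen device can hold a full
	 * stripe.
	 */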
4142	stripe_size = devices_info[ndevs-1].max_avail;
4143	num_stripes = ndevs * dev_stripes;
4144
4145	/*
4146	 * this will have to be fixed for RAID1 and RAID10 over
4147	 * more drives
4148	 */
4149	data_stripes = num_stripes / ncopies;
4150
4151	if (type & BTRFS_BLOCK_GROUP_RAID5) {
4152		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4153				 btrfs_super_stripesize(info->super_copy));
4154		data_stripes = num_stripes - 1;
4155	}
4156	if (type & BTRFS_BLOCK_GROUP_RAID6) {
4157		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4158				 btrfs_super_stripesize(info->super_copy));
4159		data_stripes = num_stripes - 2;
 
 
 
4160	}
4161
4162	/*
4163	 * Use the number of data stripes to figure out how big this chunk
4164	 * is really going to be in terms of logical address space,
4165	 * and compare that answer with the max chunk size
4166	 */
4167	if (stripe_size * data_stripes > max_chunk_size) {
4168		u64 mask = (1ULL << 24) - 1;
4169		stripe_size = max_chunk_size;
4170		do_div(stripe_size, data_stripes);
4171
4172		/* bump the answer up to a 16MB boundary */
4173		stripe_size = (stripe_size + mask) & ~mask;
4174
4175		/* but don't go higher than the limits we found
4176		 * while searching for free extents
4177		 */
4178		if (stripe_size > devices_info[ndevs-1].max_avail)
4179			stripe_size = devices_info[ndevs-1].max_avail;
 
 
4180	}
 
4181
4182	do_div(stripe_size, dev_stripes);
4183
4184	/* align to BTRFS_STRIPE_LEN */
4185	do_div(stripe_size, raid_stripe_len);
4186	stripe_size *= raid_stripe_len;
 
4187
4188	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4189	if (!map) {
4190		ret = -ENOMEM;
4191		goto error;
4192	}
4193	map->num_stripes = num_stripes;
4194
4195	for (i = 0; i < ndevs; ++i) {
4196		for (j = 0; j < dev_stripes; ++j) {
4197			int s = i * dev_stripes + j;
4198			map->stripes[s].dev = devices_info[i].dev;
4199			map->stripes[s].physical = devices_info[i].dev_offset +
4200						   j * stripe_size;
4201		}
4202	}
4203	map->sector_size = extent_root->sectorsize;
4204	map->stripe_len = raid_stripe_len;
4205	map->io_align = raid_stripe_len;
4206	map->io_width = raid_stripe_len;
4207	map->type = type;
4208	map->sub_stripes = sub_stripes;
4209
4210	num_bytes = stripe_size * data_stripes;
4211
4212	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4213
4214	em = alloc_extent_map();
4215	if (!em) {
4216		ret = -ENOMEM;
4217		goto error;
4218	}
4219	em->bdev = (struct block_device *)map;
 
4220	em->start = start;
4221	em->len = num_bytes;
4222	em->block_start = 0;
4223	em->block_len = em->len;
4224	em->orig_block_len = stripe_size;
4225
4226	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4227	write_lock(&em_tree->lock);
4228	ret = add_extent_mapping(em_tree, em, 0);
4229	if (!ret) {
4230		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4231		atomic_inc(&em->refs);
4232	}
4233	write_unlock(&em_tree->lock);
4234	if (ret) {
 
4235		free_extent_map(em);
4236		goto error;
4237	}
 
4238
4239	ret = btrfs_make_block_group(trans, extent_root, 0, type,
4240				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4241				     start, num_bytes);
4242	if (ret)
4243		goto error_del_extent;
4244
4245	free_extent_map(em);
4246	check_raid56_incompat_flag(extent_root->fs_info, type);
 
4247
4248	kfree(devices_info);
4249	return 0;
4250
4251error_del_extent:
4252	write_lock(&em_tree->lock);
4253	remove_extent_mapping(em_tree, em);
4254	write_unlock(&em_tree->lock);
4255
4256	/* One for our allocation */
4257	free_extent_map(em);
4258	/* One for the tree reference */
4259	free_extent_map(em);
4260error:
4261	kfree(map);
4262	kfree(devices_info);
4263	return ret;
4264}
4265
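/*
 * Second phase of chunk allocation: look up the mapping created by
 * __btrfs_alloc_chunk(), allocate the per-device extents, build the chunk
 * item and insert it into the chunk tree (and, for SYSTEM chunks, into the
 * superblock array as well).
 */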
4266int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4267				struct btrfs_root *extent_root,
4268				u64 chunk_offset, u64 chunk_size)
4269{
 
 
4270	struct btrfs_key key;
4271	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4272	struct btrfs_device *device;
4273	struct btrfs_chunk *chunk;
4274	struct btrfs_stripe *stripe;
4275	struct extent_map_tree *em_tree;
4276	struct extent_map *em;
4277	struct map_lookup *map;
4278	size_t item_size;
4279	u64 dev_offset;
4280	u64 stripe_size;
4281	int i = 0;
4282	int ret;
4283
4284	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4285	read_lock(&em_tree->lock);
4286	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4287	read_unlock(&em_tree->lock);
4288
4289	if (!em) {
4290		btrfs_crit(extent_root->fs_info, "unable to find logical "
4291			   "%Lu len %Lu", chunk_offset, chunk_size);
4292		return -EINVAL;
 
4293	}
4294
4295	if (em->start != chunk_offset || em->len != chunk_size) {
4296		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4297			  " %Lu-%Lu, found %Lu-%Lu\n", chunk_offset,
4298			  chunk_size, em->start, em->len);
4299		free_extent_map(em);
4300		return -EINVAL;
4301	}
4302
4303	map = (struct map_lookup *)em->bdev;
4304	item_size = btrfs_chunk_item_size(map->num_stripes);
4305	stripe_size = em->orig_block_len;
4306
4307	chunk = kzalloc(item_size, GFP_NOFS);
4308	if (!chunk) {
4309		ret = -ENOMEM;
 
4310		goto out;
4311	}
4312
4313	for (i = 0; i < map->num_stripes; i++) {
4314		device = map->stripes[i].dev;
4315		dev_offset = map->stripes[i].physical;
4316
4317		device->bytes_used += stripe_size;
4318		ret = btrfs_update_device(trans, device);
4319		if (ret)
4320			goto out;
4321		ret = btrfs_alloc_dev_extent(trans, device,
4322					     chunk_root->root_key.objectid,
4323					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4324					     chunk_offset, dev_offset,
4325					     stripe_size);
4326		if (ret)
4327			goto out;
4328	}
4329
4330	spin_lock(&extent_root->fs_info->free_chunk_lock);
4331	extent_root->fs_info->free_chunk_space -= (stripe_size *
4332						   map->num_stripes);
4333	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4334
4335	stripe = &chunk->stripe;
4336	for (i = 0; i < map->num_stripes; i++) {
4337		device = map->stripes[i].dev;
4338		dev_offset = map->stripes[i].physical;
4339
4340		btrfs_set_stack_stripe_devid(stripe, device->devid);
4341		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4342		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4343		stripe++;
4344	}
4345
4346	btrfs_set_stack_chunk_length(chunk, chunk_size);
4347	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4348	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4349	btrfs_set_stack_chunk_type(chunk, map->type);
4350	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4351	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4352	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4353	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4354	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4355
4356	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4357	key.type = BTRFS_CHUNK_ITEM_KEY;
4358	key.offset = chunk_offset;
4359
4360	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4361	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4362		/*
4363		 * TODO: Cleanup of inserted chunk root in case of
4364		 * failure.
4365		 */
4366		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4367					     item_size);
 
 
4368	}
4369
4370out:
4371	kfree(chunk);
4372	free_extent_map(em);
4373	return ret;
4374}
4375
4376/*
4377 * Chunk allocation falls into two parts. The first part does the work
4378 * that makes the newly allocated chunk usable, but does not modify the
4379 * chunk tree. The second part does the work that requires modifying
4380 * the chunk tree. This division is important for the
4381 * bootstrap process of adding storage to a seed btrfs.
4382 */
4383int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4384		      struct btrfs_root *extent_root, u64 type)
4385{
4386	u64 chunk_offset;
4387
4388	chunk_offset = find_next_chunk(extent_root->fs_info);
4389	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4390}
4391
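/*
 * Allocate the first metadata and system chunks on a newly added writable
 * device and record the device in the chunk tree; used when a writable
 * filesystem is sprouted from a read-only seed.
 */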
4392static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4393					 struct btrfs_root *root,
4394					 struct btrfs_device *device)
4395{
4396	u64 chunk_offset;
4397	u64 sys_chunk_offset;
4398	u64 alloc_profile;
4399	struct btrfs_fs_info *fs_info = root->fs_info;
4400	struct btrfs_root *extent_root = fs_info->extent_root;
4401	int ret;
4402
4403	chunk_offset = find_next_chunk(fs_info);
4404	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4405	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4406				  alloc_profile);
4407	if (ret)
4408		return ret;
4409
4410	sys_chunk_offset = find_next_chunk(root->fs_info);
4411	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4412	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4413				  alloc_profile);
4414	if (ret) {
4415		btrfs_abort_transaction(trans, root, ret);
4416		goto out;
4417	}
4418
4419	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4420	if (ret)
4421		btrfs_abort_transaction(trans, root, ret);
4422out:
4423	return ret;
4424}
4425
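/*
 * A chunk is reported read-only when any of its stripes sits on a device
 * that is not writable, unless the filesystem is mounted degraded.
 */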
4426int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4427{
4428	struct extent_map *em;
4429	struct map_lookup *map;
4430	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4431	int readonly = 0;
4432	int i;
 
4433
4434	read_lock(&map_tree->map_tree.lock);
4435	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4436	read_unlock(&map_tree->map_tree.lock);
4437	if (!em)
4438		return 1;
4439
4440	if (btrfs_test_opt(root, DEGRADED)) {
4441		free_extent_map(em);
4442		return 0;
4443	}
4444
4445	map = (struct map_lookup *)em->bdev;
4446	for (i = 0; i < map->num_stripes; i++) {
4447		if (!map->stripes[i].dev->writeable) {
4448			readonly = 1;
4449			break;
4450		}
4451	}
4452	free_extent_map(em);
4453	return readonly;
4454}
4455
4456void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4457{
4458	extent_map_tree_init(&tree->map_tree);
4459}
4460
4461void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4462{
4463	struct extent_map *em;
4464
4465	while (1) {
4466		write_lock(&tree->map_tree.lock);
4467		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4468		if (em)
4469			remove_extent_mapping(&tree->map_tree, em);
4470		write_unlock(&tree->map_tree.lock);
4471		if (!em)
4472			break;
4473		kfree(em->bdev);
4474		/* once for us */
4475		free_extent_map(em);
4476		/* once for the tree */
4477		free_extent_map(em);
4478	}
4479}
4480
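/*
 * Return how many copies of the data at @logical can be read: all stripes
 * for DUP/RAID1, the sub-stripes for RAID10, 2 for RAID5 and 3 for RAID6
 * (counting parity reconstruction), otherwise 1.  One extra copy is reported
 * while a device replace is running, since the target drive mirrors the
 * source.
 */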
4481int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4482{
4483	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4484	struct extent_map *em;
4485	struct map_lookup *map;
4486	struct extent_map_tree *em_tree = &map_tree->map_tree;
4487	int ret;
4488
4489	read_lock(&em_tree->lock);
4490	em = lookup_extent_mapping(em_tree, logical, len);
4491	read_unlock(&em_tree->lock);
4492
4493	/*
4494	 * We could return errors for these cases, but that could get ugly and
4495	 * we'd probably end up doing the same thing, which is to do nothing
4496	 * else and exit, so return 1 so the callers don't try to use other copies.
4497	 */
4498	if (!em) {
4499		btrfs_crit(fs_info, "No mapping for %Lu-%Lu\n", logical,
4500			    logical+len);
4501		return 1;
4502	}
4503
4504	if (em->start > logical || em->start + em->len < logical) {
4505		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4506			    "%Lu-%Lu\n", logical, logical+len, em->start,
4507			    em->start + em->len);
4508		free_extent_map(em);
4509		return 1;
4510	}
4511
4512	map = (struct map_lookup *)em->bdev;
4513	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4514		ret = map->num_stripes;
4515	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4516		ret = map->sub_stripes;
4517	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4518		ret = 2;
4519	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4520		ret = 3;
4521	else
4522		ret = 1;
4523	free_extent_map(em);
4524
4525	btrfs_dev_replace_lock(&fs_info->dev_replace);
4526	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4527		ret++;
4528	btrfs_dev_replace_unlock(&fs_info->dev_replace);
4529
4530	return ret;
4531}
4532
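/*
 * Return the full stripe length at @logical: stripe_len * nr_data_stripes
 * for RAID5/6 chunks (the unit that parity is computed over), otherwise a
 * single sector.
 */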
4533unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4534				    struct btrfs_mapping_tree *map_tree,
4535				    u64 logical)
4536{
4537	struct extent_map *em;
4538	struct map_lookup *map;
4539	struct extent_map_tree *em_tree = &map_tree->map_tree;
4540	unsigned long len = root->sectorsize;
4541
4542	read_lock(&em_tree->lock);
4543	em = lookup_extent_mapping(em_tree, logical, len);
4544	read_unlock(&em_tree->lock);
4545	BUG_ON(!em);
4546
4547	BUG_ON(em->start > logical || em->start + em->len < logical);
4548	map = (struct map_lookup *)em->bdev;
4549	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4550			 BTRFS_BLOCK_GROUP_RAID6)) {
4551		len = map->stripe_len * nr_data_stripes(map);
4552	}
4553	free_extent_map(em);
4554	return len;
4555}
4556
4557int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4558			   u64 logical, u64 len, int mirror_num)
4559{
4560	struct extent_map *em;
4561	struct map_lookup *map;
4562	struct extent_map_tree *em_tree = &map_tree->map_tree;
4563	int ret = 0;
4564
4565	read_lock(&em_tree->lock);
4566	em = lookup_extent_mapping(em_tree, logical, len);
4567	read_unlock(&em_tree->lock);
4568	BUG_ON(!em);
4569
4570	BUG_ON(em->start > logical || em->start + em->len < logical);
4571	map = (struct map_lookup *)em->bdev;
4572	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4573			 BTRFS_BLOCK_GROUP_RAID6))
4574		ret = 1;
4575	free_extent_map(em);
4576	return ret;
4577}
4578
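/*
 * Pick a live mirror among stripes [first, first + num): prefer @optimal,
 * skip stripes whose device has no bdev, and on the first pass also avoid
 * the dev-replace source device when reads from it should be avoided.
 */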
4579static int find_live_mirror(struct btrfs_fs_info *fs_info,
4580			    struct map_lookup *map, int first, int num,
4581			    int optimal, int dev_replace_is_ongoing)
4582{
4583	int i;
4584	int tolerance;
4585	struct btrfs_device *srcdev;
4586
4587	if (dev_replace_is_ongoing &&
4588	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4589	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4590		srcdev = fs_info->dev_replace.srcdev;
4591	else
4592		srcdev = NULL;
4593
4594	/*
4595	 * try to avoid the drive that is the source drive for a
4596	 * dev-replace procedure, only choose it if no other non-missing
4597	 * mirror is available
4598	 */
4599	for (tolerance = 0; tolerance < 2; tolerance++) {
4600		if (map->stripes[optimal].dev->bdev &&
4601		    (tolerance || map->stripes[optimal].dev != srcdev))
4602			return optimal;
4603		for (i = first; i < first + num; i++) {
4604			if (map->stripes[i].dev->bdev &&
4605			    (tolerance || map->stripes[i].dev != srcdev))
4606				return i;
4607		}
4608	}
4609
4610	/* we couldn't find one that doesn't fail.  Just return something
4611	 * and the io error handling code will clean up eventually
4612	 */
4613	return optimal;
4614}
4615
4616static inline int parity_smaller(u64 a, u64 b)
4617{
4618	return a > b;
4619}
4620
4621/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4622static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4623{
4624	struct btrfs_bio_stripe s;
4625	int i;
4626	u64 l;
4627	int again = 1;
4628
4629	while (again) {
4630		again = 0;
4631		for (i = 0; i < bbio->num_stripes - 1; i++) {
4632			if (parity_smaller(raid_map[i], raid_map[i+1])) {
4633				s = bbio->stripes[i];
4634				l = raid_map[i];
4635				bbio->stripes[i] = bbio->stripes[i+1];
4636				raid_map[i] = raid_map[i+1];
4637				bbio->stripes[i+1] = s;
4638				raid_map[i+1] = l;
4639				again = 1;
4640			}
4641		}
4642	}
4643}
4644
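/*
 * Map a logical range onto physical stripes.  *length is trimmed so that the
 * I/O does not cross a stripe boundary (a full data stripe for RAID5/6
 * writes), and *bbio_ret describes every device and physical offset the
 * request has to touch for the given @rw.  For RAID5/6 writes and recovery,
 * *raid_map_ret additionally returns the logical address of each stripe with
 * the P/Q stripes sorted to the end.
 */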
4645static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4646			     u64 logical, u64 *length,
4647			     struct btrfs_bio **bbio_ret,
4648			     int mirror_num, u64 **raid_map_ret)
4649{
4650	struct extent_map *em;
4651	struct map_lookup *map;
4652	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4653	struct extent_map_tree *em_tree = &map_tree->map_tree;
4654	u64 offset;
4655	u64 stripe_offset;
4656	u64 stripe_end_offset;
4657	u64 stripe_nr;
4658	u64 stripe_nr_orig;
4659	u64 stripe_nr_end;
4660	u64 stripe_len;
4661	u64 *raid_map = NULL;
4662	int stripe_index;
4663	int i;
4664	int ret = 0;
4665	int num_stripes;
4666	int max_errors = 0;
4667	struct btrfs_bio *bbio = NULL;
4668	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4669	int dev_replace_is_ongoing = 0;
4670	int num_alloc_stripes;
4671	int patch_the_first_stripe_for_dev_replace = 0;
4672	u64 physical_to_patch_in_first_stripe = 0;
4673	u64 raid56_full_stripe_start = (u64)-1;
4674
4675	read_lock(&em_tree->lock);
4676	em = lookup_extent_mapping(em_tree, logical, *length);
4677	read_unlock(&em_tree->lock);
4678
4679	if (!em) {
4680		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4681			logical, *length);
4682		return -EINVAL;
4683	}
4684
4685	if (em->start > logical || em->start + em->len < logical) {
4686		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4687			   "found %Lu-%Lu\n", logical, em->start,
4688			   em->start + em->len);
4689		free_extent_map(em);
4690		return -EINVAL;
4691	}
4692
4693	map = (struct map_lookup *)em->bdev;
4694	offset = logical - em->start;
4695
4696	stripe_len = map->stripe_len;
4697	stripe_nr = offset;
4698	/*
4699	 * stripe_nr counts the total number of stripes we have to stride
4700	 * to get to this block
4701	 */
4702	do_div(stripe_nr, stripe_len);
4703
4704	stripe_offset = stripe_nr * stripe_len;
4705	BUG_ON(offset < stripe_offset);
4706
4707	/* stripe_offset is the offset of this block in its stripe */
4708	stripe_offset = offset - stripe_offset;
4709
4710	/* if we're here for raid56, we need to know the stripe aligned start */
4711	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4712		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4713		raid56_full_stripe_start = offset;
4714
4715		/* allow a write of a full stripe, but make sure we don't
4716		 * allow straddling of stripes
4717		 */
4718		do_div(raid56_full_stripe_start, full_stripe_len);
4719		raid56_full_stripe_start *= full_stripe_len;
4720	}
4721
4722	if (rw & REQ_DISCARD) {
4723		/* we don't discard raid56 yet */
4724		if (map->type &
4725		    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4726			ret = -EOPNOTSUPP;
4727			goto out;
4728		}
4729		*length = min_t(u64, em->len - offset, *length);
4730	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4731		u64 max_len;
4732		/* For writes to RAID[56], allow a full stripeset across all disks.
4733		   For other RAID types and for RAID[56] reads, just allow a single
4734		   stripe (on a single disk). */
4735		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4736		    (rw & REQ_WRITE)) {
4737			max_len = stripe_len * nr_data_stripes(map) -
4738				(offset - raid56_full_stripe_start);
4739		} else {
4740			/* we limit the length of each bio to what fits in a stripe */
4741			max_len = stripe_len - stripe_offset;
4742		}
4743		*length = min_t(u64, em->len - offset, max_len);
4744	} else {
4745		*length = em->len - offset;
4746	}
4747
4748	/* This is for when we're called from btrfs_merge_bio_hook() and all
4749	   it cares about is the length */
4750	if (!bbio_ret)
4751		goto out;
4752
4753	btrfs_dev_replace_lock(dev_replace);
4754	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4755	if (!dev_replace_is_ongoing)
4756		btrfs_dev_replace_unlock(dev_replace);
4757
4758	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4759	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4760	    dev_replace->tgtdev != NULL) {
4761		/*
4762		 * in dev-replace case, for repair case (that's the only
4763		 * case where the mirror is selected explicitly when
4764		 * calling btrfs_map_block), blocks left of the left cursor
4765		 * can also be read from the target drive.
4766		 * For REQ_GET_READ_MIRRORS, the target drive is added as
4767		 * the last one to the array of stripes. For READ, it also
4768		 * needs to be supported using the same mirror number.
4769		 * If the requested block is not left of the left cursor,
4770		 * EIO is returned. This can happen because btrfs_num_copies()
4771		 * returns one more in the dev-replace case.
4772		 */
4773		u64 tmp_length = *length;
4774		struct btrfs_bio *tmp_bbio = NULL;
4775		int tmp_num_stripes;
4776		u64 srcdev_devid = dev_replace->srcdev->devid;
4777		int index_srcdev = 0;
4778		int found = 0;
4779		u64 physical_of_found = 0;
4780
4781		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4782			     logical, &tmp_length, &tmp_bbio, 0, NULL);
4783		if (ret) {
4784			WARN_ON(tmp_bbio != NULL);
4785			goto out;
4786		}
4787
4788		tmp_num_stripes = tmp_bbio->num_stripes;
4789		if (mirror_num > tmp_num_stripes) {
4790			/*
4791			 * REQ_GET_READ_MIRRORS does not contain this
4792			 * mirror, that means that the requested area
4793			 * is not left of the left cursor
4794			 */
4795			ret = -EIO;
4796			kfree(tmp_bbio);
4797			goto out;
4798		}
4799
4800		/*
4801		 * process the rest of the function using the mirror_num
4802		 * of the source drive. Therefore look it up first.
4803		 * At the end, patch the device pointer to that of the
4804		 * target drive.
4805		 */
4806		for (i = 0; i < tmp_num_stripes; i++) {
4807			if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4808				/*
4809				 * In case of DUP, in order to keep it
4810				 * simple, only add the mirror with the
4811				 * lowest physical address
4812				 */
4813				if (found &&
4814				    physical_of_found <=
4815				     tmp_bbio->stripes[i].physical)
4816					continue;
4817				index_srcdev = i;
4818				found = 1;
4819				physical_of_found =
4820					tmp_bbio->stripes[i].physical;
4821			}
4822		}
4823
4824		if (found) {
4825			mirror_num = index_srcdev + 1;
4826			patch_the_first_stripe_for_dev_replace = 1;
4827			physical_to_patch_in_first_stripe = physical_of_found;
4828		} else {
4829			WARN_ON(1);
4830			ret = -EIO;
4831			kfree(tmp_bbio);
4832			goto out;
4833		}
4834
4835		kfree(tmp_bbio);
4836	} else if (mirror_num > map->num_stripes) {
4837		mirror_num = 0;
4838	}
4839
4840	num_stripes = 1;
4841	stripe_index = 0;
4842	stripe_nr_orig = stripe_nr;
4843	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4844	do_div(stripe_nr_end, map->stripe_len);
4845	stripe_end_offset = stripe_nr_end * map->stripe_len -
4846			    (offset + *length);
4847
4848	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4849		if (rw & REQ_DISCARD)
4850			num_stripes = min_t(u64, map->num_stripes,
4851					    stripe_nr_end - stripe_nr_orig);
4852		stripe_index = do_div(stripe_nr, map->num_stripes);
4853	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4854		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4855			num_stripes = map->num_stripes;
4856		else if (mirror_num)
4857			stripe_index = mirror_num - 1;
4858		else {
4859			stripe_index = find_live_mirror(fs_info, map, 0,
4860					    map->num_stripes,
4861					    current->pid % map->num_stripes,
4862					    dev_replace_is_ongoing);
4863			mirror_num = stripe_index + 1;
4864		}
4865
4866	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4867		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4868			num_stripes = map->num_stripes;
4869		} else if (mirror_num) {
4870			stripe_index = mirror_num - 1;
4871		} else {
4872			mirror_num = 1;
4873		}
4874
4875	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4876		int factor = map->num_stripes / map->sub_stripes;
4877
4878		stripe_index = do_div(stripe_nr, factor);
4879		stripe_index *= map->sub_stripes;
4880
4881		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4882			num_stripes = map->sub_stripes;
4883		else if (rw & REQ_DISCARD)
4884			num_stripes = min_t(u64, map->sub_stripes *
4885					    (stripe_nr_end - stripe_nr_orig),
4886					    map->num_stripes);
4887		else if (mirror_num)
4888			stripe_index += mirror_num - 1;
4889		else {
4890			int old_stripe_index = stripe_index;
4891			stripe_index = find_live_mirror(fs_info, map,
4892					      stripe_index,
4893					      map->sub_stripes, stripe_index +
4894					      current->pid % map->sub_stripes,
4895					      dev_replace_is_ongoing);
4896			mirror_num = stripe_index - old_stripe_index + 1;
4897		}
4898
4899	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4900				BTRFS_BLOCK_GROUP_RAID6)) {
4901		u64 tmp;
4902
4903		if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4904		    && raid_map_ret) {
4905			int i, rot;
4906
4907			/* push stripe_nr back to the start of the full stripe */
4908			stripe_nr = raid56_full_stripe_start;
4909			do_div(stripe_nr, stripe_len);
4910
4911			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4912
4913			/* RAID[56] write or recovery. Return all stripes */
4914			num_stripes = map->num_stripes;
4915			max_errors = nr_parity_stripes(map);
4916
4917			raid_map = kmalloc_array(num_stripes, sizeof(u64),
4918					   GFP_NOFS);
4919			if (!raid_map) {
4920				ret = -ENOMEM;
4921				goto out;
4922			}
4923
4924			/* Work out the disk rotation on this stripe-set */
4925			tmp = stripe_nr;
4926			rot = do_div(tmp, num_stripes);
4927
4928			/* Fill in the logical address of each stripe */
4929			tmp = stripe_nr * nr_data_stripes(map);
4930			for (i = 0; i < nr_data_stripes(map); i++)
4931				raid_map[(i+rot) % num_stripes] =
4932					em->start + (tmp + i) * map->stripe_len;
4933
4934			raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
4935			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4936				raid_map[(i+rot+1) % num_stripes] =
4937					RAID6_Q_STRIPE;
4938
4939			*length = map->stripe_len;
4940			stripe_index = 0;
4941			stripe_offset = 0;
4942		} else {
4943			/*
4944			 * Mirror #0 or #1 means the original data block.
4945			 * Mirror #2 is RAID5 parity block.
4946			 * Mirror #3 is RAID6 Q block.
4947			 */
4948			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4949			if (mirror_num > 1)
4950				stripe_index = nr_data_stripes(map) +
4951						mirror_num - 2;
4952
4953			/* We distribute the parity blocks across stripes */
4954			tmp = stripe_nr + stripe_index;
4955			stripe_index = do_div(tmp, map->num_stripes);
4956		}
4957	} else {
4958		/*
4959		 * after this do_div call, stripe_nr is the number of stripes
4960		 * on this device we have to walk to find the data, and
4961		 * stripe_index is the number of our device in the stripe array
4962		 */
4963		stripe_index = do_div(stripe_nr, map->num_stripes);
4964		mirror_num = stripe_index + 1;
4965	}
4966	BUG_ON(stripe_index >= map->num_stripes);
4967
4968	num_alloc_stripes = num_stripes;
4969	if (dev_replace_is_ongoing) {
4970		if (rw & (REQ_WRITE | REQ_DISCARD))
4971			num_alloc_stripes <<= 1;
4972		if (rw & REQ_GET_READ_MIRRORS)
4973			num_alloc_stripes++;
4974	}
4975	bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4976	if (!bbio) {
4977		kfree(raid_map);
4978		ret = -ENOMEM;
4979		goto out;
4980	}
4981	atomic_set(&bbio->error, 0);
4982
4983	if (rw & REQ_DISCARD) {
4984		int factor = 0;
4985		int sub_stripes = 0;
4986		u64 stripes_per_dev = 0;
4987		u32 remaining_stripes = 0;
4988		u32 last_stripe = 0;
4989
4990		if (map->type &
4991		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4992			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4993				sub_stripes = 1;
4994			else
4995				sub_stripes = map->sub_stripes;
4996
4997			factor = map->num_stripes / sub_stripes;
4998			stripes_per_dev = div_u64_rem(stripe_nr_end -
4999						      stripe_nr_orig,
5000						      factor,
5001						      &remaining_stripes);
5002			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5003			last_stripe *= sub_stripes;
5004		}
5005
5006		for (i = 0; i < num_stripes; i++) {
5007			bbio->stripes[i].physical =
5008				map->stripes[stripe_index].physical +
5009				stripe_offset + stripe_nr * map->stripe_len;
5010			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5011
5012			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5013					 BTRFS_BLOCK_GROUP_RAID10)) {
5014				bbio->stripes[i].length = stripes_per_dev *
5015							  map->stripe_len;
5016
5017				if (i / sub_stripes < remaining_stripes)
5018					bbio->stripes[i].length +=
5019						map->stripe_len;
5020
5021				/*
5022				 * Special for the first stripe and
5023				 * the last stripe:
5024				 *
5025				 * |-------|...|-------|
5026				 *     |----------|
5027				 *    off     end_off
5028				 */
5029				if (i < sub_stripes)
5030					bbio->stripes[i].length -=
5031						stripe_offset;
5032
5033				if (stripe_index >= last_stripe &&
5034				    stripe_index <= (last_stripe +
5035						     sub_stripes - 1))
5036					bbio->stripes[i].length -=
5037						stripe_end_offset;
5038
5039				if (i == sub_stripes - 1)
5040					stripe_offset = 0;
5041			} else
5042				bbio->stripes[i].length = *length;
5043
5044			stripe_index++;
5045			if (stripe_index == map->num_stripes) {
5046				/* This could only happen for RAID0/10 */
5047				stripe_index = 0;
5048				stripe_nr++;
5049			}
5050		}
5051	} else {
5052		for (i = 0; i < num_stripes; i++) {
5053			bbio->stripes[i].physical =
5054				map->stripes[stripe_index].physical +
5055				stripe_offset +
5056				stripe_nr * map->stripe_len;
5057			bbio->stripes[i].dev =
5058				map->stripes[stripe_index].dev;
5059			stripe_index++;
5060		}
5061	}
5062
5063	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
5064		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5065				 BTRFS_BLOCK_GROUP_RAID10 |
5066				 BTRFS_BLOCK_GROUP_RAID5 |
5067				 BTRFS_BLOCK_GROUP_DUP)) {
5068			max_errors = 1;
5069		} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5070			max_errors = 2;
5071		}
5072	}
5073
5074	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5075	    dev_replace->tgtdev != NULL) {
5076		int index_where_to_add;
5077		u64 srcdev_devid = dev_replace->srcdev->devid;
5078
5079		/*
5080		 * duplicate the write operations while the dev replace
5081		 * procedure is running. Since the copying of the old disk
5082		 * to the new disk takes place at run time while the
5083		 * filesystem is mounted writable, the regular write
5084		 * operations to the old disk have to be duplicated to go
5085		 * to the new disk as well.
5086		 * Note that device->missing is handled by the caller, and
5087		 * that the write to the old disk is already set up in the
5088		 * stripes array.
5089		 */
5090		index_where_to_add = num_stripes;
5091		for (i = 0; i < num_stripes; i++) {
5092			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5093				/* write to new disk, too */
5094				struct btrfs_bio_stripe *new =
5095					bbio->stripes + index_where_to_add;
5096				struct btrfs_bio_stripe *old =
5097					bbio->stripes + i;
5098
5099				new->physical = old->physical;
5100				new->length = old->length;
5101				new->dev = dev_replace->tgtdev;
5102				index_where_to_add++;
5103				max_errors++;
5104			}
5105		}
5106		num_stripes = index_where_to_add;
5107	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5108		   dev_replace->tgtdev != NULL) {
5109		u64 srcdev_devid = dev_replace->srcdev->devid;
5110		int index_srcdev = 0;
5111		int found = 0;
5112		u64 physical_of_found = 0;
5113
5114		/*
5115		 * During the dev-replace procedure, the target drive can
5116		 * also be used to read data in case it is needed to repair
5117		 * a corrupt block elsewhere. This is possible if the
5118		 * requested area is left of the left cursor. In this area,
5119		 * the target drive is a full copy of the source drive.
5120		 */
5121		for (i = 0; i < num_stripes; i++) {
5122			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5123				/*
5124				 * In case of DUP, in order to keep it
5125				 * simple, only add the mirror with the
5126				 * lowest physical address
5127				 */
5128				if (found &&
5129				    physical_of_found <=
5130				     bbio->stripes[i].physical)
5131					continue;
5132				index_srcdev = i;
5133				found = 1;
5134				physical_of_found = bbio->stripes[i].physical;
5135			}
5136		}
5137		if (found) {
5138			u64 length = map->stripe_len;
5139
5140			if (physical_of_found + length <=
5141			    dev_replace->cursor_left) {
5142				struct btrfs_bio_stripe *tgtdev_stripe =
5143					bbio->stripes + num_stripes;
5144
5145				tgtdev_stripe->physical = physical_of_found;
5146				tgtdev_stripe->length =
5147					bbio->stripes[index_srcdev].length;
5148				tgtdev_stripe->dev = dev_replace->tgtdev;
5149
5150				num_stripes++;
5151			}
5152		}
5153	}
5154
5155	*bbio_ret = bbio;
5156	bbio->num_stripes = num_stripes;
5157	bbio->max_errors = max_errors;
5158	bbio->mirror_num = mirror_num;
 
5159
5160	/*
5161	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
5162	 * mirror_num == num_stripes + 1 && dev_replace target drive is
5163	 * available as a mirror
5164	 */
5165	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5166		WARN_ON(num_stripes > 1);
5167		bbio->stripes[0].dev = dev_replace->tgtdev;
5168		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5169		bbio->mirror_num = map->num_stripes + 1;
5170	}
5171	if (raid_map) {
5172		sort_parity_stripes(bbio, raid_map);
5173		*raid_map_ret = raid_map;
5174	}
5175out:
5176	if (dev_replace_is_ongoing)
5177		btrfs_dev_replace_unlock(dev_replace);
5178	free_extent_map(em);
5179	return ret;
5180}
5181
5182int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5183		      u64 logical, u64 *length,
5184		      struct btrfs_bio **bbio_ret, int mirror_num)
5185{
5186	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5187				 mirror_num, NULL);
5188}
5189
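/*
 * Reverse mapping: compute all logical addresses within the chunk starting
 * at @chunk_start that map to @physical on device @devid.  The addresses are
 * returned in an allocated array via *logical (freed by the caller) together
 * with their count and the stripe length that was used.
 */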
5190int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5191		     u64 chunk_start, u64 physical, u64 devid,
5192		     u64 **logical, int *naddrs, int *stripe_len)
5193{
5194	struct extent_map_tree *em_tree = &map_tree->map_tree;
5195	struct extent_map *em;
5196	struct map_lookup *map;
5197	u64 *buf;
5198	u64 bytenr;
5199	u64 length;
5200	u64 stripe_nr;
5201	u64 rmap_len;
5202	int i, j, nr = 0;
5203
5204	read_lock(&em_tree->lock);
5205	em = lookup_extent_mapping(em_tree, chunk_start, 1);
5206	read_unlock(&em_tree->lock);
5207
5208	if (!em) {
5209		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
5210		       chunk_start);
5211		return -EIO;
5212	}
5213
5214	if (em->start != chunk_start) {
5215		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
5216		       em->start, chunk_start);
5217		free_extent_map(em);
5218		return -EIO;
5219	}
5220	map = (struct map_lookup *)em->bdev;
5221
5222	length = em->len;
5223	rmap_len = map->stripe_len;
5224
5225	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5226		do_div(length, map->num_stripes / map->sub_stripes);
5227	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5228		do_div(length, map->num_stripes);
5229	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
5230			      BTRFS_BLOCK_GROUP_RAID6)) {
5231		do_div(length, nr_data_stripes(map));
5232		rmap_len = map->stripe_len * nr_data_stripes(map);
5233	}
5234
5235	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
5236	BUG_ON(!buf); /* -ENOMEM */
5237
5238	for (i = 0; i < map->num_stripes; i++) {
5239		if (devid && map->stripes[i].dev->devid != devid)
5240			continue;
5241		if (map->stripes[i].physical > physical ||
5242		    map->stripes[i].physical + length <= physical)
5243			continue;
5244
5245		stripe_nr = physical - map->stripes[i].physical;
5246		do_div(stripe_nr, map->stripe_len);
5247
5248		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5249			stripe_nr = stripe_nr * map->num_stripes + i;
5250			do_div(stripe_nr, map->sub_stripes);
5251		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5252			stripe_nr = stripe_nr * map->num_stripes + i;
5253		} /* else if RAID[56], multiply by nr_data_stripes().
5254		   * Alternatively, just use rmap_len below instead of
5255		   * map->stripe_len */
5256
5257		bytenr = chunk_start + stripe_nr * rmap_len;
5258		WARN_ON(nr >= map->num_stripes);
5259		for (j = 0; j < nr; j++) {
5260			if (buf[j] == bytenr)
5261				break;
5262		}
5263		if (j == nr) {
5264			WARN_ON(nr >= map->num_stripes);
5265			buf[nr++] = bytenr;
5266		}
5267	}
5268
5269	*logical = buf;
5270	*naddrs = nr;
5271	*stripe_len = rmap_len;
5272
5273	free_extent_map(em);
5274	return 0;
5275}
5276
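/*
 * Completion handler for one stripe of a mapped bio: account per-device I/O
 * error statistics and, once the last stripe has finished, complete the
 * original bio.  -EIO is reported only if more stripes failed than the
 * btrfs_bio can tolerate (max_errors).
 */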
5277static void btrfs_end_bio(struct bio *bio, int err)
5278{
5279	struct btrfs_bio *bbio = bio->bi_private;
5280	struct btrfs_device *dev = bbio->stripes[0].dev;
5281	int is_orig_bio = 0;
5282
5283	if (err) {
5284		atomic_inc(&bbio->error);
5285		if (err == -EIO || err == -EREMOTEIO) {
5286			unsigned int stripe_index =
5287				btrfs_io_bio(bio)->stripe_index;
5288
5289			BUG_ON(stripe_index >= bbio->num_stripes);
5290			dev = bbio->stripes[stripe_index].dev;
5291			if (dev->bdev) {
5292				if (bio->bi_rw & WRITE)
5293					btrfs_dev_stat_inc(dev,
5294						BTRFS_DEV_STAT_WRITE_ERRS);
5295				else
5296					btrfs_dev_stat_inc(dev,
5297						BTRFS_DEV_STAT_READ_ERRS);
5298				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5299					btrfs_dev_stat_inc(dev,
5300						BTRFS_DEV_STAT_FLUSH_ERRS);
5301				btrfs_dev_stat_print_on_error(dev);
5302			}
5303		}
5304	}
5305
5306	if (bio == bbio->orig_bio)
5307		is_orig_bio = 1;
5308
5309	btrfs_bio_counter_dec(bbio->fs_info);
5310
5311	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5312		if (!is_orig_bio) {
5313			bio_put(bio);
5314			bio = bbio->orig_bio;
5315		}
5316
5317		/*
5318		 * We have the original bio now, so increment bi_remaining to
5319		 * account for it in endio.
5320		 */
5321		atomic_inc(&bio->bi_remaining);
5322
5323		bio->bi_private = bbio->private;
5324		bio->bi_end_io = bbio->end_io;
5325		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5326		/* only send an error to the higher layers if it is
5327		 * beyond the tolerance of the btrfs bio
5328		 */
5329		if (atomic_read(&bbio->error) > bbio->max_errors) {
5330			err = -EIO;
5331		} else {
5332			/*
5333			 * this bio is actually up to date, we didn't
5334			 * go over the max number of errors
5335			 */
5336			set_bit(BIO_UPTODATE, &bio->bi_flags);
5337			err = 0;
5338		}
5339		kfree(bbio);
5340
5341		bio_endio(bio, err);
5342	} else if (!is_orig_bio) {
5343		bio_put(bio);
5344	}
5345}
5346
5347/*
5348 * see run_scheduled_bios for a description of why bios are collected for
5349 * async submit.
5350 *
5351 * This will add one bio to the pending list for a device and make sure
5352 * the work struct is scheduled.
5353 */
5354static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5355					struct btrfs_device *device,
5356					int rw, struct bio *bio)
5357{
5358	int should_queue = 1;
5359	struct btrfs_pending_bios *pending_bios;
5360
5361	if (device->missing || !device->bdev) {
5362		bio_endio(bio, -EIO);
5363		return;
5364	}
5365
5366	/* don't bother with additional async steps for reads, right now */
5367	if (!(rw & REQ_WRITE)) {
5368		bio_get(bio);
5369		btrfsic_submit_bio(rw, bio);
5370		bio_put(bio);
5371		return;
5372	}
5373
5374	/*
5375	 * nr_async_bios allows us to reliably return congestion to the
5376	 * higher layers.  Otherwise, the async bio makes it appear we have
5377	 * made progress against dirty pages when we've really just put it
5378	 * on a queue for later
5379	 */
5380	atomic_inc(&root->fs_info->nr_async_bios);
5381	WARN_ON(bio->bi_next);
5382	bio->bi_next = NULL;
5383	bio->bi_rw |= rw;
5384
5385	spin_lock(&device->io_lock);
5386	if (bio->bi_rw & REQ_SYNC)
5387		pending_bios = &device->pending_sync_bios;
5388	else
5389		pending_bios = &device->pending_bios;
5390
5391	if (pending_bios->tail)
5392		pending_bios->tail->bi_next = bio;
5393
5394	pending_bios->tail = bio;
5395	if (!pending_bios->head)
5396		pending_bios->head = bio;
5397	if (device->running_pending)
5398		should_queue = 0;
5399
5400	spin_unlock(&device->io_lock);
5401
5402	if (should_queue)
5403		btrfs_queue_work(root->fs_info->submit_workers,
5404				 &device->work);
5405}
5406
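/*
 * Check whether @bio, as currently built, can be submitted to @bdev at
 * @sector: it must fit within the queue's max_sectors and must not be
 * rejected by the queue's merge_bvec_fn.  Returns 1 if the bio is fine as
 * is, 0 if it has to be split.
 */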
5407static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5408		       sector_t sector)
5409{
5410	struct bio_vec *prev;
5411	struct request_queue *q = bdev_get_queue(bdev);
5412	unsigned int max_sectors = queue_max_sectors(q);
5413	struct bvec_merge_data bvm = {
5414		.bi_bdev = bdev,
5415		.bi_sector = sector,
5416		.bi_rw = bio->bi_rw,
5417	};
5418
5419	if (WARN_ON(bio->bi_vcnt == 0))
5420		return 1;
5421
5422	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5423	if (bio_sectors(bio) > max_sectors)
5424		return 0;
5425
5426	if (!q->merge_bvec_fn)
5427		return 1;
5428
5429	bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
5430	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5431		return 0;
5432	return 1;
5433}
5434
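/*
 * Point @bio at the device and physical offset of stripe @dev_nr and submit
 * it, either directly or through the device's async submission queue.
 */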
5435static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5436			      struct bio *bio, u64 physical, int dev_nr,
5437			      int rw, int async)
5438{
5439	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5440
5441	bio->bi_private = bbio;
5442	btrfs_io_bio(bio)->stripe_index = dev_nr;
5443	bio->bi_end_io = btrfs_end_bio;
5444	bio->bi_iter.bi_sector = physical >> 9;
5445#ifdef DEBUG
5446	{
5447		struct rcu_string *name;
5448
5449		rcu_read_lock();
5450		name = rcu_dereference(dev->name);
5451		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5452			 "(%s id %llu), size=%u\n", rw,
5453			 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
5454			 name->str, dev->devid, bio->bi_size);
5455		rcu_read_unlock();
5456	}
5457#endif
5458	bio->bi_bdev = dev->bdev;
5459
5460	btrfs_bio_counter_inc_noblocked(root->fs_info);
5461
5462	if (async)
5463		btrfs_schedule_bio(root, dev, rw, bio);
5464	else
5465		btrfsic_submit_bio(rw, bio);
5466}
5467
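/*
 * The original bio is too large for the target device, so rebuild it page by
 * page into one or more smaller bios for the same stripe and submit each
 * piece as soon as the device stops accepting pages.
 */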
5468static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5469			      struct bio *first_bio, struct btrfs_device *dev,
5470			      int dev_nr, int rw, int async)
5471{
5472	struct bio_vec *bvec = first_bio->bi_io_vec;
5473	struct bio *bio;
5474	int nr_vecs = bio_get_nr_vecs(dev->bdev);
5475	u64 physical = bbio->stripes[dev_nr].physical;
5476
5477again:
5478	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5479	if (!bio)
5480		return -ENOMEM;
5481
5482	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5483		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5484				 bvec->bv_offset) < bvec->bv_len) {
5485			u64 len = bio->bi_iter.bi_size;
5486
5487			atomic_inc(&bbio->stripes_pending);
5488			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5489					  rw, async);
5490			physical += len;
5491			goto again;
5492		}
5493		bvec++;
5494	}
5495
5496	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5497	return 0;
5498}
5499
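/*
 * Fail one stripe without submitting any I/O; if it was the last outstanding
 * stripe, complete the original bio with -EIO.
 */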
5500static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5501{
5502	atomic_inc(&bbio->error);
5503	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5504		bio->bi_private = bbio->private;
5505		bio->bi_end_io = bbio->end_io;
5506		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5507		bio->bi_iter.bi_sector = logical >> 9;
5508		kfree(bbio);
5509		bio_endio(bio, -EIO);
5510	}
5511}
5512
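/*
 * Map @bio's logical range and submit it: RAID5/6 chunks are handed to the
 * parity write/recovery code, everything else is cloned once per stripe and
 * submitted to every device that needs a copy, splitting bios that exceed a
 * device's limits.
 */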
5513int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5514		  int mirror_num, int async_submit)
5515{
5516	struct btrfs_device *dev;
5517	struct bio *first_bio = bio;
5518	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
5519	u64 length = 0;
5520	u64 map_length;
5521	u64 *raid_map = NULL;
5522	int ret;
5523	int dev_nr = 0;
5524	int total_devs = 1;
5525	struct btrfs_bio *bbio = NULL;
5526
5527	length = bio->bi_iter.bi_size;
5528	map_length = length;
5529
5530	btrfs_bio_counter_inc_blocked(root->fs_info);
5531	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5532			      mirror_num, &raid_map);
5533	if (ret) {
5534		btrfs_bio_counter_dec(root->fs_info);
5535		return ret;
5536	}
5537
5538	total_devs = bbio->num_stripes;
5539	bbio->orig_bio = first_bio;
5540	bbio->private = first_bio->bi_private;
5541	bbio->end_io = first_bio->bi_end_io;
5542	bbio->fs_info = root->fs_info;
5543	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5544
5545	if (raid_map) {
5546		/* In this case, map_length has been set to the length of
5547		   a single stripe; not the whole write */
5548		if (rw & WRITE) {
5549			ret = raid56_parity_write(root, bio, bbio,
5550						  raid_map, map_length);
5551		} else {
5552			ret = raid56_parity_recover(root, bio, bbio,
5553						    raid_map, map_length,
5554						    mirror_num);
5555		}
5556		/*
5557		 * FIXME: replace doesn't support raid56 yet, please fix
5558		 * it in the future.
5559		 */
5560		btrfs_bio_counter_dec(root->fs_info);
5561		return ret;
5562	}
5563
5564	if (map_length < length) {
5565		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5566			logical, length, map_length);
5567		BUG();
5568	}
5569
5570	while (dev_nr < total_devs) {
5571		dev = bbio->stripes[dev_nr].dev;
5572		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5573			bbio_error(bbio, first_bio, logical);
5574			dev_nr++;
5575			continue;
5576		}
5577
5578		/*
5579		 * Check and see if we're ok with this bio based on its size
5580		 * and offset with the given device.
5581		 */
5582		if (!bio_size_ok(dev->bdev, first_bio,
5583				 bbio->stripes[dev_nr].physical >> 9)) {
5584			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
5585						 dev_nr, rw, async_submit);
5586			BUG_ON(ret);
5587			dev_nr++;
5588			continue;
5589		}
5590
5591		if (dev_nr < total_devs - 1) {
5592			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5593			BUG_ON(!bio); /* -ENOMEM */
5594		} else {
5595			bio = first_bio;
5596		}
5597
5598		submit_stripe_bio(root, bbio, bio,
5599				  bbio->stripes[dev_nr].physical, dev_nr, rw,
5600				  async_submit);
5601		dev_nr++;
5602	}
5603	btrfs_bio_counter_dec(root->fs_info);
5604	return 0;
5605}
5606
5607struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5608				       u8 *uuid, u8 *fsid)
5609{
5610	struct btrfs_device *device;
5611	struct btrfs_fs_devices *cur_devices;
5612
5613	cur_devices = fs_info->fs_devices;
5614	while (cur_devices) {
5615		if (!fsid ||
5616		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5617			device = __find_device(&cur_devices->devices,
5618					       devid, uuid);
5619			if (device)
5620				return device;
5621		}
5622		cur_devices = cur_devices->seed;
5623	}
5624	return NULL;
5625}
5626
5627static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5628					    u64 devid, u8 *dev_uuid)
5629{
5630	struct btrfs_device *device;
5631	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5632
5633	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
5634	if (IS_ERR(device))
5635		return NULL;
5636
5637	list_add(&device->dev_list, &fs_devices->devices);
5638	device->fs_devices = fs_devices;
5639	fs_devices->num_devices++;
5640
5641	device->missing = 1;
5642	fs_devices->missing_devices++;
5643
5644	return device;
5645}
5646
5647/**
5648 * btrfs_alloc_device - allocate struct btrfs_device
5649 * @fs_info:	used only for generating a new devid, can be NULL if
5650 *		devid is provided (i.e. @devid != NULL).
5651 * @devid:	a pointer to devid for this device.  If NULL a new devid
5652 *		is generated.
5653 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
5654 *		is generated.
5655 *
5656 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
5657 * on error.  Returned struct is not linked onto any lists and can be
5658 * destroyed with kfree() right away.
5659 */
5660struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
5661					const u64 *devid,
5662					const u8 *uuid)
5663{
5664	struct btrfs_device *dev;
5665	u64 tmp;
5666
5667	if (WARN_ON(!devid && !fs_info))
5668		return ERR_PTR(-EINVAL);
5669
5670	dev = __alloc_device();
5671	if (IS_ERR(dev))
5672		return dev;
5673
5674	if (devid)
5675		tmp = *devid;
5676	else {
5677		int ret;
5678
5679		ret = find_next_devid(fs_info, &tmp);
5680		if (ret) {
5681			kfree(dev);
5682			return ERR_PTR(ret);
5683		}
5684	}
5685	dev->devid = tmp;
5686
5687	if (uuid)
5688		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
5689	else
5690		generate_random_uuid(dev->uuid);
5691
5692	btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL);
5693
5694	return dev;
5695}
5696
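/*
 * Parse one chunk item from @leaf and insert the resulting map_lookup into
 * the mapping tree.  Stripe devices are looked up by devid and uuid; when
 * mounted degraded, missing devices are replaced by in-memory placeholders
 * instead of failing the mount.
 */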
5697static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5698			  struct extent_buffer *leaf,
5699			  struct btrfs_chunk *chunk)
5700{
5701	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5702	struct map_lookup *map;
5703	struct extent_map *em;
5704	u64 logical;
5705	u64 length;
5706	u64 devid;
5707	u8 uuid[BTRFS_UUID_SIZE];
5708	int num_stripes;
5709	int ret;
5710	int i;
5711
5712	logical = key->offset;
5713	length = btrfs_chunk_length(leaf, chunk);
5714
5715	read_lock(&map_tree->map_tree.lock);
5716	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
5717	read_unlock(&map_tree->map_tree.lock);
5718
5719	/* already mapped? */
5720	if (em && em->start <= logical && em->start + em->len > logical) {
5721		free_extent_map(em);
5722		return 0;
5723	} else if (em) {
5724		free_extent_map(em);
5725	}
5726
5727	em = alloc_extent_map();
5728	if (!em)
5729		return -ENOMEM;
5730	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
5731	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
5732	if (!map) {
5733		free_extent_map(em);
5734		return -ENOMEM;
5735	}
5736
5737	em->bdev = (struct block_device *)map;
5738	em->start = logical;
5739	em->len = length;
5740	em->orig_start = 0;
5741	em->block_start = 0;
5742	em->block_len = em->len;
5743
5744	map->num_stripes = num_stripes;
5745	map->io_width = btrfs_chunk_io_width(leaf, chunk);
5746	map->io_align = btrfs_chunk_io_align(leaf, chunk);
5747	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
5748	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
5749	map->type = btrfs_chunk_type(leaf, chunk);
5750	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
5751	for (i = 0; i < num_stripes; i++) {
5752		map->stripes[i].physical =
5753			btrfs_stripe_offset_nr(leaf, chunk, i);
5754		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
5755		read_extent_buffer(leaf, uuid, (unsigned long)
5756				   btrfs_stripe_dev_uuid_nr(chunk, i),
5757				   BTRFS_UUID_SIZE);
5758		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5759							uuid, NULL);
5760		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5761			kfree(map);
5762			free_extent_map(em);
5763			return -EIO;
5764		}
5765		if (!map->stripes[i].dev) {
5766			map->stripes[i].dev =
5767				add_missing_dev(root, devid, uuid);
5768			if (!map->stripes[i].dev) {
5769				kfree(map);
5770				free_extent_map(em);
5771				return -EIO;
5772			}
5773		}
5774		map->stripes[i].dev->in_fs_metadata = 1;
5775	}
5776
5777	write_lock(&map_tree->map_tree.lock);
5778	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
5779	write_unlock(&map_tree->map_tree.lock);
5780	BUG_ON(ret); /* Tree corruption */
5781	free_extent_map(em);
5782
5783	return 0;
5784}
5785
5786static void fill_device_from_item(struct extent_buffer *leaf,
5787				 struct btrfs_dev_item *dev_item,
5788				 struct btrfs_device *device)
5789{
5790	unsigned long ptr;
5791
5792	device->devid = btrfs_device_id(leaf, dev_item);
5793	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5794	device->total_bytes = device->disk_total_bytes;
5795	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5796	device->type = btrfs_device_type(leaf, dev_item);
5797	device->io_align = btrfs_device_io_align(leaf, dev_item);
5798	device->io_width = btrfs_device_io_width(leaf, dev_item);
5799	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5800	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5801	device->is_tgtdev_for_dev_replace = 0;
5802
5803	ptr = btrfs_device_uuid(dev_item);
5804	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5805}
5806
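/*
 * A device item references a foreign fsid, i.e. a seed filesystem: find the
 * matching fs_devices from the scan list (unless it is already chained),
 * clone it, open it read-only and link it into this filesystem's seed list.
 */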
5807static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5808{
5809	struct btrfs_fs_devices *fs_devices;
5810	int ret;
5811
5812	BUG_ON(!mutex_is_locked(&uuid_mutex));
5813
5814	fs_devices = root->fs_info->fs_devices->seed;
5815	while (fs_devices) {
5816		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5817			ret = 0;
5818			goto out;
5819		}
5820		fs_devices = fs_devices->seed;
5821	}
5822
5823	fs_devices = find_fsid(fsid);
5824	if (!fs_devices) {
5825		ret = -ENOENT;
5826		goto out;
5827	}
5828
5829	fs_devices = clone_fs_devices(fs_devices);
5830	if (IS_ERR(fs_devices)) {
5831		ret = PTR_ERR(fs_devices);
5832		goto out;
5833	}
5834
5835	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5836				   root->fs_info->bdev_holder);
5837	if (ret) {
5838		free_fs_devices(fs_devices);
5839		goto out;
5840	}
5841
5842	if (!fs_devices->seeding) {
5843		__btrfs_close_devices(fs_devices);
5844		free_fs_devices(fs_devices);
5845		ret = -EINVAL;
5846		goto out;
5847	}
5848
5849	fs_devices->seed = root->fs_info->fs_devices->seed;
5850	root->fs_info->fs_devices->seed = fs_devices;
5851out:
5852	return ret;
5853}
5854
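/*
 * Match one device item from the chunk tree against the devices discovered
 * at scan time: open seed devices when the fsid differs, create "missing"
 * placeholders when mounted degraded, and fill in the on-disk geometry and
 * byte counters.
 */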
5855static int read_one_dev(struct btrfs_root *root,
5856			struct extent_buffer *leaf,
5857			struct btrfs_dev_item *dev_item)
5858{
5859	struct btrfs_device *device;
5860	u64 devid;
5861	int ret;
5862	u8 fs_uuid[BTRFS_UUID_SIZE];
5863	u8 dev_uuid[BTRFS_UUID_SIZE];
5864
5865	devid = btrfs_device_id(leaf, dev_item);
5866	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
5867			   BTRFS_UUID_SIZE);
5868	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
5869			   BTRFS_UUID_SIZE);
5870
5871	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5872		ret = open_seed_devices(root, fs_uuid);
5873		if (ret && !btrfs_test_opt(root, DEGRADED))
5874			return ret;
5875	}
5876
5877	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5878	if (!device || !device->bdev) {
5879		if (!btrfs_test_opt(root, DEGRADED))
5880			return -EIO;
5881
5882		if (!device) {
5883			btrfs_warn(root->fs_info, "devid %llu missing", devid);
5884			device = add_missing_dev(root, devid, dev_uuid);
5885			if (!device)
5886				return -ENOMEM;
5887		} else if (!device->missing) {
5888			/*
5889			 * this happens when a device that was properly set up
5890			 * in the device info lists suddenly goes bad.
5891			 * device->bdev is NULL, and so we have to set
5892			 * device->missing to one here
5893			 */
5894			root->fs_info->fs_devices->missing_devices++;
5895			device->missing = 1;
5896		}
5897	}
5898
5899	if (device->fs_devices != root->fs_info->fs_devices) {
5900		BUG_ON(device->writeable);
5901		if (device->generation !=
5902		    btrfs_device_generation(leaf, dev_item))
5903			return -EINVAL;
5904	}
5905
5906	fill_device_from_item(leaf, dev_item, device);
5907	device->in_fs_metadata = 1;
5908	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5909		device->fs_devices->total_rw_bytes += device->total_bytes;
5910		spin_lock(&root->fs_info->free_chunk_lock);
5911		root->fs_info->free_chunk_space += device->total_bytes -
5912			device->bytes_used;
5913		spin_unlock(&root->fs_info->free_chunk_lock);
5914	}
5915	ret = 0;
5916	return ret;
5917}
5918
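/*
 * Read the chunk items embedded in the superblock's sys_chunk_array.  These
 * describe the system chunks and have to be mapped before the chunk tree
 * itself can be read.
 */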
5919int btrfs_read_sys_array(struct btrfs_root *root)
5920{
5921	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5922	struct extent_buffer *sb;
5923	struct btrfs_disk_key *disk_key;
5924	struct btrfs_chunk *chunk;
5925	u8 *ptr;
5926	unsigned long sb_ptr;
5927	int ret = 0;
5928	u32 num_stripes;
5929	u32 array_size;
5930	u32 len = 0;
5931	u32 cur;
5932	struct btrfs_key key;
5933
5934	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5935					  BTRFS_SUPER_INFO_SIZE);
5936	if (!sb)
5937		return -ENOMEM;
5938	btrfs_set_buffer_uptodate(sb);
5939	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5940	/*
5941	 * The sb extent buffer is artificial and is only used to read the system array.
5942	 * The btrfs_set_buffer_uptodate() call does not properly mark all its
5943	 * pages up-to-date when the page is larger: extent does not cover the
5944	 * whole page and consequently check_page_uptodate does not find all
5945	 * the page's extents up-to-date (the hole beyond sb),
5946	 * write_extent_buffer then triggers a WARN_ON.
5947	 *
5948	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
5949	 * but sb spans only this function. Add an explicit SetPageUptodate call
5950	 * to silence the warning, e.g. on PowerPC 64.
5951	 */
5952	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5953		SetPageUptodate(sb->pages[0]);
5954
5955	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5956	array_size = btrfs_super_sys_array_size(super_copy);
5957
5958	ptr = super_copy->sys_chunk_array;
5959	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5960	cur = 0;
5961
5962	while (cur < array_size) {
5963		disk_key = (struct btrfs_disk_key *)ptr;
5964		btrfs_disk_key_to_cpu(&key, disk_key);
5965
5966		len = sizeof(*disk_key); ptr += len;
5967		sb_ptr += len;
5968		cur += len;
5969
5970		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5971			chunk = (struct btrfs_chunk *)sb_ptr;
5972			ret = read_one_chunk(root, &key, sb, chunk);
5973			if (ret)
5974				break;
5975			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5976			len = btrfs_chunk_item_size(num_stripes);
5977		} else {
5978			ret = -EIO;
5979			break;
5980		}
5981		ptr += len;
5982		sb_ptr += len;
5983		cur += len;
5984	}
5985	free_extent_buffer(sb);
5986	return ret;
5987}
5988
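/*
 * Populate the in-memory device list and chunk mapping tree from the chunk
 * tree at mount time.  Takes the uuid_mutex and the chunk mutex for the
 * duration of the scan.
 */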
5989int btrfs_read_chunk_tree(struct btrfs_root *root)
5990{
5991	struct btrfs_path *path;
5992	struct extent_buffer *leaf;
5993	struct btrfs_key key;
5994	struct btrfs_key found_key;
5995	int ret;
5996	int slot;
5997
5998	root = root->fs_info->chunk_root;
 
5999
6000	path = btrfs_alloc_path();
6001	if (!path)
6002		return -ENOMEM;
6003
6004	mutex_lock(&uuid_mutex);
6005	lock_chunks(root);
6006
6007	/*
6008	 * Read all device items, and then all the chunk items. All
6009	 * device items are found before any chunk item (their object id
6010	 * is smaller than the lowest possible object id for a chunk
6011	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6012	 */
6013	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6014	key.offset = 0;
6015	key.type = 0;
6016	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6017	if (ret < 0)
6018		goto error;
6019	while (1) {
6020		leaf = path->nodes[0];
6021		slot = path->slots[0];
6022		if (slot >= btrfs_header_nritems(leaf)) {
6023			ret = btrfs_next_leaf(root, path);
6024			if (ret == 0)
6025				continue;
6026			if (ret < 0)
6027				goto error;
6028			break;
6029		}
6030		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6031		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6032			struct btrfs_dev_item *dev_item;
6033			dev_item = btrfs_item_ptr(leaf, slot,
6034						  struct btrfs_dev_item);
6035			ret = read_one_dev(root, leaf, dev_item);
6036			if (ret)
6037				goto error;
6038		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6039			struct btrfs_chunk *chunk;
6040			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6041			ret = read_one_chunk(root, &found_key, leaf, chunk);
6042			if (ret)
6043				goto error;
6044		}
6045		path->slots[0]++;
6046	}
6047	ret = 0;
6048error:
6049	unlock_chunks(root);
6050	mutex_unlock(&uuid_mutex);
6051
6052	btrfs_free_path(path);
6053	return ret;
6054}
6055
6056void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6057{
6058	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6059	struct btrfs_device *device;
6060
6061	mutex_lock(&fs_devices->device_list_mutex);
6062	list_for_each_entry(device, &fs_devices->devices, dev_list)
6063		device->dev_root = fs_info->dev_root;
6064	mutex_unlock(&fs_devices->device_list_mutex);
6065}
6066
6067static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6068{
6069	int i;
6070
6071	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6072		btrfs_dev_stat_reset(dev, i);
6073}
6074
6075int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6076{
6077	struct btrfs_key key;
6078	struct btrfs_key found_key;
6079	struct btrfs_root *dev_root = fs_info->dev_root;
6080	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6081	struct extent_buffer *eb;
6082	int slot;
6083	int ret = 0;
6084	struct btrfs_device *device;
6085	struct btrfs_path *path = NULL;
6086	int i;
6087
6088	path = btrfs_alloc_path();
6089	if (!path) {
6090		ret = -ENOMEM;
6091		goto out;
6092	}
6093
6094	mutex_lock(&fs_devices->device_list_mutex);
6095	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6096		int item_size;
6097		struct btrfs_dev_stats_item *ptr;
6098
6099		key.objectid = 0;
6100		key.type = BTRFS_DEV_STATS_KEY;
6101		key.offset = device->devid;
6102		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6103		if (ret) {
6104			__btrfs_reset_dev_stats(device);
6105			device->dev_stats_valid = 1;
6106			btrfs_release_path(path);
6107			continue;
6108		}
6109		slot = path->slots[0];
6110		eb = path->nodes[0];
6111		btrfs_item_key_to_cpu(eb, &found_key, slot);
6112		item_size = btrfs_item_size_nr(eb, slot);
6113
6114		ptr = btrfs_item_ptr(eb, slot,
6115				     struct btrfs_dev_stats_item);
6116
6117		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6118			if (item_size >= (1 + i) * sizeof(__le64))
6119				btrfs_dev_stat_set(device, i,
6120					btrfs_dev_stats_value(eb, ptr, i));
6121			else
6122				btrfs_dev_stat_reset(device, i);
6123		}
6124
6125		device->dev_stats_valid = 1;
6126		btrfs_dev_stat_print_on_load(device);
6127		btrfs_release_path(path);
6128	}
6129	mutex_unlock(&fs_devices->device_list_mutex);
6130
6131out:
6132	btrfs_free_path(path);
6133	return ret < 0 ? ret : 0;
6134}
6135
6136static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6137				struct btrfs_root *dev_root,
6138				struct btrfs_device *device)
6139{
6140	struct btrfs_path *path;
6141	struct btrfs_key key;
6142	struct extent_buffer *eb;
6143	struct btrfs_dev_stats_item *ptr;
6144	int ret;
6145	int i;
6146
6147	key.objectid = 0;
6148	key.type = BTRFS_DEV_STATS_KEY;
6149	key.offset = device->devid;
6150
6151	path = btrfs_alloc_path();
6152	BUG_ON(!path);
6153	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6154	if (ret < 0) {
6155		printk_in_rcu(KERN_WARNING "BTRFS: "
6156			"error %d while searching for dev_stats item for device %s!\n",
6157			      ret, rcu_str_deref(device->name));
6158		goto out;
6159	}
6160
6161	if (ret == 0 &&
6162	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6163		/* need to delete old one and insert a new one */
6164		ret = btrfs_del_item(trans, dev_root, path);
6165		if (ret != 0) {
6166			printk_in_rcu(KERN_WARNING "BTRFS: "
6167				"delete too small dev_stats item for device %s failed %d!\n",
6168				      rcu_str_deref(device->name), ret);
6169			goto out;
6170		}
6171		ret = 1;
6172	}
6173
6174	if (ret == 1) {
6175		/* need to insert a new item */
6176		btrfs_release_path(path);
6177		ret = btrfs_insert_empty_item(trans, dev_root, path,
6178					      &key, sizeof(*ptr));
6179		if (ret < 0) {
6180			printk_in_rcu(KERN_WARNING "BTRFS: "
6181					  "insert dev_stats item for device %s failed %d!\n",
6182				      rcu_str_deref(device->name), ret);
6183			goto out;
6184		}
6185	}
6186
6187	eb = path->nodes[0];
6188	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6189	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6190		btrfs_set_dev_stats_value(eb, ptr, i,
6191					  btrfs_dev_stat_read(device, i));
6192	btrfs_mark_buffer_dirty(eb);
6193
6194out:
6195	btrfs_free_path(path);
6196	return ret;
6197}
6198
6199/*
6200 * called from commit_transaction. Writes all changed device stats to disk.
6201 */
6202int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6203			struct btrfs_fs_info *fs_info)
6204{
6205	struct btrfs_root *dev_root = fs_info->dev_root;
6206	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6207	struct btrfs_device *device;
6208	int ret = 0;
6209
6210	mutex_lock(&fs_devices->device_list_mutex);
6211	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6212		if (!device->dev_stats_valid || !device->dev_stats_dirty)
6213			continue;
6214
6215		ret = update_dev_stat_item(trans, dev_root, device);
6216		if (!ret)
6217			device->dev_stats_dirty = 0;
6218	}
6219	mutex_unlock(&fs_devices->device_list_mutex);
6220
6221	return ret;
6222}
6223
6224void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6225{
6226	btrfs_dev_stat_inc(dev, index);
6227	btrfs_dev_stat_print_on_error(dev);
6228}
6229
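/*
 * Ratelimited dump of all error counters of a device; does nothing before
 * ->dev_stats_valid has been set.
 */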
6230static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6231{
6232	if (!dev->dev_stats_valid)
6233		return;
6234	printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
6235			   "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6236			   rcu_str_deref(dev->name),
6237			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6238			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6239			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6240			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6241			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6242}
6243
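/*
 * Print the error counters of a device when they are loaded from disk; stay
 * silent if every counter is still zero.
 */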
6244static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6245{
6246	int i;
6247
6248	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6249		if (btrfs_dev_stat_read(dev, i) != 0)
6250			break;
6251	if (i == BTRFS_DEV_STAT_VALUES_MAX)
6252		return; /* all values == 0, suppress message */
6253
6254	printk_in_rcu(KERN_INFO "BTRFS: "
6255		   "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6256	       rcu_str_deref(dev->name),
6257	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6258	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6259	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6260	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6261	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6262}
6263
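/*
 * Fill @stats with the error counters of the device identified by
 * @stats->devid; with BTRFS_DEV_STATS_RESET set, each counter is read and
 * reset in one step.
 */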
6264int btrfs_get_dev_stats(struct btrfs_root *root,
6265			struct btrfs_ioctl_get_dev_stats *stats)
6266{
6267	struct btrfs_device *dev;
6268	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6269	int i;
6270
6271	mutex_lock(&fs_devices->device_list_mutex);
6272	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6273	mutex_unlock(&fs_devices->device_list_mutex);
6274
6275	if (!dev) {
6276		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
6277		return -ENODEV;
6278	} else if (!dev->dev_stats_valid) {
6279		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
6280		return -ENODEV;
6281	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6282		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6283			if (stats->nr_items > i)
6284				stats->values[i] =
6285					btrfs_dev_stat_read_and_reset(dev, i);
6286			else
6287				btrfs_dev_stat_reset(dev, i);
6288		}
6289	} else {
6290		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6291			if (stats->nr_items > i)
6292				stats->values[i] = btrfs_dev_stat_read(dev, i);
6293	}
6294	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6295		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6296	return 0;
6297}
6298
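/*
 * Zero the magic of the super block returned by btrfs_read_dev_super() and
 * write it back, so the device is no longer recognized as a btrfs device.
 */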
6299int btrfs_scratch_superblock(struct btrfs_device *device)
6300{
6301	struct buffer_head *bh;
6302	struct btrfs_super_block *disk_super;
6303
6304	bh = btrfs_read_dev_super(device->bdev);
6305	if (!bh)
6306		return -EINVAL;
6307	disk_super = (struct btrfs_super_block *)bh->b_data;
6308
6309	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6310	set_buffer_dirty(bh);
6311	sync_dirty_buffer(bh);
6312	brelse(bh);
6313
6314	return 0;
6315}