   1/*
   2 * Copyright (C) 2007 Oracle.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
  18#include <linux/sched.h>
  19#include <linux/bio.h>
  20#include <linux/slab.h>
  21#include <linux/buffer_head.h>
  22#include <linux/blkdev.h>
  23#include <linux/random.h>
  24#include <linux/iocontext.h>
  25#include <linux/capability.h>
  26#include <linux/ratelimit.h>
  27#include <linux/kthread.h>
  28#include <linux/raid/pq.h>
  29#include <linux/semaphore.h>
  30#include <asm/div64.h>
  31#include "ctree.h"
  32#include "extent_map.h"
  33#include "disk-io.h"
  34#include "transaction.h"
  35#include "print-tree.h"
  36#include "volumes.h"
  37#include "raid56.h"
  38#include "async-thread.h"
  39#include "check-integrity.h"
  40#include "rcu-string.h"
  41#include "math.h"
  42#include "dev-replace.h"
  43#include "sysfs.h"
  44
  45const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
  46	[BTRFS_RAID_RAID10] = {
  47		.sub_stripes	= 2,
  48		.dev_stripes	= 1,
  49		.devs_max	= 0,	/* 0 == as many as possible */
  50		.devs_min	= 4,
  51		.tolerated_failures = 1,
  52		.devs_increment	= 2,
  53		.ncopies	= 2,
  54	},
  55	[BTRFS_RAID_RAID1] = {
  56		.sub_stripes	= 1,
  57		.dev_stripes	= 1,
  58		.devs_max	= 2,
  59		.devs_min	= 2,
  60		.tolerated_failures = 1,
  61		.devs_increment	= 2,
  62		.ncopies	= 2,
  63	},
  64	[BTRFS_RAID_DUP] = {
  65		.sub_stripes	= 1,
  66		.dev_stripes	= 2,
  67		.devs_max	= 1,
  68		.devs_min	= 1,
  69		.tolerated_failures = 0,
  70		.devs_increment	= 1,
  71		.ncopies	= 2,
  72	},
  73	[BTRFS_RAID_RAID0] = {
  74		.sub_stripes	= 1,
  75		.dev_stripes	= 1,
  76		.devs_max	= 0,
  77		.devs_min	= 2,
  78		.tolerated_failures = 0,
  79		.devs_increment	= 1,
  80		.ncopies	= 1,
  81	},
  82	[BTRFS_RAID_SINGLE] = {
  83		.sub_stripes	= 1,
  84		.dev_stripes	= 1,
  85		.devs_max	= 1,
  86		.devs_min	= 1,
  87		.tolerated_failures = 0,
  88		.devs_increment	= 1,
  89		.ncopies	= 1,
  90	},
  91	[BTRFS_RAID_RAID5] = {
  92		.sub_stripes	= 1,
  93		.dev_stripes	= 1,
  94		.devs_max	= 0,
  95		.devs_min	= 2,
  96		.tolerated_failures = 1,
  97		.devs_increment	= 1,
  98		.ncopies	= 2,
  99	},
 100	[BTRFS_RAID_RAID6] = {
 101		.sub_stripes	= 1,
 102		.dev_stripes	= 1,
 103		.devs_max	= 0,
 104		.devs_min	= 3,
 105		.tolerated_failures = 2,
 106		.devs_increment	= 1,
 107		.ncopies	= 3,
 108	},
 109};
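     /*
      * A quick legend for the table above: sub_stripes is the number of
      * mirrored stripes inside one stripe set (2 only for RAID10),
      * dev_stripes is how many stripes land on each device (2 only for
      * DUP), devs_max caps the devices per chunk (0 means as many as
      * possible), devs_min is the minimum device count,
      * tolerated_failures is how many devices may be lost without losing
      * data, devs_increment is the step in which devices must be added,
      * and ncopies is how many copies of each block exist (parity counts
      * as an extra copy for RAID5/6 in this table).
      */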
 110
 111const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
 112	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
 113	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
 114	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
 115	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
 116	[BTRFS_RAID_SINGLE] = 0,
 117	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
 118	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
 119};
 120
 121static int init_first_rw_device(struct btrfs_trans_handle *trans,
 122				struct btrfs_root *root,
 123				struct btrfs_device *device);
 124static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
 125static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
 126static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
 127static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
 128static void btrfs_close_one_device(struct btrfs_device *device);
 129
 130DEFINE_MUTEX(uuid_mutex);
 131static LIST_HEAD(fs_uuids);
 132struct list_head *btrfs_get_fs_uuids(void)
 133{
 134	return &fs_uuids;
 135}
 136
 137static struct btrfs_fs_devices *__alloc_fs_devices(void)
 138{
 139	struct btrfs_fs_devices *fs_devs;
 140
 141	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
 142	if (!fs_devs)
 143		return ERR_PTR(-ENOMEM);
 144
 145	mutex_init(&fs_devs->device_list_mutex);
 146
 147	INIT_LIST_HEAD(&fs_devs->devices);
 148	INIT_LIST_HEAD(&fs_devs->resized_devices);
 149	INIT_LIST_HEAD(&fs_devs->alloc_list);
 150	INIT_LIST_HEAD(&fs_devs->list);
 151
 152	return fs_devs;
 153}
 154
 155/**
 156 * alloc_fs_devices - allocate struct btrfs_fs_devices
 157 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
 158 *		generated.
 159 *
 160 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 161 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 162 * can be destroyed with kfree() right away.
 163 */
 164static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
 165{
 166	struct btrfs_fs_devices *fs_devs;
 167
 168	fs_devs = __alloc_fs_devices();
 169	if (IS_ERR(fs_devs))
 170		return fs_devs;
 171
 172	if (fsid)
 173		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
 174	else
 175		generate_random_uuid(fs_devs->fsid);
 176
 177	return fs_devs;
 178}
 179
 180static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
 181{
 182	struct btrfs_device *device;
 183	WARN_ON(fs_devices->opened);
 184	while (!list_empty(&fs_devices->devices)) {
 185		device = list_entry(fs_devices->devices.next,
 186				    struct btrfs_device, dev_list);
 187		list_del(&device->dev_list);
 188		rcu_string_free(device->name);
 189		kfree(device);
 190	}
 191	kfree(fs_devices);
 192}
 193
 194static void btrfs_kobject_uevent(struct block_device *bdev,
 195				 enum kobject_action action)
 196{
 197	int ret;
 198
 199	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
 200	if (ret)
 201		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
 202			action,
 203			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
 204			&disk_to_dev(bdev->bd_disk)->kobj);
 205}
 206
 207void btrfs_cleanup_fs_uuids(void)
 208{
 209	struct btrfs_fs_devices *fs_devices;
 210
 211	while (!list_empty(&fs_uuids)) {
 212		fs_devices = list_entry(fs_uuids.next,
 213					struct btrfs_fs_devices, list);
 214		list_del(&fs_devices->list);
 215		free_fs_devices(fs_devices);
 216	}
 217}
 218
 219static struct btrfs_device *__alloc_device(void)
 220{
 221	struct btrfs_device *dev;
 222
 223	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 224	if (!dev)
 225		return ERR_PTR(-ENOMEM);
 226
 227	INIT_LIST_HEAD(&dev->dev_list);
 228	INIT_LIST_HEAD(&dev->dev_alloc_list);
 229	INIT_LIST_HEAD(&dev->resized_list);
 230
 231	spin_lock_init(&dev->io_lock);
 232
 233	spin_lock_init(&dev->reada_lock);
 234	atomic_set(&dev->reada_in_flight, 0);
 235	atomic_set(&dev->dev_stats_ccnt, 0);
 236	btrfs_device_data_ordered_init(dev);
 237	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
 238	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
 239
 240	return dev;
 241}
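     /*
      * The reada radix trees above use GFP_NOFS with __GFP_DIRECT_RECLAIM
      * cleared so that node allocations neither recurse into the
      * filesystem nor sleep; insertions into these trees happen under the
      * reada_lock spinlock, where sleeping allocations are not allowed.
      */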
 242
 243static noinline struct btrfs_device *__find_device(struct list_head *head,
 244						   u64 devid, u8 *uuid)
 245{
 246	struct btrfs_device *dev;
 247
 248	list_for_each_entry(dev, head, dev_list) {
 249		if (dev->devid == devid &&
 250		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
 251			return dev;
 252		}
 253	}
 254	return NULL;
 255}
 256
 257static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 258{
 259	struct btrfs_fs_devices *fs_devices;
 260
 261	list_for_each_entry(fs_devices, &fs_uuids, list) {
 262		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
 263			return fs_devices;
 264	}
 265	return NULL;
 266}
 267
 268static int
 269btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
 270		      int flush, struct block_device **bdev,
 271		      struct buffer_head **bh)
 272{
 273	int ret;
 274
 275	*bdev = blkdev_get_by_path(device_path, flags, holder);
 276
 277	if (IS_ERR(*bdev)) {
 278		ret = PTR_ERR(*bdev);
 279		goto error;
 280	}
 281
 282	if (flush)
 283		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
 284	ret = set_blocksize(*bdev, 4096);
 285	if (ret) {
 286		blkdev_put(*bdev, flags);
 287		goto error;
 288	}
 289	invalidate_bdev(*bdev);
 290	*bh = btrfs_read_dev_super(*bdev);
 291	if (IS_ERR(*bh)) {
 292		ret = PTR_ERR(*bh);
 293		blkdev_put(*bdev, flags);
 294		goto error;
 295	}
 296
 297	return 0;
 298
 299error:
 300	*bdev = NULL;
 301	*bh = NULL;
 302	return ret;
 303}
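     /*
      * A minimal caller sketch for the helper above (illustrative only,
      * modelled on how __btrfs_open_devices() and btrfs_rm_device() use
      * it):
      *
      *	struct block_device *bdev;
      *	struct buffer_head *bh;
      *
      *	if (!btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL,
      *				   holder, 1, &bdev, &bh)) {
      *		disk_super = (struct btrfs_super_block *)bh->b_data;
      *		...
      *		brelse(bh);
      *		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
      *	}
      *
      * On any failure *bdev and *bh are set to NULL, so the caller needs
      * no cleanup.
      */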
 304
 305static void requeue_list(struct btrfs_pending_bios *pending_bios,
 306			struct bio *head, struct bio *tail)
 307{
 308
 309	struct bio *old_head;
 310
 311	old_head = pending_bios->head;
 312	pending_bios->head = head;
 313	if (pending_bios->tail)
 314		tail->bi_next = old_head;
 315	else
 316		pending_bios->tail = tail;
 317}
 318
 319/*
 320 * we try to collect pending bios for a device so we don't get a large
 321 * number of procs sending bios down to the same device.  This greatly
  322 * improves the scheduler's ability to collect and merge the bios.
 323 *
 324 * But, it also turns into a long list of bios to process and that is sure
 325 * to eventually make the worker thread block.  The solution here is to
 326 * make some progress and then put this work struct back at the end of
 327 * the list if the block device is congested.  This way, multiple devices
 328 * can make progress from a single worker thread.
 329 */
 330static noinline void run_scheduled_bios(struct btrfs_device *device)
 331{
 332	struct bio *pending;
 333	struct backing_dev_info *bdi;
 334	struct btrfs_fs_info *fs_info;
 335	struct btrfs_pending_bios *pending_bios;
 336	struct bio *tail;
 337	struct bio *cur;
 338	int again = 0;
 339	unsigned long num_run;
 340	unsigned long batch_run = 0;
 341	unsigned long limit;
 342	unsigned long last_waited = 0;
 343	int force_reg = 0;
 344	int sync_pending = 0;
 345	struct blk_plug plug;
 346
 347	/*
 348	 * this function runs all the bios we've collected for
 349	 * a particular device.  We don't want to wander off to
 350	 * another device without first sending all of these down.
  351	 * So, set up a plug here and finish it off before we return
 352	 */
 353	blk_start_plug(&plug);
 354
 355	bdi = blk_get_backing_dev_info(device->bdev);
 356	fs_info = device->dev_root->fs_info;
 357	limit = btrfs_async_submit_limit(fs_info);
 358	limit = limit * 2 / 3;
 359
 360loop:
 361	spin_lock(&device->io_lock);
 362
 363loop_lock:
 364	num_run = 0;
 365
 366	/* take all the bios off the list at once and process them
 367	 * later on (without the lock held).  But, remember the
 368	 * tail and other pointers so the bios can be properly reinserted
 369	 * into the list if we hit congestion
 370	 */
 371	if (!force_reg && device->pending_sync_bios.head) {
 372		pending_bios = &device->pending_sync_bios;
 373		force_reg = 1;
 374	} else {
 375		pending_bios = &device->pending_bios;
 376		force_reg = 0;
 377	}
 378
 379	pending = pending_bios->head;
 380	tail = pending_bios->tail;
 381	WARN_ON(pending && !tail);
 382
 383	/*
 384	 * if pending was null this time around, no bios need processing
 385	 * at all and we can stop.  Otherwise it'll loop back up again
 386	 * and do an additional check so no bios are missed.
 387	 *
 388	 * device->running_pending is used to synchronize with the
 389	 * schedule_bio code.
 390	 */
 391	if (device->pending_sync_bios.head == NULL &&
 392	    device->pending_bios.head == NULL) {
 393		again = 0;
 394		device->running_pending = 0;
 395	} else {
 396		again = 1;
 397		device->running_pending = 1;
 398	}
 399
 400	pending_bios->head = NULL;
 401	pending_bios->tail = NULL;
 402
 403	spin_unlock(&device->io_lock);
 404
 405	while (pending) {
 406
 407		rmb();
 408		/* we want to work on both lists, but do more bios on the
 409		 * sync list than the regular list
 410		 */
 411		if ((num_run > 32 &&
 412		    pending_bios != &device->pending_sync_bios &&
 413		    device->pending_sync_bios.head) ||
 414		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
 415		    device->pending_bios.head)) {
 416			spin_lock(&device->io_lock);
 417			requeue_list(pending_bios, pending, tail);
 418			goto loop_lock;
 419		}
 420
 421		cur = pending;
 422		pending = pending->bi_next;
 423		cur->bi_next = NULL;
 424
 425		/*
 426		 * atomic_dec_return implies a barrier for waitqueue_active
 427		 */
 428		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
 429		    waitqueue_active(&fs_info->async_submit_wait))
 430			wake_up(&fs_info->async_submit_wait);
 431
 432		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
 433
 434		/*
 435		 * if we're doing the sync list, record that our
 436		 * plug has some sync requests on it
 437		 *
 438		 * If we're doing the regular list and there are
 439		 * sync requests sitting around, unplug before
 440		 * we add more
 441		 */
 442		if (pending_bios == &device->pending_sync_bios) {
 443			sync_pending = 1;
 444		} else if (sync_pending) {
 445			blk_finish_plug(&plug);
 446			blk_start_plug(&plug);
 447			sync_pending = 0;
 448		}
 449
 450		btrfsic_submit_bio(cur->bi_rw, cur);
 451		num_run++;
 452		batch_run++;
 453
 454		cond_resched();
 455
 456		/*
 457		 * we made progress, there is more work to do and the bdi
 458		 * is now congested.  Back off and let other work structs
 459		 * run instead
 460		 */
 461		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
 462		    fs_info->fs_devices->open_devices > 1) {
 463			struct io_context *ioc;
 464
 465			ioc = current->io_context;
 466
 467			/*
 468			 * the main goal here is that we don't want to
 469			 * block if we're going to be able to submit
 470			 * more requests without blocking.
 471			 *
 472			 * This code does two great things, it pokes into
 473			 * the elevator code from a filesystem _and_
 474			 * it makes assumptions about how batching works.
 475			 */
 476			if (ioc && ioc->nr_batch_requests > 0 &&
 477			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
 478			    (last_waited == 0 ||
 479			     ioc->last_waited == last_waited)) {
 480				/*
 481				 * we want to go through our batch of
 482				 * requests and stop.  So, we copy out
 483				 * the ioc->last_waited time and test
 484				 * against it before looping
 485				 */
 486				last_waited = ioc->last_waited;
 487				cond_resched();
 488				continue;
 489			}
 490			spin_lock(&device->io_lock);
 491			requeue_list(pending_bios, pending, tail);
 492			device->running_pending = 1;
 493
 494			spin_unlock(&device->io_lock);
 495			btrfs_queue_work(fs_info->submit_workers,
 496					 &device->work);
 497			goto done;
 498		}
 499		/* unplug every 64 requests just for good measure */
 500		if (batch_run % 64 == 0) {
 501			blk_finish_plug(&plug);
 502			blk_start_plug(&plug);
 503			sync_pending = 0;
 504		}
 505	}
 506
 507	cond_resched();
 508	if (again)
 509		goto loop;
 510
 511	spin_lock(&device->io_lock);
 512	if (device->pending_bios.head || device->pending_sync_bios.head)
 513		goto loop_lock;
 514	spin_unlock(&device->io_lock);
 515
 516done:
 517	blk_finish_plug(&plug);
 518}
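     /*
      * Throttle points in run_scheduled_bios() above: sync bios preempt
      * the regular list after 32 submissions, the regular list preempts
      * sync after 64, the plug is cycled every 64 requests, and once more
      * than 8 bios have run against a congested bdi the remaining work is
      * requeued to the submit_workers pool so other devices can make
      * progress.
      */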
 519
 520static void pending_bios_fn(struct btrfs_work *work)
 521{
 522	struct btrfs_device *device;
 523
 524	device = container_of(work, struct btrfs_device, work);
 525	run_scheduled_bios(device);
 526}
 527
 528
 529void btrfs_free_stale_device(struct btrfs_device *cur_dev)
 530{
 531	struct btrfs_fs_devices *fs_devs;
 532	struct btrfs_device *dev;
 533
 534	if (!cur_dev->name)
 535		return;
 536
 537	list_for_each_entry(fs_devs, &fs_uuids, list) {
 538		int del = 1;
 539
 540		if (fs_devs->opened)
 541			continue;
 542		if (fs_devs->seeding)
 543			continue;
 544
 545		list_for_each_entry(dev, &fs_devs->devices, dev_list) {
 546
 547			if (dev == cur_dev)
 548				continue;
 549			if (!dev->name)
 550				continue;
 551
 552			/*
 553			 * Todo: This won't be enough. What if the same device
  554			 * comes back (with a new uuid) under its mapper path?
  555			 * But for now this does help, as mostly an admin will
  556			 * either use the mapper or the non-mapper path throughout.
 557			 */
 558			rcu_read_lock();
 559			del = strcmp(rcu_str_deref(dev->name),
 560						rcu_str_deref(cur_dev->name));
 561			rcu_read_unlock();
 562			if (!del)
 563				break;
 564		}
 565
 566		if (!del) {
 567			/* delete the stale device */
 568			if (fs_devs->num_devices == 1) {
 569				btrfs_sysfs_remove_fsid(fs_devs);
 570				list_del(&fs_devs->list);
 571				free_fs_devices(fs_devs);
 572			} else {
 573				fs_devs->num_devices--;
 574				list_del(&dev->dev_list);
 575				rcu_string_free(dev->name);
 576				kfree(dev);
 577			}
 578			break;
 579		}
 580	}
 581}
 582
 583/*
 584 * Add new device to list of registered devices
 585 *
 586 * Returns:
 587 * 1   - first time device is seen
 588 * 0   - device already known
 589 * < 0 - error
 590 */
 591static noinline int device_list_add(const char *path,
 592			   struct btrfs_super_block *disk_super,
 593			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
 594{
 595	struct btrfs_device *device;
 596	struct btrfs_fs_devices *fs_devices;
 597	struct rcu_string *name;
 598	int ret = 0;
 599	u64 found_transid = btrfs_super_generation(disk_super);
 600
 601	fs_devices = find_fsid(disk_super->fsid);
 602	if (!fs_devices) {
 603		fs_devices = alloc_fs_devices(disk_super->fsid);
 604		if (IS_ERR(fs_devices))
 605			return PTR_ERR(fs_devices);
 606
 607		list_add(&fs_devices->list, &fs_uuids);
 608
 609		device = NULL;
 610	} else {
 611		device = __find_device(&fs_devices->devices, devid,
 612				       disk_super->dev_item.uuid);
 613	}
 614
 615	if (!device) {
 616		if (fs_devices->opened)
 617			return -EBUSY;
 618
 619		device = btrfs_alloc_device(NULL, &devid,
 620					    disk_super->dev_item.uuid);
 621		if (IS_ERR(device)) {
 622			/* we can safely leave the fs_devices entry around */
 623			return PTR_ERR(device);
 624		}
 625
 626		name = rcu_string_strdup(path, GFP_NOFS);
 627		if (!name) {
 628			kfree(device);
 629			return -ENOMEM;
 630		}
 631		rcu_assign_pointer(device->name, name);
 632
 633		mutex_lock(&fs_devices->device_list_mutex);
 634		list_add_rcu(&device->dev_list, &fs_devices->devices);
 635		fs_devices->num_devices++;
 636		mutex_unlock(&fs_devices->device_list_mutex);
 637
 638		ret = 1;
 639		device->fs_devices = fs_devices;
 640	} else if (!device->name || strcmp(device->name->str, path)) {
 641		/*
  642		 * When the FS is already mounted:
  643		 * 1. If you are here and if the device->name is NULL, that
  644		 *    means this device was missing at the time of the FS mount.
  645		 * 2. If you are here and if the device->name is different
  646		 *    from 'path', that means either
  647		 *      a. the same device disappeared and reappeared with a
  648		 *         different name, or
  649		 *      b. the missing disk which was replaced has
  650		 *         reappeared now.
  651		 *
  652		 * We must allow 1 and 2a above, but 2b would be spurious
  653		 * and unintentional.
  654		 *
  655		 * Further, in case of 1 and 2a above, the disk at 'path'
  656		 * would have missed some transactions when it was away, and
  657		 * in case of 2a the stale bdev has to be updated as well.
  658		 * 2b must not be allowed at any time.
 659		 */
 660
 661		/*
 662		 * For now, we do allow update to btrfs_fs_device through the
 663		 * btrfs dev scan cli after FS has been mounted.  We're still
 664		 * tracking a problem where systems fail mount by subvolume id
 665		 * when we reject replacement on a mounted FS.
 666		 */
 667		if (!fs_devices->opened && found_transid < device->generation) {
 668			/*
  669			 * That is, if the FS is _not_ mounted and if you
  670			 * are here, that means there is more than one
  671			 * disk with the same uuid and devid. We keep the one
  672			 * with the larger generation number or the last-in if
  673			 * generations are equal.
 674			 */
 675			return -EEXIST;
 676		}
 677
 678		name = rcu_string_strdup(path, GFP_NOFS);
 679		if (!name)
 680			return -ENOMEM;
 681		rcu_string_free(device->name);
 682		rcu_assign_pointer(device->name, name);
 683		if (device->missing) {
 684			fs_devices->missing_devices--;
 685			device->missing = 0;
 686		}
 687	}
 688
 689	/*
  690	 * Unmount does not free the btrfs_device struct but zeroes the
  691	 * generation along with most of the other members. So just update
  692	 * it back. We need it to pick the disk with the largest generation
 693	 * (as above).
 694	 */
 695	if (!fs_devices->opened)
 696		device->generation = found_transid;
 697
 698	/*
 699	 * if there is new btrfs on an already registered device,
 700	 * then remove the stale device entry.
 701	 */
 702	btrfs_free_stale_device(device);
 703
 704	*fs_devices_ret = fs_devices;
 705
 706	return ret;
 707}
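     /*
      * A sketch of how the return contract above is consumed (see the
      * real caller in btrfs_scan_one_device() below):
      *
      *	ret = device_list_add(path, disk_super, devid, &fs_devices);
      *	if (ret > 0)		- first scan of this device: print the
      *				  label/fsid line, then treat as success
      *	else if (ret == 0)	- device already registered, stay quiet
      *	else			- negative errno (-EBUSY, -EEXIST, ...)
      */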
 708
 709static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 710{
 711	struct btrfs_fs_devices *fs_devices;
 712	struct btrfs_device *device;
 713	struct btrfs_device *orig_dev;
 714
 715	fs_devices = alloc_fs_devices(orig->fsid);
 716	if (IS_ERR(fs_devices))
 717		return fs_devices;
 718
 719	mutex_lock(&orig->device_list_mutex);
 720	fs_devices->total_devices = orig->total_devices;
 721
 722	/* We have held the volume lock, it is safe to get the devices. */
 723	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
 724		struct rcu_string *name;
 725
 726		device = btrfs_alloc_device(NULL, &orig_dev->devid,
 727					    orig_dev->uuid);
 728		if (IS_ERR(device))
 729			goto error;
 730
 731		/*
 732		 * This is ok to do without rcu read locked because we hold the
 733		 * uuid mutex so nothing we touch in here is going to disappear.
 734		 */
 735		if (orig_dev->name) {
 736			name = rcu_string_strdup(orig_dev->name->str,
 737					GFP_KERNEL);
 738			if (!name) {
 739				kfree(device);
 740				goto error;
 741			}
 742			rcu_assign_pointer(device->name, name);
 743		}
 744
 745		list_add(&device->dev_list, &fs_devices->devices);
 746		device->fs_devices = fs_devices;
 747		fs_devices->num_devices++;
 748	}
 749	mutex_unlock(&orig->device_list_mutex);
 750	return fs_devices;
 751error:
 752	mutex_unlock(&orig->device_list_mutex);
 753	free_fs_devices(fs_devices);
 754	return ERR_PTR(-ENOMEM);
 755}
 756
 757void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
 758{
 759	struct btrfs_device *device, *next;
 760	struct btrfs_device *latest_dev = NULL;
 761
 762	mutex_lock(&uuid_mutex);
 763again:
 764	/* This is the initialized path, it is safe to release the devices. */
 765	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
 766		if (device->in_fs_metadata) {
 767			if (!device->is_tgtdev_for_dev_replace &&
 768			    (!latest_dev ||
 769			     device->generation > latest_dev->generation)) {
 770				latest_dev = device;
 771			}
 772			continue;
 773		}
 774
 775		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
 776			/*
 777			 * In the first step, keep the device which has
 778			 * the correct fsid and the devid that is used
 779			 * for the dev_replace procedure.
 780			 * In the second step, the dev_replace state is
 781			 * read from the device tree and it is known
 782			 * whether the procedure is really active or
 783			 * not, which means whether this device is
 784			 * used or whether it should be removed.
 785			 */
 786			if (step == 0 || device->is_tgtdev_for_dev_replace) {
 787				continue;
 788			}
 789		}
 790		if (device->bdev) {
 791			blkdev_put(device->bdev, device->mode);
 792			device->bdev = NULL;
 793			fs_devices->open_devices--;
 794		}
 795		if (device->writeable) {
 796			list_del_init(&device->dev_alloc_list);
 797			device->writeable = 0;
 798			if (!device->is_tgtdev_for_dev_replace)
 799				fs_devices->rw_devices--;
 800		}
 801		list_del_init(&device->dev_list);
 802		fs_devices->num_devices--;
 803		rcu_string_free(device->name);
 804		kfree(device);
 805	}
 806
 807	if (fs_devices->seed) {
 808		fs_devices = fs_devices->seed;
 809		goto again;
 810	}
 811
 812	fs_devices->latest_bdev = latest_dev->bdev;
 813
 814	mutex_unlock(&uuid_mutex);
 815}
 816
 817static void __free_device(struct work_struct *work)
 818{
 819	struct btrfs_device *device;
 820
 821	device = container_of(work, struct btrfs_device, rcu_work);
 822
 823	if (device->bdev)
 824		blkdev_put(device->bdev, device->mode);
 825
 826	rcu_string_free(device->name);
 827	kfree(device);
 828}
 829
 830static void free_device(struct rcu_head *head)
 831{
 832	struct btrfs_device *device;
 833
 834	device = container_of(head, struct btrfs_device, rcu);
 835
 836	INIT_WORK(&device->rcu_work, __free_device);
 837	schedule_work(&device->rcu_work);
 838}
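     /*
      * Freeing a device is deliberately two-staged: free_device() runs in
      * atomic RCU-callback context where blkdev_put() must not be called
      * because it can sleep, so the real teardown is punted to a
      * workqueue via __free_device().
      */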
 839
 840static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 841{
 842	struct btrfs_device *device, *tmp;
 843
 844	if (--fs_devices->opened > 0)
 845		return 0;
 846
 847	mutex_lock(&fs_devices->device_list_mutex);
 848	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
 849		btrfs_close_one_device(device);
 850	}
 851	mutex_unlock(&fs_devices->device_list_mutex);
 852
 853	WARN_ON(fs_devices->open_devices);
 854	WARN_ON(fs_devices->rw_devices);
 855	fs_devices->opened = 0;
 856	fs_devices->seeding = 0;
 857
 858	return 0;
 859}
 860
 861int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 862{
 863	struct btrfs_fs_devices *seed_devices = NULL;
 864	int ret;
 865
 866	mutex_lock(&uuid_mutex);
 867	ret = __btrfs_close_devices(fs_devices);
 868	if (!fs_devices->opened) {
 869		seed_devices = fs_devices->seed;
 870		fs_devices->seed = NULL;
 871	}
 872	mutex_unlock(&uuid_mutex);
 873
 874	while (seed_devices) {
 875		fs_devices = seed_devices;
 876		seed_devices = fs_devices->seed;
 877		__btrfs_close_devices(fs_devices);
 878		free_fs_devices(fs_devices);
 879	}
 880	/*
 881	 * Wait for rcu kworkers under __btrfs_close_devices
  882	 * to finish all blkdev_puts so the device is really
  883	 * freed when umount is done.
 884	 */
 885	rcu_barrier();
 886	return ret;
 887}
 888
 889static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 890				fmode_t flags, void *holder)
 891{
 892	struct request_queue *q;
 893	struct block_device *bdev;
 894	struct list_head *head = &fs_devices->devices;
 895	struct btrfs_device *device;
 896	struct btrfs_device *latest_dev = NULL;
 897	struct buffer_head *bh;
 898	struct btrfs_super_block *disk_super;
 899	u64 devid;
 900	int seeding = 1;
 901	int ret = 0;
 902
 903	flags |= FMODE_EXCL;
 904
 905	list_for_each_entry(device, head, dev_list) {
 906		if (device->bdev)
 907			continue;
 908		if (!device->name)
 909			continue;
 910
 911		/* Just open everything we can; ignore failures here */
 912		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
 913					    &bdev, &bh))
 914			continue;
 915
 916		disk_super = (struct btrfs_super_block *)bh->b_data;
 917		devid = btrfs_stack_device_id(&disk_super->dev_item);
 918		if (devid != device->devid)
 919			goto error_brelse;
 920
 921		if (memcmp(device->uuid, disk_super->dev_item.uuid,
 922			   BTRFS_UUID_SIZE))
 923			goto error_brelse;
 924
 925		device->generation = btrfs_super_generation(disk_super);
 926		if (!latest_dev ||
 927		    device->generation > latest_dev->generation)
 928			latest_dev = device;
 929
 930		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
 931			device->writeable = 0;
 932		} else {
 933			device->writeable = !bdev_read_only(bdev);
 934			seeding = 0;
 935		}
 936
 937		q = bdev_get_queue(bdev);
 938		if (blk_queue_discard(q))
 939			device->can_discard = 1;
 940
 941		device->bdev = bdev;
 942		device->in_fs_metadata = 0;
 943		device->mode = flags;
 944
 945		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
 946			fs_devices->rotating = 1;
 947
 948		fs_devices->open_devices++;
 949		if (device->writeable &&
 950		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
 951			fs_devices->rw_devices++;
 952			list_add(&device->dev_alloc_list,
 953				 &fs_devices->alloc_list);
 954		}
 955		brelse(bh);
 956		continue;
 957
 958error_brelse:
 959		brelse(bh);
 960		blkdev_put(bdev, flags);
 961		continue;
 962	}
 963	if (fs_devices->open_devices == 0) {
 964		ret = -EINVAL;
 965		goto out;
 966	}
 967	fs_devices->seeding = seeding;
 968	fs_devices->opened = 1;
 969	fs_devices->latest_bdev = latest_dev->bdev;
 970	fs_devices->total_rw_bytes = 0;
 971out:
 972	return ret;
 973}
 974
 975int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 976		       fmode_t flags, void *holder)
 977{
 978	int ret;
 979
 980	mutex_lock(&uuid_mutex);
 981	if (fs_devices->opened) {
 982		fs_devices->opened++;
 983		ret = 0;
 984	} else {
 985		ret = __btrfs_open_devices(fs_devices, flags, holder);
 986	}
 987	mutex_unlock(&uuid_mutex);
 988	return ret;
 989}
 990
 991/*
 992 * Look for a btrfs signature on a device. This may be called out of the mount path
 993 * and we are not allowed to call set_blocksize during the scan. The superblock
  994 * is read via the pagecache.
 995 */
 996int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 997			  struct btrfs_fs_devices **fs_devices_ret)
 998{
 999	struct btrfs_super_block *disk_super;
1000	struct block_device *bdev;
1001	struct page *page;
1002	void *p;
1003	int ret = -EINVAL;
1004	u64 devid;
1005	u64 transid;
1006	u64 total_devices;
1007	u64 bytenr;
1008	pgoff_t index;
1009
1010	/*
1011	 * we would like to check all the supers, but that would make
1012	 * a btrfs mount succeed after a mkfs from a different FS.
1013	 * So, we need to add a special mount option to scan for
1014	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1015	 */
1016	bytenr = btrfs_sb_offset(0);
1017	flags |= FMODE_EXCL;
1018	mutex_lock(&uuid_mutex);
1019
1020	bdev = blkdev_get_by_path(path, flags, holder);
1021
1022	if (IS_ERR(bdev)) {
1023		ret = PTR_ERR(bdev);
1024		goto error;
1025	}
1026
1027	/* make sure our super fits in the device */
1028	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1029		goto error_bdev_put;
1030
1031	/* make sure our super fits in the page */
1032	if (sizeof(*disk_super) > PAGE_SIZE)
1033		goto error_bdev_put;
1034
1035	/* make sure our super doesn't straddle pages on disk */
1036	index = bytenr >> PAGE_SHIFT;
1037	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1038		goto error_bdev_put;
1039
1040	/* pull in the page with our super */
1041	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
1042				   index, GFP_NOFS);
1043
1044	if (IS_ERR_OR_NULL(page))
1045		goto error_bdev_put;
1046
1047	p = kmap(page);
1048
1049	/* align our pointer to the offset of the super block */
1050	disk_super = p + (bytenr & ~PAGE_MASK);
1051
1052	if (btrfs_super_bytenr(disk_super) != bytenr ||
1053	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
1054		goto error_unmap;
1055
1056	devid = btrfs_stack_device_id(&disk_super->dev_item);
1057	transid = btrfs_super_generation(disk_super);
1058	total_devices = btrfs_super_num_devices(disk_super);
1059
1060	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
1061	if (ret > 0) {
1062		if (disk_super->label[0]) {
1063			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
1064				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
1065			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
1066		} else {
1067			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
1068		}
1069
1070		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
1071		ret = 0;
1072	}
1073	if (!ret && fs_devices_ret)
1074		(*fs_devices_ret)->total_devices = total_devices;
1075
1076error_unmap:
1077	kunmap(page);
1078	put_page(page);
1079
1080error_bdev_put:
1081	blkdev_put(bdev, flags);
1082error:
1083	mutex_unlock(&uuid_mutex);
1084	return ret;
1085}
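     /*
      * Note on the page arithmetic above: btrfs_sb_offset(0) is 64KiB,
      * which is aligned for every common page size, so index normally
      * selects the page holding the primary superblock and
      * (bytenr & ~PAGE_MASK) is 0; the straddle check keeps the code
      * correct for any other bytenr it might be handed.
      */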
1086
1087/* helper to account the used device space in the range */
1088int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
1089				   u64 end, u64 *length)
1090{
1091	struct btrfs_key key;
1092	struct btrfs_root *root = device->dev_root;
1093	struct btrfs_dev_extent *dev_extent;
1094	struct btrfs_path *path;
1095	u64 extent_end;
1096	int ret;
1097	int slot;
1098	struct extent_buffer *l;
1099
1100	*length = 0;
1101
1102	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
1103		return 0;
1104
1105	path = btrfs_alloc_path();
1106	if (!path)
1107		return -ENOMEM;
1108	path->reada = READA_FORWARD;
1109
1110	key.objectid = device->devid;
1111	key.offset = start;
1112	key.type = BTRFS_DEV_EXTENT_KEY;
1113
1114	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1115	if (ret < 0)
1116		goto out;
1117	if (ret > 0) {
1118		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1119		if (ret < 0)
1120			goto out;
1121	}
1122
1123	while (1) {
1124		l = path->nodes[0];
1125		slot = path->slots[0];
1126		if (slot >= btrfs_header_nritems(l)) {
1127			ret = btrfs_next_leaf(root, path);
1128			if (ret == 0)
1129				continue;
1130			if (ret < 0)
1131				goto out;
1132
1133			break;
1134		}
1135		btrfs_item_key_to_cpu(l, &key, slot);
1136
1137		if (key.objectid < device->devid)
1138			goto next;
1139
1140		if (key.objectid > device->devid)
1141			break;
1142
1143		if (key.type != BTRFS_DEV_EXTENT_KEY)
1144			goto next;
1145
1146		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1147		extent_end = key.offset + btrfs_dev_extent_length(l,
1148								  dev_extent);
1149		if (key.offset <= start && extent_end > end) {
1150			*length = end - start + 1;
1151			break;
1152		} else if (key.offset <= start && extent_end > start)
1153			*length += extent_end - start;
1154		else if (key.offset > start && extent_end <= end)
1155			*length += extent_end - key.offset;
1156		else if (key.offset > start && key.offset <= end) {
1157			*length += end - key.offset + 1;
1158			break;
1159		} else if (key.offset > end)
1160			break;
1161
1162next:
1163		path->slots[0]++;
1164	}
1165	ret = 0;
1166out:
1167	btrfs_free_path(path);
1168	return ret;
1169}
1170
1171static int contains_pending_extent(struct btrfs_transaction *transaction,
1172				   struct btrfs_device *device,
1173				   u64 *start, u64 len)
1174{
1175	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
1176	struct extent_map *em;
1177	struct list_head *search_list = &fs_info->pinned_chunks;
1178	int ret = 0;
1179	u64 physical_start = *start;
1180
1181	if (transaction)
1182		search_list = &transaction->pending_chunks;
1183again:
1184	list_for_each_entry(em, search_list, list) {
1185		struct map_lookup *map;
1186		int i;
1187
1188		map = em->map_lookup;
1189		for (i = 0; i < map->num_stripes; i++) {
1190			u64 end;
1191
1192			if (map->stripes[i].dev != device)
1193				continue;
1194			if (map->stripes[i].physical >= physical_start + len ||
1195			    map->stripes[i].physical + em->orig_block_len <=
1196			    physical_start)
1197				continue;
1198			/*
1199			 * Make sure that while processing the pinned list we do
1200			 * not override our *start with a lower value, because
1201			 * we can have pinned chunks that fall within this
1202			 * device hole and that have lower physical addresses
1203			 * than the pending chunks we processed before. If we
1204			 * do not take this special care we can end up getting
1205			 * 2 pending chunks that start at the same physical
1206			 * device offsets because the end offset of a pinned
1207			 * chunk can be equal to the start offset of some
1208			 * pending chunk.
1209			 */
1210			end = map->stripes[i].physical + em->orig_block_len;
1211			if (end > *start) {
1212				*start = end;
1213				ret = 1;
1214			}
1215		}
1216	}
1217	if (search_list != &fs_info->pinned_chunks) {
1218		search_list = &fs_info->pinned_chunks;
1219		goto again;
1220	}
1221
1222	return ret;
1223}
1224
1225
1226/*
1227 * find_free_dev_extent_start - find free space in the specified device
1228 * @device:	  the device which we search the free space in
1229 * @num_bytes:	  the size of the free space that we need
1230 * @search_start: the position from which to begin the search
1231 * @start:	  store the start of the free space.
 1232 * @len:	  the size of the free space that we find, or the size
1233 *		  of the max free space if we don't find suitable free space
1234 *
1235 * this uses a pretty simple search, the expectation is that it is
1236 * called very infrequently and that a given device has a small number
1237 * of extents
1238 *
 1239 * @start is used to store the start of the free space if we find it. But if we
1240 * don't find suitable free space, it will be used to store the start position
1241 * of the max free space.
1242 *
1243 * @len is used to store the size of the free space that we find.
1244 * But if we don't find suitable free space, it is used to store the size of
1245 * the max free space.
1246 */
1247int find_free_dev_extent_start(struct btrfs_transaction *transaction,
1248			       struct btrfs_device *device, u64 num_bytes,
1249			       u64 search_start, u64 *start, u64 *len)
1250{
1251	struct btrfs_key key;
1252	struct btrfs_root *root = device->dev_root;
1253	struct btrfs_dev_extent *dev_extent;
1254	struct btrfs_path *path;
1255	u64 hole_size;
1256	u64 max_hole_start;
1257	u64 max_hole_size;
1258	u64 extent_end;
1259	u64 search_end = device->total_bytes;
1260	int ret;
1261	int slot;
1262	struct extent_buffer *l;
1263	u64 min_search_start;
1264
1265	/*
1266	 * We don't want to overwrite the superblock on the drive nor any area
1267	 * used by the boot loader (grub for example), so we make sure to start
1268	 * at an offset of at least 1MB.
1269	 */
1270	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1271	search_start = max(search_start, min_search_start);
1272
1273	path = btrfs_alloc_path();
1274	if (!path)
1275		return -ENOMEM;
1276
1277	max_hole_start = search_start;
1278	max_hole_size = 0;
1279
1280again:
1281	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1282		ret = -ENOSPC;
1283		goto out;
1284	}
1285
1286	path->reada = READA_FORWARD;
1287	path->search_commit_root = 1;
1288	path->skip_locking = 1;
1289
1290	key.objectid = device->devid;
1291	key.offset = search_start;
1292	key.type = BTRFS_DEV_EXTENT_KEY;
1293
1294	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1295	if (ret < 0)
1296		goto out;
1297	if (ret > 0) {
1298		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1299		if (ret < 0)
1300			goto out;
1301	}
1302
1303	while (1) {
1304		l = path->nodes[0];
1305		slot = path->slots[0];
1306		if (slot >= btrfs_header_nritems(l)) {
1307			ret = btrfs_next_leaf(root, path);
1308			if (ret == 0)
1309				continue;
1310			if (ret < 0)
1311				goto out;
1312
1313			break;
1314		}
1315		btrfs_item_key_to_cpu(l, &key, slot);
1316
1317		if (key.objectid < device->devid)
1318			goto next;
1319
1320		if (key.objectid > device->devid)
1321			break;
1322
1323		if (key.type != BTRFS_DEV_EXTENT_KEY)
1324			goto next;
1325
1326		if (key.offset > search_start) {
1327			hole_size = key.offset - search_start;
1328
1329			/*
1330			 * Have to check before we set max_hole_start, otherwise
1331			 * we could end up sending back this offset anyway.
1332			 */
1333			if (contains_pending_extent(transaction, device,
1334						    &search_start,
1335						    hole_size)) {
1336				if (key.offset >= search_start) {
1337					hole_size = key.offset - search_start;
1338				} else {
1339					WARN_ON_ONCE(1);
1340					hole_size = 0;
1341				}
1342			}
1343
1344			if (hole_size > max_hole_size) {
1345				max_hole_start = search_start;
1346				max_hole_size = hole_size;
1347			}
1348
1349			/*
 1350			 * If this free space is greater than what we need,
1351			 * it must be the max free space that we have found
1352			 * until now, so max_hole_start must point to the start
1353			 * of this free space and the length of this free space
1354			 * is stored in max_hole_size. Thus, we return
1355			 * max_hole_start and max_hole_size and go back to the
1356			 * caller.
1357			 */
1358			if (hole_size >= num_bytes) {
1359				ret = 0;
1360				goto out;
1361			}
1362		}
1363
1364		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1365		extent_end = key.offset + btrfs_dev_extent_length(l,
1366								  dev_extent);
1367		if (extent_end > search_start)
1368			search_start = extent_end;
1369next:
1370		path->slots[0]++;
1371		cond_resched();
1372	}
1373
1374	/*
1375	 * At this point, search_start should be the end of
1376	 * allocated dev extents, and when shrinking the device,
1377	 * search_end may be smaller than search_start.
1378	 */
1379	if (search_end > search_start) {
1380		hole_size = search_end - search_start;
1381
1382		if (contains_pending_extent(transaction, device, &search_start,
1383					    hole_size)) {
1384			btrfs_release_path(path);
1385			goto again;
1386		}
1387
1388		if (hole_size > max_hole_size) {
1389			max_hole_start = search_start;
1390			max_hole_size = hole_size;
1391		}
1392	}
1393
1394	/* See above. */
1395	if (max_hole_size < num_bytes)
1396		ret = -ENOSPC;
1397	else
1398		ret = 0;
1399
1400out:
1401	btrfs_free_path(path);
1402	*start = max_hole_start;
1403	if (len)
1404		*len = max_hole_size;
1405	return ret;
1406}
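     /*
      * A worked example for the hole search above (illustrative numbers):
      * with dev extents at [1MiB, 5MiB) and [9MiB, 12MiB) on a 16MiB
      * device and num_bytes == 3MiB, the loop finds the hole [5MiB, 9MiB)
      * and returns 0 with *start == 5MiB. With num_bytes == 8MiB neither
      * that 4MiB hole nor the 4MiB tail hole fits, so -ENOSPC is returned
      * while *start/*len still describe the 4MiB maximum hole.
      */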
1407
1408int find_free_dev_extent(struct btrfs_trans_handle *trans,
1409			 struct btrfs_device *device, u64 num_bytes,
1410			 u64 *start, u64 *len)
1411{
1412	/* FIXME use last free of some kind */
1413	return find_free_dev_extent_start(trans->transaction, device,
1414					  num_bytes, 0, start, len);
1415}
1416
1417static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1418			  struct btrfs_device *device,
1419			  u64 start, u64 *dev_extent_len)
1420{
1421	int ret;
1422	struct btrfs_path *path;
1423	struct btrfs_root *root = device->dev_root;
1424	struct btrfs_key key;
1425	struct btrfs_key found_key;
1426	struct extent_buffer *leaf = NULL;
1427	struct btrfs_dev_extent *extent = NULL;
1428
1429	path = btrfs_alloc_path();
1430	if (!path)
1431		return -ENOMEM;
1432
1433	key.objectid = device->devid;
1434	key.offset = start;
1435	key.type = BTRFS_DEV_EXTENT_KEY;
1436again:
1437	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1438	if (ret > 0) {
1439		ret = btrfs_previous_item(root, path, key.objectid,
1440					  BTRFS_DEV_EXTENT_KEY);
1441		if (ret)
1442			goto out;
1443		leaf = path->nodes[0];
1444		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1445		extent = btrfs_item_ptr(leaf, path->slots[0],
1446					struct btrfs_dev_extent);
1447		BUG_ON(found_key.offset > start || found_key.offset +
1448		       btrfs_dev_extent_length(leaf, extent) < start);
1449		key = found_key;
1450		btrfs_release_path(path);
1451		goto again;
1452	} else if (ret == 0) {
1453		leaf = path->nodes[0];
1454		extent = btrfs_item_ptr(leaf, path->slots[0],
1455					struct btrfs_dev_extent);
1456	} else {
1457		btrfs_std_error(root->fs_info, ret, "Slot search failed");
1458		goto out;
1459	}
1460
1461	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1462
1463	ret = btrfs_del_item(trans, root, path);
1464	if (ret) {
1465		btrfs_std_error(root->fs_info, ret,
1466			    "Failed to remove dev extent item");
1467	} else {
1468		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1469	}
1470out:
1471	btrfs_free_path(path);
1472	return ret;
1473}
1474
1475static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1476				  struct btrfs_device *device,
1477				  u64 chunk_tree, u64 chunk_objectid,
1478				  u64 chunk_offset, u64 start, u64 num_bytes)
1479{
1480	int ret;
1481	struct btrfs_path *path;
1482	struct btrfs_root *root = device->dev_root;
1483	struct btrfs_dev_extent *extent;
1484	struct extent_buffer *leaf;
1485	struct btrfs_key key;
1486
1487	WARN_ON(!device->in_fs_metadata);
1488	WARN_ON(device->is_tgtdev_for_dev_replace);
1489	path = btrfs_alloc_path();
1490	if (!path)
1491		return -ENOMEM;
1492
1493	key.objectid = device->devid;
1494	key.offset = start;
1495	key.type = BTRFS_DEV_EXTENT_KEY;
1496	ret = btrfs_insert_empty_item(trans, root, path, &key,
1497				      sizeof(*extent));
1498	if (ret)
1499		goto out;
1500
1501	leaf = path->nodes[0];
1502	extent = btrfs_item_ptr(leaf, path->slots[0],
1503				struct btrfs_dev_extent);
1504	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1505	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1506	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1507
1508	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1509		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
1510
1511	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1512	btrfs_mark_buffer_dirty(leaf);
1513out:
1514	btrfs_free_path(path);
1515	return ret;
1516}
1517
1518static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1519{
1520	struct extent_map_tree *em_tree;
1521	struct extent_map *em;
1522	struct rb_node *n;
1523	u64 ret = 0;
1524
1525	em_tree = &fs_info->mapping_tree.map_tree;
1526	read_lock(&em_tree->lock);
1527	n = rb_last(&em_tree->map);
1528	if (n) {
1529		em = rb_entry(n, struct extent_map, rb_node);
1530		ret = em->start + em->len;
1531	}
1532	read_unlock(&em_tree->lock);
1533
1534	return ret;
1535}
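     /*
      * Chunk logical addresses only grow and extent maps are indexed by
      * logical start, so the end of the right-most map in the tree is the
      * next free logical offset (0 when the mapping tree is still empty).
      */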
1536
1537static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1538				    u64 *devid_ret)
1539{
1540	int ret;
1541	struct btrfs_key key;
1542	struct btrfs_key found_key;
1543	struct btrfs_path *path;
1544
1545	path = btrfs_alloc_path();
1546	if (!path)
1547		return -ENOMEM;
1548
1549	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1550	key.type = BTRFS_DEV_ITEM_KEY;
1551	key.offset = (u64)-1;
1552
1553	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1554	if (ret < 0)
1555		goto error;
1556
1557	BUG_ON(ret == 0); /* Corruption */
1558
1559	ret = btrfs_previous_item(fs_info->chunk_root, path,
1560				  BTRFS_DEV_ITEMS_OBJECTID,
1561				  BTRFS_DEV_ITEM_KEY);
1562	if (ret) {
1563		*devid_ret = 1;
1564	} else {
1565		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1566				      path->slots[0]);
1567		*devid_ret = found_key.offset + 1;
1568	}
1569	ret = 0;
1570error:
1571	btrfs_free_path(path);
1572	return ret;
1573}
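     /*
      * devids are handed out the same way: walk back from offset (u64)-1
      * to the last DEV_ITEM key and return found_key.offset + 1, or
      * devid 1 when no device item exists yet.
      */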
1574
1575/*
1576 * the device information is stored in the chunk root
1577 * the btrfs_device struct should be fully filled in
1578 */
1579static int btrfs_add_device(struct btrfs_trans_handle *trans,
1580			    struct btrfs_root *root,
1581			    struct btrfs_device *device)
1582{
1583	int ret;
1584	struct btrfs_path *path;
1585	struct btrfs_dev_item *dev_item;
1586	struct extent_buffer *leaf;
1587	struct btrfs_key key;
1588	unsigned long ptr;
1589
1590	root = root->fs_info->chunk_root;
1591
1592	path = btrfs_alloc_path();
1593	if (!path)
1594		return -ENOMEM;
1595
1596	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1597	key.type = BTRFS_DEV_ITEM_KEY;
1598	key.offset = device->devid;
1599
1600	ret = btrfs_insert_empty_item(trans, root, path, &key,
1601				      sizeof(*dev_item));
1602	if (ret)
1603		goto out;
1604
1605	leaf = path->nodes[0];
1606	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1607
1608	btrfs_set_device_id(leaf, dev_item, device->devid);
1609	btrfs_set_device_generation(leaf, dev_item, 0);
1610	btrfs_set_device_type(leaf, dev_item, device->type);
1611	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1612	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1613	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1614	btrfs_set_device_total_bytes(leaf, dev_item,
1615				     btrfs_device_get_disk_total_bytes(device));
1616	btrfs_set_device_bytes_used(leaf, dev_item,
1617				    btrfs_device_get_bytes_used(device));
1618	btrfs_set_device_group(leaf, dev_item, 0);
1619	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1620	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1621	btrfs_set_device_start_offset(leaf, dev_item, 0);
1622
1623	ptr = btrfs_device_uuid(dev_item);
1624	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1625	ptr = btrfs_device_fsid(dev_item);
1626	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1627	btrfs_mark_buffer_dirty(leaf);
1628
1629	ret = 0;
1630out:
1631	btrfs_free_path(path);
1632	return ret;
1633}
1634
1635/*
1636 * Function to update ctime/mtime for a given device path.
1637 * Mainly used for ctime/mtime based probe like libblkid.
1638 */
1639static void update_dev_time(char *path_name)
1640{
1641	struct file *filp;
1642
1643	filp = filp_open(path_name, O_RDWR, 0);
1644	if (IS_ERR(filp))
1645		return;
1646	file_update_time(filp);
1647	filp_close(filp, NULL);
1648}
1649
1650static int btrfs_rm_dev_item(struct btrfs_root *root,
1651			     struct btrfs_device *device)
1652{
1653	int ret;
1654	struct btrfs_path *path;
1655	struct btrfs_key key;
1656	struct btrfs_trans_handle *trans;
1657
1658	root = root->fs_info->chunk_root;
1659
1660	path = btrfs_alloc_path();
1661	if (!path)
1662		return -ENOMEM;
1663
1664	trans = btrfs_start_transaction(root, 0);
1665	if (IS_ERR(trans)) {
1666		btrfs_free_path(path);
1667		return PTR_ERR(trans);
1668	}
1669	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1670	key.type = BTRFS_DEV_ITEM_KEY;
1671	key.offset = device->devid;
1672
1673	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1674	if (ret < 0)
1675		goto out;
1676
1677	if (ret > 0) {
1678		ret = -ENOENT;
1679		goto out;
1680	}
1681
1682	ret = btrfs_del_item(trans, root, path);
1683	if (ret)
1684		goto out;
1685out:
1686	btrfs_free_path(path);
1687	btrfs_commit_transaction(trans, root);
1688	return ret;
1689}
1690
1691int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1692{
1693	struct btrfs_device *device;
1694	struct btrfs_device *next_device;
1695	struct block_device *bdev;
1696	struct buffer_head *bh = NULL;
1697	struct btrfs_super_block *disk_super;
1698	struct btrfs_fs_devices *cur_devices;
1699	u64 all_avail;
1700	u64 devid;
1701	u64 num_devices;
1702	u8 *dev_uuid;
1703	unsigned seq;
1704	int ret = 0;
1705	bool clear_super = false;
1706
1707	mutex_lock(&uuid_mutex);
1708
1709	do {
1710		seq = read_seqbegin(&root->fs_info->profiles_lock);
1711
1712		all_avail = root->fs_info->avail_data_alloc_bits |
1713			    root->fs_info->avail_system_alloc_bits |
1714			    root->fs_info->avail_metadata_alloc_bits;
1715	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
1716
1717	num_devices = root->fs_info->fs_devices->num_devices;
1718	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
1719	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1720		WARN_ON(num_devices < 1);
1721		num_devices--;
1722	}
1723	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);
1724
1725	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1726		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1727		goto out;
1728	}
1729
1730	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1731		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1732		goto out;
1733	}
1734
1735	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1736	    root->fs_info->fs_devices->rw_devices <= 2) {
1737		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1738		goto out;
1739	}
1740	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1741	    root->fs_info->fs_devices->rw_devices <= 3) {
1742		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1743		goto out;
1744	}
1745
1746	if (strcmp(device_path, "missing") == 0) {
1747		struct list_head *devices;
1748		struct btrfs_device *tmp;
1749
1750		device = NULL;
1751		devices = &root->fs_info->fs_devices->devices;
1752		/*
1753		 * It is safe to read the devices since the volume_mutex
1754		 * is held.
1755		 */
1756		list_for_each_entry(tmp, devices, dev_list) {
1757			if (tmp->in_fs_metadata &&
1758			    !tmp->is_tgtdev_for_dev_replace &&
1759			    !tmp->bdev) {
1760				device = tmp;
1761				break;
1762			}
1763		}
1764		bdev = NULL;
1765		bh = NULL;
1766		disk_super = NULL;
1767		if (!device) {
1768			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1769			goto out;
1770		}
1771	} else {
1772		ret = btrfs_get_bdev_and_sb(device_path,
1773					    FMODE_WRITE | FMODE_EXCL,
1774					    root->fs_info->bdev_holder, 0,
1775					    &bdev, &bh);
1776		if (ret)
1777			goto out;
1778		disk_super = (struct btrfs_super_block *)bh->b_data;
1779		devid = btrfs_stack_device_id(&disk_super->dev_item);
1780		dev_uuid = disk_super->dev_item.uuid;
1781		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1782					   disk_super->fsid);
1783		if (!device) {
1784			ret = -ENOENT;
1785			goto error_brelse;
1786		}
1787	}
1788
1789	if (device->is_tgtdev_for_dev_replace) {
1790		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1791		goto error_brelse;
1792	}
1793
1794	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1795		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1796		goto error_brelse;
1797	}
1798
1799	if (device->writeable) {
1800		lock_chunks(root);
1801		list_del_init(&device->dev_alloc_list);
1802		device->fs_devices->rw_devices--;
1803		unlock_chunks(root);
1804		clear_super = true;
1805	}
1806
1807	mutex_unlock(&uuid_mutex);
1808	ret = btrfs_shrink_device(device, 0);
1809	mutex_lock(&uuid_mutex);
1810	if (ret)
1811		goto error_undo;
1812
1813	/*
1814	 * TODO: the superblock still includes this device in its num_devices
1815	 * counter although write_all_supers() is not locked out. This
1816	 * could give a filesystem state which requires a degraded mount.
1817	 */
1818	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1819	if (ret)
1820		goto error_undo;
1821
1822	device->in_fs_metadata = 0;
1823	btrfs_scrub_cancel_dev(root->fs_info, device);
1824
1825	/*
1826	 * the device list mutex makes sure that we don't change
1827	 * the device list while someone else is writing out all
1828	 * the device supers. Whoever is writing all supers, should
1829	 * lock the device list mutex before getting the number of
1830	 * devices in the super block (super_copy). Conversely,
1831	 * whoever updates the number of devices in the super block
1832	 * (super_copy) should hold the device list mutex.
1833	 */
1834
1835	cur_devices = device->fs_devices;
1836	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1837	list_del_rcu(&device->dev_list);
1838
1839	device->fs_devices->num_devices--;
1840	device->fs_devices->total_devices--;
1841
1842	if (device->missing)
1843		device->fs_devices->missing_devices--;
1844
1845	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1846				 struct btrfs_device, dev_list);
1847	if (device->bdev == root->fs_info->sb->s_bdev)
1848		root->fs_info->sb->s_bdev = next_device->bdev;
1849	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1850		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1851
1852	if (device->bdev) {
1853		device->fs_devices->open_devices--;
1854		/* remove sysfs entry */
1855		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
1856	}
1857
1858	call_rcu(&device->rcu, free_device);
1859
1860	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1861	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1862	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1863
1864	if (cur_devices->open_devices == 0) {
1865		struct btrfs_fs_devices *fs_devices;
1866		fs_devices = root->fs_info->fs_devices;
1867		while (fs_devices) {
1868			if (fs_devices->seed == cur_devices) {
1869				fs_devices->seed = cur_devices->seed;
1870				break;
1871			}
1872			fs_devices = fs_devices->seed;
1873		}
1874		cur_devices->seed = NULL;
1875		__btrfs_close_devices(cur_devices);
1876		free_fs_devices(cur_devices);
1877	}
1878
1879	root->fs_info->num_tolerated_disk_barrier_failures =
1880		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1881
1882	/*
1883	 * at this point, the device is zero sized.  We want to
1884	 * remove it from the devices list and zero out the old super
1885	 */
1886	if (clear_super && disk_super) {
1887		u64 bytenr;
1888		int i;
1889
1890		/* make sure this device isn't detected as part of
1891		 * the FS anymore
1892		 */
1893		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1894		set_buffer_dirty(bh);
1895		sync_dirty_buffer(bh);
1896
 1897		/* clear the mirror copies of the super block on the disk
 1898		 * being removed; the 0th copy was taken care of above and
 1899		 * the loop below takes care of the rest
1900		 */
1901		for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1902			bytenr = btrfs_sb_offset(i);
1903			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
1904					i_size_read(bdev->bd_inode))
1905				break;
1906
1907			brelse(bh);
1908			bh = __bread(bdev, bytenr / 4096,
1909					BTRFS_SUPER_INFO_SIZE);
1910			if (!bh)
1911				continue;
1912
1913			disk_super = (struct btrfs_super_block *)bh->b_data;
1914
1915			if (btrfs_super_bytenr(disk_super) != bytenr ||
1916				btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1917				continue;
1918			}
1919			memset(&disk_super->magic, 0,
1920						sizeof(disk_super->magic));
1921			set_buffer_dirty(bh);
1922			sync_dirty_buffer(bh);
1923		}
1924	}
1925
1926	ret = 0;
1927
1928	if (bdev) {
1929		/* Notify udev that device has changed */
1930		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1931
1932		/* Update ctime/mtime for device path for libblkid */
1933		update_dev_time(device_path);
1934	}
1935
1936error_brelse:
1937	brelse(bh);
1938	if (bdev)
1939		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1940out:
1941	mutex_unlock(&uuid_mutex);
1942	return ret;
1943error_undo:
1944	if (device->writeable) {
1945		lock_chunks(root);
1946		list_add(&device->dev_alloc_list,
1947			 &root->fs_info->fs_devices->alloc_list);
1948		device->fs_devices->rw_devices++;
1949		unlock_chunks(root);
1950	}
1951	goto error_brelse;
1952}
1953
1954void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1955					struct btrfs_device *srcdev)
1956{
1957	struct btrfs_fs_devices *fs_devices;
1958
1959	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1960
1961	/*
1962	 * For a fs with no seed, srcdev->fs_devices will point to the
1963	 * fs_devices of fs_info. However, when the device being replaced is
1964	 * a seed device it will point to the seed's own fs_devices. In short,
1965	 * srcdev will have its correct fs_devices in both cases.
1966	 */
1967	fs_devices = srcdev->fs_devices;
1968
1969	list_del_rcu(&srcdev->dev_list);
1970	list_del_rcu(&srcdev->dev_alloc_list);
1971	fs_devices->num_devices--;
1972	if (srcdev->missing)
1973		fs_devices->missing_devices--;
1974
1975	if (srcdev->writeable) {
1976		fs_devices->rw_devices--;
1977		/* zero out the old super if it is writable */
1978		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
1979	}
1980
1981	if (srcdev->bdev)
1982		fs_devices->open_devices--;
1983}
1984
1985void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
1986				      struct btrfs_device *srcdev)
1987{
1988	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
1989
1990	call_rcu(&srcdev->rcu, free_device);
1991
1992	/*
1993	 * unless fs_devices is a seed fs, num_devices shouldn't go
1994	 * to zero
1995	 */
1996	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
1997
1998	/* if there are no devs left we'd rather delete the fs_devices */
1999	if (!fs_devices->num_devices) {
2000		struct btrfs_fs_devices *tmp_fs_devices;
2001
2002		tmp_fs_devices = fs_info->fs_devices;
2003		while (tmp_fs_devices) {
2004			if (tmp_fs_devices->seed == fs_devices) {
2005				tmp_fs_devices->seed = fs_devices->seed;
2006				break;
2007			}
2008			tmp_fs_devices = tmp_fs_devices->seed;
2009		}
2010		fs_devices->seed = NULL;
2011		__btrfs_close_devices(fs_devices);
2012		free_fs_devices(fs_devices);
2013	}
2014}
2015
2016void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2017				      struct btrfs_device *tgtdev)
2018{
2019	struct btrfs_device *next_device;
2020
2021	mutex_lock(&uuid_mutex);
2022	WARN_ON(!tgtdev);
2023	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2024
2025	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
2026
2027	if (tgtdev->bdev) {
2028		btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2029		fs_info->fs_devices->open_devices--;
2030	}
2031	fs_info->fs_devices->num_devices--;
2032
2033	next_device = list_entry(fs_info->fs_devices->devices.next,
2034				 struct btrfs_device, dev_list);
2035	if (tgtdev->bdev == fs_info->sb->s_bdev)
2036		fs_info->sb->s_bdev = next_device->bdev;
2037	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
2038		fs_info->fs_devices->latest_bdev = next_device->bdev;
2039	list_del_rcu(&tgtdev->dev_list);
2040
2041	call_rcu(&tgtdev->rcu, free_device);
2042
2043	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2044	mutex_unlock(&uuid_mutex);
2045}
2046
2047static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
2048				     struct btrfs_device **device)
2049{
2050	int ret = 0;
2051	struct btrfs_super_block *disk_super;
2052	u64 devid;
2053	u8 *dev_uuid;
2054	struct block_device *bdev;
2055	struct buffer_head *bh;
2056
2057	*device = NULL;
2058	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2059				    root->fs_info->bdev_holder, 0, &bdev, &bh);
2060	if (ret)
2061		return ret;
2062	disk_super = (struct btrfs_super_block *)bh->b_data;
2063	devid = btrfs_stack_device_id(&disk_super->dev_item);
2064	dev_uuid = disk_super->dev_item.uuid;
2065	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2066				    disk_super->fsid);
2067	brelse(bh);
2068	if (!*device)
2069		ret = -ENOENT;
2070	blkdev_put(bdev, FMODE_READ);
2071	return ret;
2072}
2073
2074int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
2075					 char *device_path,
2076					 struct btrfs_device **device)
2077{
2078	*device = NULL;
2079	if (strcmp(device_path, "missing") == 0) {
2080		struct list_head *devices;
2081		struct btrfs_device *tmp;
2082
2083		devices = &root->fs_info->fs_devices->devices;
2084		/*
2085		 * It is safe to read the devices since the volume_mutex
2086		 * is held by the caller.
2087		 */
2088		list_for_each_entry(tmp, devices, dev_list) {
2089			if (tmp->in_fs_metadata && !tmp->bdev) {
2090				*device = tmp;
2091				break;
2092			}
2093		}
2094
2095		if (!*device)
2096			return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2097
2098		return 0;
2099	} else {
2100		return btrfs_find_device_by_path(root, device_path, device);
2101	}
2102}
2103
2104/*
2105 * does all the dirty work required for changing the file system's UUID.
2106 */
2107static int btrfs_prepare_sprout(struct btrfs_root *root)
2108{
2109	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2110	struct btrfs_fs_devices *old_devices;
2111	struct btrfs_fs_devices *seed_devices;
2112	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
2113	struct btrfs_device *device;
2114	u64 super_flags;
2115
2116	BUG_ON(!mutex_is_locked(&uuid_mutex));
2117	if (!fs_devices->seeding)
2118		return -EINVAL;
2119
2120	seed_devices = __alloc_fs_devices();
2121	if (IS_ERR(seed_devices))
2122		return PTR_ERR(seed_devices);
2123
2124	old_devices = clone_fs_devices(fs_devices);
2125	if (IS_ERR(old_devices)) {
2126		kfree(seed_devices);
2127		return PTR_ERR(old_devices);
2128	}
2129
2130	list_add(&old_devices->list, &fs_uuids);
2131
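	/*
	 * seed_devices starts as a copy of fs_devices; its lists are then
	 * re-initialized and the live device lists are spliced over below,
	 * so seed_devices takes ownership of the existing devices while
	 * fs_devices is reused for the new sprout.
	 */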
2132	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2133	seed_devices->opened = 1;
2134	INIT_LIST_HEAD(&seed_devices->devices);
2135	INIT_LIST_HEAD(&seed_devices->alloc_list);
2136	mutex_init(&seed_devices->device_list_mutex);
2137
2138	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2139	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2140			      synchronize_rcu);
2141	list_for_each_entry(device, &seed_devices->devices, dev_list)
2142		device->fs_devices = seed_devices;
2143
2144	lock_chunks(root);
2145	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2146	unlock_chunks(root);
2147
2148	fs_devices->seeding = 0;
2149	fs_devices->num_devices = 0;
2150	fs_devices->open_devices = 0;
2151	fs_devices->missing_devices = 0;
2152	fs_devices->rotating = 0;
2153	fs_devices->seed = seed_devices;
2154
2155	generate_random_uuid(fs_devices->fsid);
2156	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2157	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2158	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2159
2160	super_flags = btrfs_super_flags(disk_super) &
2161		      ~BTRFS_SUPER_FLAG_SEEDING;
2162	btrfs_set_super_flags(disk_super, super_flags);
2163
2164	return 0;
2165}
2166
2167/*
2168 * store the expected generation for seed devices in device items.
2169 */
2170static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2171			       struct btrfs_root *root)
2172{
2173	struct btrfs_path *path;
2174	struct extent_buffer *leaf;
2175	struct btrfs_dev_item *dev_item;
2176	struct btrfs_device *device;
2177	struct btrfs_key key;
2178	u8 fs_uuid[BTRFS_UUID_SIZE];
2179	u8 dev_uuid[BTRFS_UUID_SIZE];
2180	u64 devid;
2181	int ret;
2182
2183	path = btrfs_alloc_path();
2184	if (!path)
2185		return -ENOMEM;
2186
2187	root = root->fs_info->chunk_root;
2188	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2189	key.offset = 0;
2190	key.type = BTRFS_DEV_ITEM_KEY;
2191
2192	while (1) {
2193		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2194		if (ret < 0)
2195			goto error;
2196
2197		leaf = path->nodes[0];
2198next_slot:
2199		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2200			ret = btrfs_next_leaf(root, path);
2201			if (ret > 0)
2202				break;
2203			if (ret < 0)
2204				goto error;
2205			leaf = path->nodes[0];
2206			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2207			btrfs_release_path(path);
2208			continue;
2209		}
2210
2211		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2212		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2213		    key.type != BTRFS_DEV_ITEM_KEY)
2214			break;
2215
2216		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2217					  struct btrfs_dev_item);
2218		devid = btrfs_device_id(leaf, dev_item);
2219		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2220				   BTRFS_UUID_SIZE);
2221		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2222				   BTRFS_UUID_SIZE);
2223		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2224					   fs_uuid);
2225		BUG_ON(!device); /* Logic error */
2226
2227		if (device->fs_devices->seeding) {
2228			btrfs_set_device_generation(leaf, dev_item,
2229						    device->generation);
2230			btrfs_mark_buffer_dirty(leaf);
2231		}
2232
2233		path->slots[0]++;
2234		goto next_slot;
2235	}
2236	ret = 0;
2237error:
2238	btrfs_free_path(path);
2239	return ret;
2240}
2241
2242int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2243{
2244	struct request_queue *q;
2245	struct btrfs_trans_handle *trans;
2246	struct btrfs_device *device;
2247	struct block_device *bdev;
2248	struct list_head *devices;
2249	struct super_block *sb = root->fs_info->sb;
2250	struct rcu_string *name;
2251	u64 tmp;
2252	int seeding_dev = 0;
2253	int ret = 0;
2254
2255	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2256		return -EROFS;
2257
2258	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2259				  root->fs_info->bdev_holder);
2260	if (IS_ERR(bdev))
2261		return PTR_ERR(bdev);
2262
2263	if (root->fs_info->fs_devices->seeding) {
2264		seeding_dev = 1;
2265		down_write(&sb->s_umount);
2266		mutex_lock(&uuid_mutex);
2267	}
2268
2269	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2270
2271	devices = &root->fs_info->fs_devices->devices;
2272
2273	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2274	list_for_each_entry(device, devices, dev_list) {
2275		if (device->bdev == bdev) {
2276			ret = -EEXIST;
2277			mutex_unlock(
2278				&root->fs_info->fs_devices->device_list_mutex);
2279			goto error;
2280		}
2281	}
2282	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2283
2284	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2285	if (IS_ERR(device)) {
2286		/* we can safely leave the fs_devices entry around */
2287		ret = PTR_ERR(device);
2288		goto error;
2289	}
2290
2291	name = rcu_string_strdup(device_path, GFP_KERNEL);
2292	if (!name) {
2293		kfree(device);
2294		ret = -ENOMEM;
2295		goto error;
2296	}
2297	rcu_assign_pointer(device->name, name);
2298
2299	trans = btrfs_start_transaction(root, 0);
2300	if (IS_ERR(trans)) {
2301		rcu_string_free(device->name);
2302		kfree(device);
2303		ret = PTR_ERR(trans);
2304		goto error;
2305	}
2306
2307	q = bdev_get_queue(bdev);
2308	if (blk_queue_discard(q))
2309		device->can_discard = 1;
2310	device->writeable = 1;
2311	device->generation = trans->transid;
2312	device->io_width = root->sectorsize;
2313	device->io_align = root->sectorsize;
2314	device->sector_size = root->sectorsize;
2315	device->total_bytes = i_size_read(bdev->bd_inode);
2316	device->disk_total_bytes = device->total_bytes;
2317	device->commit_total_bytes = device->total_bytes;
2318	device->dev_root = root->fs_info->dev_root;
2319	device->bdev = bdev;
2320	device->in_fs_metadata = 1;
2321	device->is_tgtdev_for_dev_replace = 0;
2322	device->mode = FMODE_EXCL;
2323	device->dev_stats_valid = 1;
2324	set_blocksize(device->bdev, 4096);
2325
2326	if (seeding_dev) {
2327		sb->s_flags &= ~MS_RDONLY;
2328		ret = btrfs_prepare_sprout(root);
2329		BUG_ON(ret); /* -ENOMEM */
2330	}
2331
2332	device->fs_devices = root->fs_info->fs_devices;
2333
2334	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2335	lock_chunks(root);
2336	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2337	list_add(&device->dev_alloc_list,
2338		 &root->fs_info->fs_devices->alloc_list);
2339	root->fs_info->fs_devices->num_devices++;
2340	root->fs_info->fs_devices->open_devices++;
2341	root->fs_info->fs_devices->rw_devices++;
2342	root->fs_info->fs_devices->total_devices++;
2343	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2344
2345	spin_lock(&root->fs_info->free_chunk_lock);
2346	root->fs_info->free_chunk_space += device->total_bytes;
2347	spin_unlock(&root->fs_info->free_chunk_lock);
2348
2349	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2350		root->fs_info->fs_devices->rotating = 1;
2351
2352	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2353	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2354				    tmp + device->total_bytes);
2355
2356	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2357	btrfs_set_super_num_devices(root->fs_info->super_copy,
2358				    tmp + 1);
2359
2360	/* add sysfs device entry */
2361	btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);
2362
2363	/*
2364	 * we've got more storage, clear any full flags on the space
2365	 * infos
2366	 */
2367	btrfs_clear_space_info_full(root->fs_info);
2368
2369	unlock_chunks(root);
2370	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2371
2372	if (seeding_dev) {
2373		lock_chunks(root);
2374		ret = init_first_rw_device(trans, root, device);
2375		unlock_chunks(root);
2376		if (ret) {
2377			btrfs_abort_transaction(trans, root, ret);
2378			goto error_trans;
2379		}
2380	}
2381
2382	ret = btrfs_add_device(trans, root, device);
2383	if (ret) {
2384		btrfs_abort_transaction(trans, root, ret);
2385		goto error_trans;
2386	}
2387
2388	if (seeding_dev) {
2389		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2390
2391		ret = btrfs_finish_sprout(trans, root);
2392		if (ret) {
2393			btrfs_abort_transaction(trans, root, ret);
2394			goto error_trans;
2395		}
2396
2397		/* Sprouting would change the fsid of the mounted root,
2398		 * so rename the fsid in sysfs
2399		 */
2400		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2401						root->fs_info->fsid);
2402		if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
2403								fsid_buf))
2404			btrfs_warn(root->fs_info,
2405				"sysfs: failed to create fsid for sprout");
2406	}
2407
2408	root->fs_info->num_tolerated_disk_barrier_failures =
2409		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2410	ret = btrfs_commit_transaction(trans, root);
2411
2412	if (seeding_dev) {
2413		mutex_unlock(&uuid_mutex);
2414		up_write(&sb->s_umount);
2415
2416		if (ret) /* transaction commit */
2417			return ret;
2418
2419		ret = btrfs_relocate_sys_chunks(root);
2420		if (ret < 0)
2421			btrfs_std_error(root->fs_info, ret,
2422				    "Failed to relocate sys chunks after "
2423				    "device initialization. This can be fixed "
2424				    "using the \"btrfs balance\" command.");
2425		trans = btrfs_attach_transaction(root);
2426		if (IS_ERR(trans)) {
2427			if (PTR_ERR(trans) == -ENOENT)
2428				return 0;
2429			return PTR_ERR(trans);
2430		}
2431		ret = btrfs_commit_transaction(trans, root);
2432	}
2433
2434	/* Update ctime/mtime for libblkid */
2435	update_dev_time(device_path);
2436	return ret;
2437
2438error_trans:
2439	btrfs_end_transaction(trans, root);
2440	rcu_string_free(device->name);
2441	btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
2442	kfree(device);
2443error:
2444	blkdev_put(bdev, FMODE_EXCL);
2445	if (seeding_dev) {
2446		mutex_unlock(&uuid_mutex);
2447		up_write(&sb->s_umount);
2448	}
2449	return ret;
2450}
2451
2452int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2453				  struct btrfs_device *srcdev,
2454				  struct btrfs_device **device_out)
2455{
2456	struct request_queue *q;
2457	struct btrfs_device *device;
2458	struct block_device *bdev;
2459	struct btrfs_fs_info *fs_info = root->fs_info;
2460	struct list_head *devices;
2461	struct rcu_string *name;
2462	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2463	int ret = 0;
2464
2465	*device_out = NULL;
2466	if (fs_info->fs_devices->seeding) {
2467		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2468		return -EINVAL;
2469	}
2470
2471	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2472				  fs_info->bdev_holder);
2473	if (IS_ERR(bdev)) {
2474		btrfs_err(fs_info, "target device %s is invalid!", device_path);
2475		return PTR_ERR(bdev);
2476	}
2477
2478	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2479
2480	devices = &fs_info->fs_devices->devices;
2481	list_for_each_entry(device, devices, dev_list) {
2482		if (device->bdev == bdev) {
2483			btrfs_err(fs_info, "target device is in the filesystem!");
2484			ret = -EEXIST;
2485			goto error;
2486		}
2487	}
2488
2489
2490	if (i_size_read(bdev->bd_inode) <
2491	    btrfs_device_get_total_bytes(srcdev)) {
2492		btrfs_err(fs_info, "target device is smaller than source device!");
2493		ret = -EINVAL;
2494		goto error;
2495	}
2496
2497
2498	device = btrfs_alloc_device(NULL, &devid, NULL);
2499	if (IS_ERR(device)) {
2500		ret = PTR_ERR(device);
2501		goto error;
2502	}
2503
2504	name = rcu_string_strdup(device_path, GFP_NOFS);
2505	if (!name) {
2506		kfree(device);
2507		ret = -ENOMEM;
2508		goto error;
2509	}
2510	rcu_assign_pointer(device->name, name);
2511
2512	q = bdev_get_queue(bdev);
2513	if (blk_queue_discard(q))
2514		device->can_discard = 1;
2515	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2516	device->writeable = 1;
2517	device->generation = 0;
2518	device->io_width = root->sectorsize;
2519	device->io_align = root->sectorsize;
2520	device->sector_size = root->sectorsize;
2521	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2522	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2523	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2524	ASSERT(list_empty(&srcdev->resized_list));
2525	device->commit_total_bytes = srcdev->commit_total_bytes;
2526	device->commit_bytes_used = device->bytes_used;
2527	device->dev_root = fs_info->dev_root;
2528	device->bdev = bdev;
2529	device->in_fs_metadata = 1;
2530	device->is_tgtdev_for_dev_replace = 1;
2531	device->mode = FMODE_EXCL;
2532	device->dev_stats_valid = 1;
2533	set_blocksize(device->bdev, 4096);
2534	device->fs_devices = fs_info->fs_devices;
2535	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2536	fs_info->fs_devices->num_devices++;
2537	fs_info->fs_devices->open_devices++;
2538	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2539
2540	*device_out = device;
2541	return ret;
2542
2543error:
2544	blkdev_put(bdev, FMODE_EXCL);
2545	return ret;
2546}
2547
2548void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2549					      struct btrfs_device *tgtdev)
2550{
2551	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2552	tgtdev->io_width = fs_info->dev_root->sectorsize;
2553	tgtdev->io_align = fs_info->dev_root->sectorsize;
2554	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2555	tgtdev->dev_root = fs_info->dev_root;
2556	tgtdev->in_fs_metadata = 1;
2557}
2558
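/*
 * Write the cached in-memory state of @device back into its dev item
 * in the chunk tree.
 */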
2559static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2560					struct btrfs_device *device)
2561{
2562	int ret;
2563	struct btrfs_path *path;
2564	struct btrfs_root *root;
2565	struct btrfs_dev_item *dev_item;
2566	struct extent_buffer *leaf;
2567	struct btrfs_key key;
2568
2569	root = device->dev_root->fs_info->chunk_root;
2570
2571	path = btrfs_alloc_path();
2572	if (!path)
2573		return -ENOMEM;
2574
2575	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2576	key.type = BTRFS_DEV_ITEM_KEY;
2577	key.offset = device->devid;
2578
2579	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2580	if (ret < 0)
2581		goto out;
2582
2583	if (ret > 0) {
2584		ret = -ENOENT;
2585		goto out;
2586	}
2587
2588	leaf = path->nodes[0];
2589	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2590
2591	btrfs_set_device_id(leaf, dev_item, device->devid);
2592	btrfs_set_device_type(leaf, dev_item, device->type);
2593	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2594	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2595	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2596	btrfs_set_device_total_bytes(leaf, dev_item,
2597				     btrfs_device_get_disk_total_bytes(device));
2598	btrfs_set_device_bytes_used(leaf, dev_item,
2599				    btrfs_device_get_bytes_used(device));
2600	btrfs_mark_buffer_dirty(leaf);
2601
2602out:
2603	btrfs_free_path(path);
2604	return ret;
2605}
2606
2607int btrfs_grow_device(struct btrfs_trans_handle *trans,
2608		      struct btrfs_device *device, u64 new_size)
2609{
2610	struct btrfs_super_block *super_copy =
2611		device->dev_root->fs_info->super_copy;
2612	struct btrfs_fs_devices *fs_devices;
2613	u64 old_total;
2614	u64 diff;
2615
2616	if (!device->writeable)
2617		return -EACCES;
2618
2619	lock_chunks(device->dev_root);
2620	old_total = btrfs_super_total_bytes(super_copy);
2621	diff = new_size - device->total_bytes;
2622
2623	if (new_size <= device->total_bytes ||
2624	    device->is_tgtdev_for_dev_replace) {
2625		unlock_chunks(device->dev_root);
2626		return -EINVAL;
2627	}
2628
2629	fs_devices = device->dev_root->fs_info->fs_devices;
2630
2631	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2632	device->fs_devices->total_rw_bytes += diff;
2633
2634	btrfs_device_set_total_bytes(device, new_size);
2635	btrfs_device_set_disk_total_bytes(device, new_size);
2636	btrfs_clear_space_info_full(device->dev_root->fs_info);
2637	if (list_empty(&device->resized_list))
2638		list_add_tail(&device->resized_list,
2639			      &fs_devices->resized_devices);
2640	unlock_chunks(device->dev_root);
2641
2642	return btrfs_update_device(trans, device);
2643}
2644
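/* Delete the chunk item for @chunk_offset from the chunk tree. */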
2645static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2646			    struct btrfs_root *root, u64 chunk_objectid,
2647			    u64 chunk_offset)
2648{
2649	int ret;
2650	struct btrfs_path *path;
2651	struct btrfs_key key;
2652
2653	root = root->fs_info->chunk_root;
2654	path = btrfs_alloc_path();
2655	if (!path)
2656		return -ENOMEM;
2657
2658	key.objectid = chunk_objectid;
2659	key.offset = chunk_offset;
2660	key.type = BTRFS_CHUNK_ITEM_KEY;
2661
2662	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2663	if (ret < 0)
2664		goto out;
2665	else if (ret > 0) { /* Logic error or corruption */
2666		btrfs_std_error(root->fs_info, -ENOENT,
2667			    "Failed lookup while freeing chunk.");
2668		ret = -ENOENT;
2669		goto out;
2670	}
2671
2672	ret = btrfs_del_item(trans, root, path);
2673	if (ret < 0)
2674		btrfs_std_error(root->fs_info, ret,
2675			    "Failed to delete chunk item.");
2676out:
2677	btrfs_free_path(path);
2678	return ret;
2679}
2680
2681static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2682			chunk_offset)
2683{
2684	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2685	struct btrfs_disk_key *disk_key;
2686	struct btrfs_chunk *chunk;
2687	u8 *ptr;
2688	int ret = 0;
2689	u32 num_stripes;
2690	u32 array_size;
2691	u32 len = 0;
2692	u32 cur;
2693	struct btrfs_key key;
2694
2695	lock_chunks(root);
2696	array_size = btrfs_super_sys_array_size(super_copy);
2697
2698	ptr = super_copy->sys_chunk_array;
2699	cur = 0;
2700
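	/*
	 * sys_chunk_array is a packed sequence of (disk key, chunk item)
	 * pairs; walk it and, on a match, slide the remainder down over
	 * the deleted entry and shrink the recorded array size.
	 */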
2701	while (cur < array_size) {
2702		disk_key = (struct btrfs_disk_key *)ptr;
2703		btrfs_disk_key_to_cpu(&key, disk_key);
2704
2705		len = sizeof(*disk_key);
2706
2707		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2708			chunk = (struct btrfs_chunk *)(ptr + len);
2709			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2710			len += btrfs_chunk_item_size(num_stripes);
2711		} else {
2712			ret = -EIO;
2713			break;
2714		}
2715		if (key.objectid == chunk_objectid &&
2716		    key.offset == chunk_offset) {
2717			memmove(ptr, ptr + len, array_size - (cur + len));
2718			array_size -= len;
2719			btrfs_set_super_sys_array_size(super_copy, array_size);
2720		} else {
2721			ptr += len;
2722			cur += len;
2723		}
2724	}
2725	unlock_chunks(root);
2726	return ret;
2727}
2728
2729int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2730		       struct btrfs_root *root, u64 chunk_offset)
2731{
2732	struct extent_map_tree *em_tree;
2733	struct extent_map *em;
2734	struct btrfs_root *extent_root = root->fs_info->extent_root;
2735	struct map_lookup *map;
2736	u64 dev_extent_len = 0;
2737	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2738	int i, ret = 0;
2739
2740	/* Just in case */
2741	root = root->fs_info->chunk_root;
2742	em_tree = &root->fs_info->mapping_tree.map_tree;
2743
2744	read_lock(&em_tree->lock);
2745	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2746	read_unlock(&em_tree->lock);
2747
2748	if (!em || em->start > chunk_offset ||
2749	    em->start + em->len < chunk_offset) {
2750		/*
2751		 * This is a logic error, but we don't want to just rely on the
2752		 * user having built with ASSERT enabled, so if ASSERT doesn't
2753		 * do anything we still error out.
2754		 */
2755		ASSERT(0);
2756		if (em)
2757			free_extent_map(em);
2758		return -EINVAL;
2759	}
2760	map = em->map_lookup;
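	/*
	 * check_system_chunk reserves space in the SYSTEM block group so
	 * the chunk tree updates done by this removal do not run out of
	 * system chunk space mid-way.
	 */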
2761	lock_chunks(root->fs_info->chunk_root);
2762	check_system_chunk(trans, extent_root, map->type);
2763	unlock_chunks(root->fs_info->chunk_root);
2764
2765	for (i = 0; i < map->num_stripes; i++) {
2766		struct btrfs_device *device = map->stripes[i].dev;
2767		ret = btrfs_free_dev_extent(trans, device,
2768					    map->stripes[i].physical,
2769					    &dev_extent_len);
2770		if (ret) {
2771			btrfs_abort_transaction(trans, root, ret);
2772			goto out;
2773		}
2774
2775		if (device->bytes_used > 0) {
2776			lock_chunks(root);
2777			btrfs_device_set_bytes_used(device,
2778					device->bytes_used - dev_extent_len);
2779			spin_lock(&root->fs_info->free_chunk_lock);
2780			root->fs_info->free_chunk_space += dev_extent_len;
2781			spin_unlock(&root->fs_info->free_chunk_lock);
2782			btrfs_clear_space_info_full(root->fs_info);
2783			unlock_chunks(root);
2784		}
2785
2786		if (map->stripes[i].dev) {
2787			ret = btrfs_update_device(trans, map->stripes[i].dev);
2788			if (ret) {
2789				btrfs_abort_transaction(trans, root, ret);
2790				goto out;
2791			}
2792		}
2793	}
2794	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2795	if (ret) {
2796		btrfs_abort_transaction(trans, root, ret);
2797		goto out;
2798	}
2799
2800	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2801
2802	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2803		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2804		if (ret) {
2805			btrfs_abort_transaction(trans, root, ret);
2806			goto out;
2807		}
2808	}
2809
2810	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2811	if (ret) {
2812		btrfs_abort_transaction(trans, extent_root, ret);
2813		goto out;
2814	}
2815
2816out:
2817	/* once for us */
2818	free_extent_map(em);
2819	return ret;
2820}
2821
2822static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2823{
2824	struct btrfs_root *extent_root;
2825	struct btrfs_trans_handle *trans;
2826	int ret;
2827
2828	root = root->fs_info->chunk_root;
2829	extent_root = root->fs_info->extent_root;
2830
2831	/*
2832	 * Prevent races with automatic removal of unused block groups.
2833	 * After we relocate and before we remove the chunk with offset
2834	 * chunk_offset, automatic removal of the block group can kick in,
2835	 * resulting in a failure when calling btrfs_remove_chunk() below.
2836	 *
2837	 * Make sure to acquire this mutex before doing a tree search (dev
2838	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2839	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2840	 * we release the path used to search the chunk/dev tree and before
2841	 * the current task acquires this mutex and calls us.
2842	 */
2843	ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
2844
2845	ret = btrfs_can_relocate(extent_root, chunk_offset);
2846	if (ret)
2847		return -ENOSPC;
2848
2849	/* step one, relocate all the extents inside this chunk */
2850	btrfs_scrub_pause(root);
2851	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2852	btrfs_scrub_continue(root);
2853	if (ret)
2854		return ret;
2855
2856	trans = btrfs_start_trans_remove_block_group(root->fs_info,
2857						     chunk_offset);
2858	if (IS_ERR(trans)) {
2859		ret = PTR_ERR(trans);
2860		btrfs_std_error(root->fs_info, ret, NULL);
2861		return ret;
2862	}
2863
2864	/*
2865	 * step two, delete the device extents and the
2866	 * chunk tree entries
2867	 */
2868	ret = btrfs_remove_chunk(trans, root, chunk_offset);
2869	btrfs_end_transaction(trans, root);
2870	return ret;
2871}
2872
2873static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2874{
2875	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2876	struct btrfs_path *path;
2877	struct extent_buffer *leaf;
2878	struct btrfs_chunk *chunk;
2879	struct btrfs_key key;
2880	struct btrfs_key found_key;
2881	u64 chunk_type;
2882	bool retried = false;
2883	int failed = 0;
2884	int ret;
2885
2886	path = btrfs_alloc_path();
2887	if (!path)
2888		return -ENOMEM;
2889
2890again:
2891	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2892	key.offset = (u64)-1;
2893	key.type = BTRFS_CHUNK_ITEM_KEY;
2894
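	/*
	 * Walk the chunk tree from the highest offset downwards and
	 * relocate every SYSTEM chunk found along the way.
	 */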
2895	while (1) {
2896		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
2897		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2898		if (ret < 0) {
2899			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2900			goto error;
2901		}
2902		BUG_ON(ret == 0); /* Corruption */
2903
2904		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2905					  key.type);
2906		if (ret)
2907			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2908		if (ret < 0)
2909			goto error;
2910		if (ret > 0)
2911			break;
2912
2913		leaf = path->nodes[0];
2914		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2915
2916		chunk = btrfs_item_ptr(leaf, path->slots[0],
2917				       struct btrfs_chunk);
2918		chunk_type = btrfs_chunk_type(leaf, chunk);
2919		btrfs_release_path(path);
2920
2921		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2922			ret = btrfs_relocate_chunk(chunk_root,
2923						   found_key.offset);
2924			if (ret == -ENOSPC)
2925				failed++;
2926			else
2927				BUG_ON(ret);
2928		}
2929		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2930
2931		if (found_key.offset == 0)
2932			break;
2933		key.offset = found_key.offset - 1;
2934	}
2935	ret = 0;
2936	if (failed && !retried) {
2937		failed = 0;
2938		retried = true;
2939		goto again;
2940	} else if (WARN_ON(failed && retried)) {
2941		ret = -ENOSPC;
2942	}
2943error:
2944	btrfs_free_path(path);
2945	return ret;
2946}
2947
2948static int insert_balance_item(struct btrfs_root *root,
2949			       struct btrfs_balance_control *bctl)
2950{
2951	struct btrfs_trans_handle *trans;
2952	struct btrfs_balance_item *item;
2953	struct btrfs_disk_balance_args disk_bargs;
2954	struct btrfs_path *path;
2955	struct extent_buffer *leaf;
2956	struct btrfs_key key;
2957	int ret, err;
2958
2959	path = btrfs_alloc_path();
2960	if (!path)
2961		return -ENOMEM;
2962
2963	trans = btrfs_start_transaction(root, 0);
2964	if (IS_ERR(trans)) {
2965		btrfs_free_path(path);
2966		return PTR_ERR(trans);
2967	}
2968
2969	key.objectid = BTRFS_BALANCE_OBJECTID;
2970	key.type = BTRFS_TEMPORARY_ITEM_KEY;
2971	key.offset = 0;
2972
2973	ret = btrfs_insert_empty_item(trans, root, path, &key,
2974				      sizeof(*item));
2975	if (ret)
2976		goto out;
2977
2978	leaf = path->nodes[0];
2979	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2980
2981	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2982
2983	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2984	btrfs_set_balance_data(leaf, item, &disk_bargs);
2985	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2986	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2987	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2988	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2989
2990	btrfs_set_balance_flags(leaf, item, bctl->flags);
2991
2992	btrfs_mark_buffer_dirty(leaf);
2993out:
2994	btrfs_free_path(path);
2995	err = btrfs_commit_transaction(trans, root);
2996	if (err && !ret)
2997		ret = err;
2998	return ret;
2999}
3000
3001static int del_balance_item(struct btrfs_root *root)
3002{
3003	struct btrfs_trans_handle *trans;
3004	struct btrfs_path *path;
3005	struct btrfs_key key;
3006	int ret, err;
3007
3008	path = btrfs_alloc_path();
3009	if (!path)
3010		return -ENOMEM;
3011
3012	trans = btrfs_start_transaction(root, 0);
3013	if (IS_ERR(trans)) {
3014		btrfs_free_path(path);
3015		return PTR_ERR(trans);
3016	}
3017
3018	key.objectid = BTRFS_BALANCE_OBJECTID;
3019	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3020	key.offset = 0;
3021
3022	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3023	if (ret < 0)
3024		goto out;
3025	if (ret > 0) {
3026		ret = -ENOENT;
3027		goto out;
3028	}
3029
3030	ret = btrfs_del_item(trans, root, path);
3031out:
3032	btrfs_free_path(path);
3033	err = btrfs_commit_transaction(trans, root);
3034	if (err && !ret)
3035		ret = err;
3036	return ret;
3037}
3038
3039/*
3040 * This is a heuristic used to reduce the number of chunks balanced on
3041 * resume after balance was interrupted.
3042 */
3043static void update_balance_args(struct btrfs_balance_control *bctl)
3044{
3045	/*
3046	 * Turn on soft mode for chunk types that were being converted.
3047	 */
3048	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3049		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3050	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3051		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3052	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3053		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3054
3055	/*
3056	 * Turn on the usage filter if it is not already used.  The idea is
3057	 * that chunks that we have already balanced should be
3058	 * reasonably full.  Don't do it for chunks that are being
3059	 * converted - that will keep us from relocating unconverted
3060	 * (albeit full) chunks.
3061	 */
3062	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3063	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3064	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3065		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3066		bctl->data.usage = 90;
3067	}
3068	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3069	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3070	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3071		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3072		bctl->sys.usage = 90;
3073	}
3074	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3075	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3076	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3077		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3078		bctl->meta.usage = 90;
3079	}
3080}
3081
3082/*
3083 * Should be called with both balance and volume mutexes held to
3084 * serialize other volume operations (add_dev/rm_dev/resize) with
3085 * restriper.  Same goes for unset_balance_control.
3086 */
3087static void set_balance_control(struct btrfs_balance_control *bctl)
3088{
3089	struct btrfs_fs_info *fs_info = bctl->fs_info;
3090
3091	BUG_ON(fs_info->balance_ctl);
3092
3093	spin_lock(&fs_info->balance_lock);
3094	fs_info->balance_ctl = bctl;
3095	spin_unlock(&fs_info->balance_lock);
3096}
3097
3098static void unset_balance_control(struct btrfs_fs_info *fs_info)
3099{
3100	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3101
3102	BUG_ON(!fs_info->balance_ctl);
3103
3104	spin_lock(&fs_info->balance_lock);
3105	fs_info->balance_ctl = NULL;
3106	spin_unlock(&fs_info->balance_lock);
3107
3108	kfree(bctl);
3109}
3110
3111/*
3112 * Balance filters.  Return 1 if chunk should be filtered out
3113 * (should not be balanced).
3114 */
3115static int chunk_profiles_filter(u64 chunk_type,
3116				 struct btrfs_balance_args *bargs)
3117{
3118	chunk_type = chunk_to_extended(chunk_type) &
3119				BTRFS_EXTENDED_PROFILE_MASK;
3120
3121	if (bargs->profiles & chunk_type)
3122		return 0;
3123
3124	return 1;
3125}
3126
3127static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3128			      struct btrfs_balance_args *bargs)
3129{
3130	struct btrfs_block_group_cache *cache;
3131	u64 chunk_used;
3132	u64 user_thresh_min;
3133	u64 user_thresh_max;
3134	int ret = 1;
3135
3136	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3137	chunk_used = btrfs_block_group_used(&cache->item);
3138
3139	if (bargs->usage_min == 0)
3140		user_thresh_min = 0;
3141	else
3142		user_thresh_min = div_factor_fine(cache->key.offset,
3143					bargs->usage_min);
3144
3145	if (bargs->usage_max == 0)
3146		user_thresh_max = 1;
3147	else if (bargs->usage_max > 100)
3148		user_thresh_max = cache->key.offset;
3149	else
3150		user_thresh_max = div_factor_fine(cache->key.offset,
3151					bargs->usage_max);
3152
3153	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3154		ret = 0;
3155
3156	btrfs_put_block_group(cache);
3157	return ret;
3158}
3159
3160static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3161		u64 chunk_offset, struct btrfs_balance_args *bargs)
3162{
3163	struct btrfs_block_group_cache *cache;
3164	u64 chunk_used, user_thresh;
3165	int ret = 1;
3166
3167	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3168	chunk_used = btrfs_block_group_used(&cache->item);
3169
3170	if (bargs->usage_min == 0)
3171		user_thresh = 1;
3172	else if (bargs->usage > 100)
3173		user_thresh = cache->key.offset;
3174	else
3175		user_thresh = div_factor_fine(cache->key.offset,
3176					      bargs->usage);
3177
3178	if (chunk_used < user_thresh)
3179		ret = 0;
3180
3181	btrfs_put_block_group(cache);
3182	return ret;
3183}
3184
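/* Filter out (return 1) chunks that have no stripe on bargs->devid. */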
3185static int chunk_devid_filter(struct extent_buffer *leaf,
3186			      struct btrfs_chunk *chunk,
3187			      struct btrfs_balance_args *bargs)
3188{
3189	struct btrfs_stripe *stripe;
3190	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3191	int i;
3192
3193	for (i = 0; i < num_stripes; i++) {
3194		stripe = btrfs_stripe_nr(chunk, i);
3195		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3196			return 0;
3197	}
3198
3199	return 1;
3200}
3201
3202/* [pstart, pend) */
3203static int chunk_drange_filter(struct extent_buffer *leaf,
3204			       struct btrfs_chunk *chunk,
3205			       u64 chunk_offset,
3206			       struct btrfs_balance_args *bargs)
3207{
3208	struct btrfs_stripe *stripe;
3209	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3210	u64 stripe_offset;
3211	u64 stripe_length;
3212	int factor;
3213	int i;
3214
3215	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3216		return 0;
3217
3218	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3219	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3220		factor = num_stripes / 2;
3221	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3222		factor = num_stripes - 1;
3223	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3224		factor = num_stripes - 2;
3225	} else {
3226		factor = num_stripes;
3227	}
3228
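	/*
	 * factor converts the chunk's logical length into the physical
	 * length of each on-disk stripe for the chunk's RAID profile.
	 */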
3229	for (i = 0; i < num_stripes; i++) {
3230		stripe = btrfs_stripe_nr(chunk, i);
3231		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3232			continue;
3233
3234		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3235		stripe_length = btrfs_chunk_length(leaf, chunk);
3236		stripe_length = div_u64(stripe_length, factor);
3237
3238		if (stripe_offset < bargs->pend &&
3239		    stripe_offset + stripe_length > bargs->pstart)
3240			return 0;
3241	}
3242
3243	return 1;
3244}
3245
3246/* [vstart, vend) */
3247static int chunk_vrange_filter(struct extent_buffer *leaf,
3248			       struct btrfs_chunk *chunk,
3249			       u64 chunk_offset,
3250			       struct btrfs_balance_args *bargs)
3251{
3252	if (chunk_offset < bargs->vend &&
3253	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3254		/* at least part of the chunk is inside this vrange */
3255		return 0;
3256
3257	return 1;
3258}
3259
3260static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3261			       struct btrfs_chunk *chunk,
3262			       struct btrfs_balance_args *bargs)
3263{
3264	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3265
3266	if (bargs->stripes_min <= num_stripes
3267			&& num_stripes <= bargs->stripes_max)
3268		return 0;
3269
3270	return 1;
3271}
3272
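/*
 * In soft mode (BTRFS_BALANCE_ARGS_SOFT), filter out chunks that already
 * have the requested target profile so they are not rewritten needlessly.
 */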
3273static int chunk_soft_convert_filter(u64 chunk_type,
3274				     struct btrfs_balance_args *bargs)
3275{
3276	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3277		return 0;
3278
3279	chunk_type = chunk_to_extended(chunk_type) &
3280				BTRFS_EXTENDED_PROFILE_MASK;
3281
3282	if (bargs->target == chunk_type)
3283		return 1;
3284
3285	return 0;
3286}
3287
3288static int should_balance_chunk(struct btrfs_root *root,
3289				struct extent_buffer *leaf,
3290				struct btrfs_chunk *chunk, u64 chunk_offset)
3291{
3292	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3293	struct btrfs_balance_args *bargs = NULL;
3294	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3295
3296	/* type filter */
3297	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3298	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3299		return 0;
3300	}
3301
3302	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3303		bargs = &bctl->data;
3304	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3305		bargs = &bctl->sys;
3306	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3307		bargs = &bctl->meta;
3308
3309	/* profiles filter */
3310	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3311	    chunk_profiles_filter(chunk_type, bargs)) {
3312		return 0;
3313	}
3314
3315	/* usage filter */
3316	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3317	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
3318		return 0;
3319	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3320	    chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
3321		return 0;
3322	}
3323
3324	/* devid filter */
3325	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3326	    chunk_devid_filter(leaf, chunk, bargs)) {
3327		return 0;
3328	}
3329
3330	/* drange filter, makes sense only with devid filter */
3331	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3332	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
3333		return 0;
3334	}
3335
3336	/* vrange filter */
3337	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3338	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3339		return 0;
3340	}
3341
3342	/* stripes filter */
3343	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3344	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3345		return 0;
3346	}
3347
3348	/* soft profile changing mode */
3349	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3350	    chunk_soft_convert_filter(chunk_type, bargs)) {
3351		return 0;
3352	}
3353
3354	/*
3355	 * limited by count, must be the last filter
3356	 */
3357	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3358		if (bargs->limit == 0)
3359			return 0;
3360		else
3361			bargs->limit--;
3362	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3363		/*
3364		 * Same logic as the 'limit' filter; the minimum cannot be
3365		 * determined here because we do not have the global information
3366		 * about the count of all chunks that satisfy the filters.
3367		 */
3368		if (bargs->limit_max == 0)
3369			return 0;
3370		else
3371			bargs->limit_max--;
3372	}
3373
3374	return 1;
3375}
3376
3377static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3378{
3379	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3380	struct btrfs_root *chunk_root = fs_info->chunk_root;
3381	struct btrfs_root *dev_root = fs_info->dev_root;
3382	struct list_head *devices;
3383	struct btrfs_device *device;
3384	u64 old_size;
3385	u64 size_to_free;
3386	u64 chunk_type;
3387	struct btrfs_chunk *chunk;
3388	struct btrfs_path *path;
3389	struct btrfs_key key;
3390	struct btrfs_key found_key;
3391	struct btrfs_trans_handle *trans;
3392	struct extent_buffer *leaf;
3393	int slot;
3394	int ret;
3395	int enospc_errors = 0;
3396	bool counting = true;
3397	/* The single value limit and min/max limits use the same bytes in the balance args union, so save them before the counting pass consumes them */
3398	u64 limit_data = bctl->data.limit;
3399	u64 limit_meta = bctl->meta.limit;
3400	u64 limit_sys = bctl->sys.limit;
3401	u32 count_data = 0;
3402	u32 count_meta = 0;
3403	u32 count_sys = 0;
3404	int chunk_reserved = 0;
3405
3406	/* step one make some room on all the devices */
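	/*
	 * Shrinking a device relocates any chunks allocated in the
	 * reclaimed tail; growing it back afterwards leaves that space
	 * free.
	 */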
3407	devices = &fs_info->fs_devices->devices;
3408	list_for_each_entry(device, devices, dev_list) {
3409		old_size = btrfs_device_get_total_bytes(device);
3410		size_to_free = div_factor(old_size, 1);
3411		size_to_free = min_t(u64, size_to_free, SZ_1M);
3412		if (!device->writeable ||
3413		    btrfs_device_get_total_bytes(device) -
3414		    btrfs_device_get_bytes_used(device) > size_to_free ||
3415		    device->is_tgtdev_for_dev_replace)
3416			continue;
3417
3418		ret = btrfs_shrink_device(device, old_size - size_to_free);
3419		if (ret == -ENOSPC)
3420			break;
3421		BUG_ON(ret);
3422
3423		trans = btrfs_start_transaction(dev_root, 0);
3424		BUG_ON(IS_ERR(trans));
3425
3426		ret = btrfs_grow_device(trans, device, old_size);
3427		BUG_ON(ret);
3428
3429		btrfs_end_transaction(trans, dev_root);
3430	}
3431
3432	/* step two, relocate all the chunks */
3433	path = btrfs_alloc_path();
3434	if (!path) {
3435		ret = -ENOMEM;
3436		goto error;
3437	}
3438
3439	/* zero out stat counters */
3440	spin_lock(&fs_info->balance_lock);
3441	memset(&bctl->stat, 0, sizeof(bctl->stat));
3442	spin_unlock(&fs_info->balance_lock);
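	/*
	 * Two passes over the chunk tree: with counting == true only the
	 * stat counters are updated for matching chunks; the second pass
	 * does the actual relocation.
	 */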
3443again:
3444	if (!counting) {
3445		/*
3446		 * The single value limit and min/max limits use the same bytes
3447		 * in the balance args union; restore the values saved above
3448		 */
3449		bctl->data.limit = limit_data;
3450		bctl->meta.limit = limit_meta;
3451		bctl->sys.limit = limit_sys;
3452	}
3453	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3454	key.offset = (u64)-1;
3455	key.type = BTRFS_CHUNK_ITEM_KEY;
3456
3457	while (1) {
3458		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3459		    atomic_read(&fs_info->balance_cancel_req)) {
3460			ret = -ECANCELED;
3461			goto error;
3462		}
3463
3464		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3465		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3466		if (ret < 0) {
3467			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3468			goto error;
3469		}
3470
3471		/*
3472		 * this shouldn't happen, it means the last relocate
3473		 * failed
3474		 */
3475		if (ret == 0)
3476			BUG(); /* FIXME break ? */
3477
3478		ret = btrfs_previous_item(chunk_root, path, 0,
3479					  BTRFS_CHUNK_ITEM_KEY);
3480		if (ret) {
3481			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3482			ret = 0;
3483			break;
3484		}
3485
3486		leaf = path->nodes[0];
3487		slot = path->slots[0];
3488		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3489
3490		if (found_key.objectid != key.objectid) {
3491			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3492			break;
3493		}
3494
3495		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3496		chunk_type = btrfs_chunk_type(leaf, chunk);
3497
3498		if (!counting) {
3499			spin_lock(&fs_info->balance_lock);
3500			bctl->stat.considered++;
3501			spin_unlock(&fs_info->balance_lock);
3502		}
3503
3504		ret = should_balance_chunk(chunk_root, leaf, chunk,
3505					   found_key.offset);
3506
3507		btrfs_release_path(path);
3508		if (!ret) {
3509			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3510			goto loop;
3511		}
3512
3513		if (counting) {
3514			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3515			spin_lock(&fs_info->balance_lock);
3516			bctl->stat.expected++;
3517			spin_unlock(&fs_info->balance_lock);
3518
3519			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3520				count_data++;
3521			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3522				count_sys++;
3523			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3524				count_meta++;
3525
3526			goto loop;
3527		}
3528
3529		/*
3530		 * Apply limit_min filter, no need to check if the LIMITS
3531		 * filter is used, limit_min is 0 by default
3532		 */
3533		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3534					count_data < bctl->data.limit_min)
3535				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3536					count_meta < bctl->meta.limit_min)
3537				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3538					count_sys < bctl->sys.limit_min)) {
3539			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3540			goto loop;
3541		}
3542
3543		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && !chunk_reserved) {
3544			trans = btrfs_start_transaction(chunk_root, 0);
3545			if (IS_ERR(trans)) {
3546				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3547				ret = PTR_ERR(trans);
3548				goto error;
3549			}
3550
3551			ret = btrfs_force_chunk_alloc(trans, chunk_root,
3552						      BTRFS_BLOCK_GROUP_DATA);
3553			btrfs_end_transaction(trans, chunk_root);
3554			if (ret < 0) {
3555				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3556				goto error;
3557			}
3558			chunk_reserved = 1;
3559		}
3560
3561		ret = btrfs_relocate_chunk(chunk_root,
3562					   found_key.offset);
3563		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3564		if (ret && ret != -ENOSPC)
3565			goto error;
3566		if (ret == -ENOSPC) {
3567			enospc_errors++;
3568		} else {
3569			spin_lock(&fs_info->balance_lock);
3570			bctl->stat.completed++;
3571			spin_unlock(&fs_info->balance_lock);
3572		}
3573loop:
3574		if (found_key.offset == 0)
3575			break;
3576		key.offset = found_key.offset - 1;
3577	}
3578
3579	if (counting) {
3580		btrfs_release_path(path);
3581		counting = false;
3582		goto again;
3583	}
3584error:
3585	btrfs_free_path(path);
3586	if (enospc_errors) {
3587		btrfs_info(fs_info, "%d enospc errors during balance",
3588		       enospc_errors);
3589		if (!ret)
3590			ret = -ENOSPC;
3591	}
3592
3593	return ret;
3594}
3595
3596/**
3597 * alloc_profile_is_valid - see if a given profile is valid and reduced
3598 * @flags: profile to validate
3599 * @extended: if true @flags is treated as an extended profile
3600 */
3601static int alloc_profile_is_valid(u64 flags, int extended)
3602{
3603	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3604			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3605
3606	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3607
3608	/* 1) check that all other bits are zeroed */
3609	if (flags & ~mask)
3610		return 0;
3611
3612	/* 2) see if profile is reduced */
3613	if (flags == 0)
3614		return !extended; /* "0" is valid for usual profiles */
3615
3616	/* true if exactly one bit set */
3617	return (flags & (flags - 1)) == 0;
3618}
3619
3620static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3621{
3622	/* cancel requested || normal exit path */
3623	return atomic_read(&fs_info->balance_cancel_req) ||
3624		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3625		 atomic_read(&fs_info->balance_cancel_req) == 0);
3626}
3627
3628static void __cancel_balance(struct btrfs_fs_info *fs_info)
3629{
3630	int ret;
3631
3632	unset_balance_control(fs_info);
3633	ret = del_balance_item(fs_info->tree_root);
3634	if (ret)
3635		btrfs_std_error(fs_info, ret, NULL);
3636
3637	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3638}
3639
3640/* Non-zero return value signifies invalidity */
3641static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3642		u64 allowed)
3643{
3644	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3645		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
3646		 (bctl_arg->target & ~allowed)));
3647}
3648
3649/*
3650 * Should be called with both balance and volume mutexes held
3651 */
3652int btrfs_balance(struct btrfs_balance_control *bctl,
3653		  struct btrfs_ioctl_balance_args *bargs)
3654{
3655	struct btrfs_fs_info *fs_info = bctl->fs_info;
3656	u64 allowed;
3657	int mixed = 0;
3658	int ret;
3659	u64 num_devices;
3660	unsigned seq;
3661
3662	if (btrfs_fs_closing(fs_info) ||
3663	    atomic_read(&fs_info->balance_pause_req) ||
3664	    atomic_read(&fs_info->balance_cancel_req)) {
3665		ret = -EINVAL;
3666		goto out;
3667	}
3668
3669	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3670	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3671		mixed = 1;
3672
3673	/*
3674	 * In case of mixed groups both data and meta should be picked,
3675	 * and identical options should be given for both of them.
3676	 */
3677	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3678	if (mixed && (bctl->flags & allowed)) {
3679		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3680		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3681		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3682			btrfs_err(fs_info, "with mixed groups data and "
3683				   "metadata balance options must be the same");
3684			ret = -EINVAL;
3685			goto out;
3686		}
3687	}
3688
3689	num_devices = fs_info->fs_devices->num_devices;
3690	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
3691	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3692		BUG_ON(num_devices < 1);
3693		num_devices--;
3694	}
3695	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
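	/*
	 * Build the set of target profiles the current device count can
	 * support: DUP on a single device, RAID0/1 from two devices,
	 * RAID5 from three, RAID10/6 from four.
	 */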
3696	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3697	if (num_devices == 1)
3698		allowed |= BTRFS_BLOCK_GROUP_DUP;
3699	else if (num_devices > 1)
3700		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3701	if (num_devices > 2)
3702		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3703	if (num_devices > 3)
3704		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3705			    BTRFS_BLOCK_GROUP_RAID6);
3706	if (validate_convert_profile(&bctl->data, allowed)) {
3707		btrfs_err(fs_info, "unable to start balance with target "
3708			   "data profile %llu",
3709		       bctl->data.target);
3710		ret = -EINVAL;
3711		goto out;
3712	}
3713	if (validate_convert_profile(&bctl->meta, allowed)) {
3714		btrfs_err(fs_info,
3715			   "unable to start balance with target metadata profile %llu",
3716		       bctl->meta.target);
3717		ret = -EINVAL;
3718		goto out;
3719	}
3720	if (validate_convert_profile(&bctl->sys, allowed)) {
3721		btrfs_err(fs_info,
3722			   "unable to start balance with target system profile %llu",
3723		       bctl->sys.target);
3724		ret = -EINVAL;
3725		goto out;
3726	}
3727
3728	/* allow to reduce meta or sys integrity only if force set */
3729	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3730			BTRFS_BLOCK_GROUP_RAID10 |
3731			BTRFS_BLOCK_GROUP_RAID5 |
3732			BTRFS_BLOCK_GROUP_RAID6;
3733	do {
3734		seq = read_seqbegin(&fs_info->profiles_lock);
3735
3736		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3737		     (fs_info->avail_system_alloc_bits & allowed) &&
3738		     !(bctl->sys.target & allowed)) ||
3739		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3740		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3741		     !(bctl->meta.target & allowed))) {
3742			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3743				btrfs_info(fs_info, "force reducing metadata integrity");
3744			} else {
3745				btrfs_err(fs_info, "balance will reduce metadata "
3746					   "integrity, use force if you want this");
3747				ret = -EINVAL;
3748				goto out;
3749			}
3750		}
3751	} while (read_seqretry(&fs_info->profiles_lock, seq));
3752
3753	if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
3754		btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
3755		btrfs_warn(fs_info,
3756	"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
3757			bctl->meta.target, bctl->data.target);
3758	}
3759
3760	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3761		fs_info->num_tolerated_disk_barrier_failures = min(
3762			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
3763			btrfs_get_num_tolerated_disk_barrier_failures(
3764				bctl->sys.target));
3765	}
3766
3767	ret = insert_balance_item(fs_info->tree_root, bctl);
3768	if (ret && ret != -EEXIST)
3769		goto out;
3770
3771	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3772		BUG_ON(ret == -EEXIST);
3773		set_balance_control(bctl);
3774	} else {
3775		BUG_ON(ret != -EEXIST);
3776		spin_lock(&fs_info->balance_lock);
3777		update_balance_args(bctl);
3778		spin_unlock(&fs_info->balance_lock);
3779	}
3780
3781	atomic_inc(&fs_info->balance_running);
3782	mutex_unlock(&fs_info->balance_mutex);
3783
3784	ret = __btrfs_balance(fs_info);
3785
3786	mutex_lock(&fs_info->balance_mutex);
3787	atomic_dec(&fs_info->balance_running);
3788
3789	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3790		fs_info->num_tolerated_disk_barrier_failures =
3791			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3792	}
3793
3794	if (bargs) {
3795		memset(bargs, 0, sizeof(*bargs));
3796		update_ioctl_balance_args(fs_info, 0, bargs);
3797	}
3798
3799	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3800	    balance_need_close(fs_info)) {
3801		__cancel_balance(fs_info);
3802	}
3803
3804	wake_up(&fs_info->balance_wait_q);
3805
3806	return ret;
3807out:
3808	if (bctl->flags & BTRFS_BALANCE_RESUME)
3809		__cancel_balance(fs_info);
3810	else {
3811		kfree(bctl);
3812		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3813	}
3814	return ret;
3815}
3816
3817static int balance_kthread(void *data)
3818{
3819	struct btrfs_fs_info *fs_info = data;
3820	int ret = 0;
3821
3822	mutex_lock(&fs_info->volume_mutex);
3823	mutex_lock(&fs_info->balance_mutex);
3824
3825	if (fs_info->balance_ctl) {
3826		btrfs_info(fs_info, "continuing balance");
3827		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3828	}
3829
3830	mutex_unlock(&fs_info->balance_mutex);
3831	mutex_unlock(&fs_info->volume_mutex);
3832
3833	return ret;
3834}
3835
3836int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3837{
3838	struct task_struct *tsk;
3839
3840	spin_lock(&fs_info->balance_lock);
3841	if (!fs_info->balance_ctl) {
3842		spin_unlock(&fs_info->balance_lock);
3843		return 0;
3844	}
3845	spin_unlock(&fs_info->balance_lock);
3846
3847	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3848		btrfs_info(fs_info, "force skipping balance");
3849		return 0;
3850	}
3851
3852	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3853	return PTR_ERR_OR_ZERO(tsk);
3854}
3855
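/*
 * Illustrative note (not part of the original source): kthread_run()
 * returns either a valid task_struct pointer or an ERR_PTR()-encoded
 * errno, and PTR_ERR_OR_ZERO() above collapses that into the usual
 * 0-or-negative-errno return convention.  An equivalent open-coded
 * version of the tail of btrfs_resume_balance_async() would be:
 *
 *	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	return 0;
 */
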
3856int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3857{
3858	struct btrfs_balance_control *bctl;
3859	struct btrfs_balance_item *item;
3860	struct btrfs_disk_balance_args disk_bargs;
3861	struct btrfs_path *path;
3862	struct extent_buffer *leaf;
3863	struct btrfs_key key;
3864	int ret;
3865
3866	path = btrfs_alloc_path();
3867	if (!path)
3868		return -ENOMEM;
3869
3870	key.objectid = BTRFS_BALANCE_OBJECTID;
3871	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3872	key.offset = 0;
3873
3874	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3875	if (ret < 0)
3876		goto out;
3877	if (ret > 0) { /* ret = -ENOENT; */
3878		ret = 0;
3879		goto out;
3880	}
3881
3882	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3883	if (!bctl) {
3884		ret = -ENOMEM;
3885		goto out;
3886	}
3887
3888	leaf = path->nodes[0];
3889	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3890
3891	bctl->fs_info = fs_info;
3892	bctl->flags = btrfs_balance_flags(leaf, item);
3893	bctl->flags |= BTRFS_BALANCE_RESUME;
3894
3895	btrfs_balance_data(leaf, item, &disk_bargs);
3896	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3897	btrfs_balance_meta(leaf, item, &disk_bargs);
3898	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3899	btrfs_balance_sys(leaf, item, &disk_bargs);
3900	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3901
3902	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3903
3904	mutex_lock(&fs_info->volume_mutex);
3905	mutex_lock(&fs_info->balance_mutex);
3906
3907	set_balance_control(bctl);
3908
3909	mutex_unlock(&fs_info->balance_mutex);
3910	mutex_unlock(&fs_info->volume_mutex);
3911out:
3912	btrfs_free_path(path);
3913	return ret;
3914}
3915
3916int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3917{
3918	int ret = 0;
3919
3920	mutex_lock(&fs_info->balance_mutex);
3921	if (!fs_info->balance_ctl) {
3922		mutex_unlock(&fs_info->balance_mutex);
3923		return -ENOTCONN;
3924	}
3925
3926	if (atomic_read(&fs_info->balance_running)) {
3927		atomic_inc(&fs_info->balance_pause_req);
3928		mutex_unlock(&fs_info->balance_mutex);
3929
3930		wait_event(fs_info->balance_wait_q,
3931			   atomic_read(&fs_info->balance_running) == 0);
3932
3933		mutex_lock(&fs_info->balance_mutex);
3934		/* we are good with balance_ctl ripped off from under us */
3935		BUG_ON(atomic_read(&fs_info->balance_running));
3936		atomic_dec(&fs_info->balance_pause_req);
3937	} else {
3938		ret = -ENOTCONN;
3939	}
3940
3941	mutex_unlock(&fs_info->balance_mutex);
3942	return ret;
3943}
3944
3945int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3946{
3947	if (fs_info->sb->s_flags & MS_RDONLY)
3948		return -EROFS;
3949
3950	mutex_lock(&fs_info->balance_mutex);
3951	if (!fs_info->balance_ctl) {
3952		mutex_unlock(&fs_info->balance_mutex);
3953		return -ENOTCONN;
3954	}
3955
3956	atomic_inc(&fs_info->balance_cancel_req);
3957	/*
3958	 * if we are running, just wait and return; the balance item is
3959	 * deleted in btrfs_balance() in this case
3960	 */
3961	if (atomic_read(&fs_info->balance_running)) {
3962		mutex_unlock(&fs_info->balance_mutex);
3963		wait_event(fs_info->balance_wait_q,
3964			   atomic_read(&fs_info->balance_running) == 0);
3965		mutex_lock(&fs_info->balance_mutex);
3966	} else {
3967		/* __cancel_balance needs volume_mutex */
3968		mutex_unlock(&fs_info->balance_mutex);
3969		mutex_lock(&fs_info->volume_mutex);
3970		mutex_lock(&fs_info->balance_mutex);
3971
3972		if (fs_info->balance_ctl)
3973			__cancel_balance(fs_info);
3974
3975		mutex_unlock(&fs_info->volume_mutex);
3976	}
3977
3978	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3979	atomic_dec(&fs_info->balance_cancel_req);
3980	mutex_unlock(&fs_info->balance_mutex);
3981	return 0;
3982}
3983
3984static int btrfs_uuid_scan_kthread(void *data)
3985{
3986	struct btrfs_fs_info *fs_info = data;
3987	struct btrfs_root *root = fs_info->tree_root;
3988	struct btrfs_key key;
3989	struct btrfs_key max_key;
3990	struct btrfs_path *path = NULL;
3991	int ret = 0;
3992	struct extent_buffer *eb;
3993	int slot;
3994	struct btrfs_root_item root_item;
3995	u32 item_size;
3996	struct btrfs_trans_handle *trans = NULL;
3997
3998	path = btrfs_alloc_path();
3999	if (!path) {
4000		ret = -ENOMEM;
4001		goto out;
4002	}
4003
4004	key.objectid = 0;
4005	key.type = BTRFS_ROOT_ITEM_KEY;
4006	key.offset = 0;
4007
4008	max_key.objectid = (u64)-1;
4009	max_key.type = BTRFS_ROOT_ITEM_KEY;
4010	max_key.offset = (u64)-1;
4011
4012	while (1) {
4013		ret = btrfs_search_forward(root, &key, path, 0);
4014		if (ret) {
4015			if (ret > 0)
4016				ret = 0;
4017			break;
4018		}
4019
4020		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4021		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4022		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4023		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4024			goto skip;
4025
4026		eb = path->nodes[0];
4027		slot = path->slots[0];
4028		item_size = btrfs_item_size_nr(eb, slot);
4029		if (item_size < sizeof(root_item))
4030			goto skip;
4031
4032		read_extent_buffer(eb, &root_item,
4033				   btrfs_item_ptr_offset(eb, slot),
4034				   (int)sizeof(root_item));
4035		if (btrfs_root_refs(&root_item) == 0)
4036			goto skip;
4037
4038		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4039		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4040			if (trans)
4041				goto update_tree;
4042
4043			btrfs_release_path(path);
4044			/*
4045			 * 1 - subvol uuid item
4046			 * 1 - received_subvol uuid item
4047			 */
4048			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4049			if (IS_ERR(trans)) {
4050				ret = PTR_ERR(trans);
4051				break;
4052			}
4053			continue;
4054		} else {
4055			goto skip;
4056		}
4057update_tree:
4058		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4059			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4060						  root_item.uuid,
4061						  BTRFS_UUID_KEY_SUBVOL,
4062						  key.objectid);
4063			if (ret < 0) {
4064				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4065					ret);
4066				break;
4067			}
4068		}
4069
4070		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4071			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4072						  root_item.received_uuid,
4073						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4074						  key.objectid);
4075			if (ret < 0) {
4076				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4077					ret);
4078				break;
4079			}
4080		}
4081
4082skip:
4083		if (trans) {
4084			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
4085			trans = NULL;
4086			if (ret)
4087				break;
4088		}
4089
4090		btrfs_release_path(path);
4091		if (key.offset < (u64)-1) {
4092			key.offset++;
4093		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4094			key.offset = 0;
4095			key.type = BTRFS_ROOT_ITEM_KEY;
4096		} else if (key.objectid < (u64)-1) {
4097			key.offset = 0;
4098			key.type = BTRFS_ROOT_ITEM_KEY;
4099			key.objectid++;
4100		} else {
4101			break;
4102		}
4103		cond_resched();
4104	}
4105
4106out:
4107	btrfs_free_path(path);
4108	if (trans && !IS_ERR(trans))
4109		btrfs_end_transaction(trans, fs_info->uuid_root);
4110	if (ret)
4111		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4112	else
4113		fs_info->update_uuid_tree_gen = 1;
4114	up(&fs_info->uuid_tree_rescan_sem);
4115	return 0;
4116}
4117
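/*
 * Illustrative note (not part of the original source): the key advance
 * at the bottom of the scan loop treats (objectid, type, offset) as one
 * lexicographically ordered tuple and increments the least significant
 * component that is not yet saturated, e.g.
 *
 *	(5, ROOT_ITEM, 7)       -> next search starts at (5, ROOT_ITEM, 8)
 *	(5, ROOT_ITEM, (u64)-1) -> next search starts at (6, ROOT_ITEM, 0)
 *
 * and the loop ends once objectid itself would overflow.
 */
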
4118/*
4119 * Callback for btrfs_uuid_tree_iterate().
4120 * returns:
4121 * 0	check succeeded, the entry is not outdated.
4122 * < 0	if an error occurred.
4123 * > 0	if the check failed, which means the caller shall remove the entry.
4124 */
4125static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4126				       u8 *uuid, u8 type, u64 subid)
4127{
4128	struct btrfs_key key;
4129	int ret = 0;
4130	struct btrfs_root *subvol_root;
4131
4132	if (type != BTRFS_UUID_KEY_SUBVOL &&
4133	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4134		goto out;
4135
4136	key.objectid = subid;
4137	key.type = BTRFS_ROOT_ITEM_KEY;
4138	key.offset = (u64)-1;
4139	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4140	if (IS_ERR(subvol_root)) {
4141		ret = PTR_ERR(subvol_root);
4142		if (ret == -ENOENT)
4143			ret = 1;
4144		goto out;
4145	}
4146
4147	switch (type) {
4148	case BTRFS_UUID_KEY_SUBVOL:
4149		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4150			ret = 1;
4151		break;
4152	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4153		if (memcmp(uuid, subvol_root->root_item.received_uuid,
4154			   BTRFS_UUID_SIZE))
4155			ret = 1;
4156		break;
4157	}
4158
4159out:
4160	return ret;
4161}
4162
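/*
 * Illustrative sketch (not part of the original source): the tri-state
 * return convention documented above is what btrfs_uuid_tree_iterate()
 * keys off.  A hypothetical iterator body (remove_entry() is a made-up
 * stand-in for the real deletion) would act roughly like this:
 *
 *	ret = check_func(fs_info, uuid, type, subid);
 *	if (ret < 0)
 *		return ret;			(hard error, stop iterating)
 *	if (ret > 0)
 *		remove_entry(uuid, type, subid);   (stale entry, drop it)
 *	continue;				(ret == 0, entry is current)
 */
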
4163static int btrfs_uuid_rescan_kthread(void *data)
4164{
4165	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4166	int ret;
4167
4168	/*
4169	 * 1st step is to iterate through the existing UUID tree and
4170	 * to delete all entries that contain outdated data.
4171	 * 2nd step is to add all missing entries to the UUID tree.
4172	 */
4173	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4174	if (ret < 0) {
4175		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4176		up(&fs_info->uuid_tree_rescan_sem);
4177		return ret;
4178	}
4179	return btrfs_uuid_scan_kthread(data);
4180}
4181
4182int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4183{
4184	struct btrfs_trans_handle *trans;
4185	struct btrfs_root *tree_root = fs_info->tree_root;
4186	struct btrfs_root *uuid_root;
4187	struct task_struct *task;
4188	int ret;
4189
4190	/*
4191	 * 1 - root node
4192	 * 1 - root item
4193	 */
4194	trans = btrfs_start_transaction(tree_root, 2);
4195	if (IS_ERR(trans))
4196		return PTR_ERR(trans);
4197
4198	uuid_root = btrfs_create_tree(trans, fs_info,
4199				      BTRFS_UUID_TREE_OBJECTID);
4200	if (IS_ERR(uuid_root)) {
4201		ret = PTR_ERR(uuid_root);
4202		btrfs_abort_transaction(trans, tree_root, ret);
4203		return ret;
4204	}
4205
4206	fs_info->uuid_root = uuid_root;
4207
4208	ret = btrfs_commit_transaction(trans, tree_root);
4209	if (ret)
4210		return ret;
4211
4212	down(&fs_info->uuid_tree_rescan_sem);
4213	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4214	if (IS_ERR(task)) {
4215		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4216		btrfs_warn(fs_info, "failed to start uuid_scan task");
4217		up(&fs_info->uuid_tree_rescan_sem);
4218		return PTR_ERR(task);
4219	}
4220
4221	return 0;
4222}
4223
4224int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4225{
4226	struct task_struct *task;
4227
4228	down(&fs_info->uuid_tree_rescan_sem);
4229	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4230	if (IS_ERR(task)) {
4231		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4232		btrfs_warn(fs_info, "failed to start uuid_rescan task");
4233		up(&fs_info->uuid_tree_rescan_sem);
4234		return PTR_ERR(task);
4235	}
4236
4237	return 0;
4238}
4239
4240/*
4241 * Shrinking a device means finding all of the device extents past
4242 * the new size, and then following the back refs to the chunks.
4243 * The chunk relocation code actually frees the device extent.
4244 */
4245int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4246{
4247	struct btrfs_trans_handle *trans;
4248	struct btrfs_root *root = device->dev_root;
4249	struct btrfs_dev_extent *dev_extent = NULL;
4250	struct btrfs_path *path;
4251	u64 length;
4252	u64 chunk_offset;
4253	int ret;
4254	int slot;
4255	int failed = 0;
4256	bool retried = false;
4257	bool checked_pending_chunks = false;
4258	struct extent_buffer *l;
4259	struct btrfs_key key;
4260	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4261	u64 old_total = btrfs_super_total_bytes(super_copy);
4262	u64 old_size = btrfs_device_get_total_bytes(device);
4263	u64 diff = old_size - new_size;
4264
4265	if (device->is_tgtdev_for_dev_replace)
4266		return -EINVAL;
4267
4268	path = btrfs_alloc_path();
4269	if (!path)
4270		return -ENOMEM;
4271
4272	path->reada = READA_FORWARD;
4273
4274	lock_chunks(root);
4275
4276	btrfs_device_set_total_bytes(device, new_size);
4277	if (device->writeable) {
4278		device->fs_devices->total_rw_bytes -= diff;
4279		spin_lock(&root->fs_info->free_chunk_lock);
4280		root->fs_info->free_chunk_space -= diff;
4281		spin_unlock(&root->fs_info->free_chunk_lock);
4282	}
4283	unlock_chunks(root);
4284
4285again:
4286	key.objectid = device->devid;
4287	key.offset = (u64)-1;
4288	key.type = BTRFS_DEV_EXTENT_KEY;
4289
4290	do {
4291		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
4292		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4293		if (ret < 0) {
4294			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4295			goto done;
4296		}
4297
4298		ret = btrfs_previous_item(root, path, 0, key.type);
4299		if (ret)
4300			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4301		if (ret < 0)
4302			goto done;
4303		if (ret) {
4304			ret = 0;
4305			btrfs_release_path(path);
4306			break;
4307		}
4308
4309		l = path->nodes[0];
4310		slot = path->slots[0];
4311		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4312
4313		if (key.objectid != device->devid) {
4314			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4315			btrfs_release_path(path);
4316			break;
4317		}
4318
4319		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4320		length = btrfs_dev_extent_length(l, dev_extent);
4321
4322		if (key.offset + length <= new_size) {
4323			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4324			btrfs_release_path(path);
4325			break;
4326		}
4327
4328		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4329		btrfs_release_path(path);
4330
4331		ret = btrfs_relocate_chunk(root, chunk_offset);
4332		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4333		if (ret && ret != -ENOSPC)
4334			goto done;
4335		if (ret == -ENOSPC)
4336			failed++;
4337	} while (key.offset-- > 0);
4338
4339	if (failed && !retried) {
4340		failed = 0;
4341		retried = true;
4342		goto again;
4343	} else if (failed && retried) {
4344		ret = -ENOSPC;
4345		goto done;
4346	}
4347
4348	/* Shrinking succeeded, else we would be at "done". */
4349	trans = btrfs_start_transaction(root, 0);
4350	if (IS_ERR(trans)) {
4351		ret = PTR_ERR(trans);
4352		goto done;
4353	}
4354
4355	lock_chunks(root);
4356
4357	/*
4358	 * We checked in the above loop all device extents that were already in
4359	 * the device tree. However before we have updated the device's
4360	 * total_bytes to the new size, we might have had chunk allocations that
4361	 * have not completed yet (new block groups attached to transaction
4362	 * handles), and therefore their device extents were not yet in the
4363	 * device tree and we missed them in the loop above. So if we have any
4364	 * pending chunk using a device extent that overlaps the device range
4365	 * that we cannot use anymore, commit the current transaction and
4366	 * repeat the search on the device tree - this way we guarantee we will
4367	 * not have chunks using device extents that end beyond 'new_size'.
4368	 */
4369	if (!checked_pending_chunks) {
4370		u64 start = new_size;
4371		u64 len = old_size - new_size;
4372
4373		if (contains_pending_extent(trans->transaction, device,
4374					    &start, len)) {
4375			unlock_chunks(root);
4376			checked_pending_chunks = true;
4377			failed = 0;
4378			retried = false;
4379			ret = btrfs_commit_transaction(trans, root);
4380			if (ret)
4381				goto done;
4382			goto again;
4383		}
4384	}
4385
4386	btrfs_device_set_disk_total_bytes(device, new_size);
4387	if (list_empty(&device->resized_list))
4388		list_add_tail(&device->resized_list,
4389			      &root->fs_info->fs_devices->resized_devices);
4390
4391	WARN_ON(diff > old_total);
4392	btrfs_set_super_total_bytes(super_copy, old_total - diff);
4393	unlock_chunks(root);
4394
4395	/* Now btrfs_update_device() will change the on-disk size. */
4396	ret = btrfs_update_device(trans, device);
4397	btrfs_end_transaction(trans, root);
4398done:
4399	btrfs_free_path(path);
4400	if (ret) {
4401		lock_chunks(root);
4402		btrfs_device_set_total_bytes(device, old_size);
4403		if (device->writeable)
4404			device->fs_devices->total_rw_bytes += diff;
4405		spin_lock(&root->fs_info->free_chunk_lock);
4406		root->fs_info->free_chunk_space += diff;
4407		spin_unlock(&root->fs_info->free_chunk_lock);
4408		unlock_chunks(root);
4409	}
4410	return ret;
4411}
4412
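/*
 * Illustrative flow (not part of the original source) of the retry
 * logic above, in pseudo-C:
 *
 *	relocate every chunk past new_size;		(pass 1)
 *	if (some chunks failed with -ENOSPC)
 *		retry them once;			(pass 2, may use
 *							 space pass 1 freed)
 *	if (still failing)
 *		restore the in-memory sizes and fail with -ENOSPC;
 *
 * Only after both passes succeed are the super block total_bytes and
 * the device item updated inside a transaction.
 */
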
4413static int btrfs_add_system_chunk(struct btrfs_root *root,
4414			   struct btrfs_key *key,
4415			   struct btrfs_chunk *chunk, int item_size)
4416{
4417	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4418	struct btrfs_disk_key disk_key;
4419	u32 array_size;
4420	u8 *ptr;
4421
4422	lock_chunks(root);
4423	array_size = btrfs_super_sys_array_size(super_copy);
4424	if (array_size + item_size + sizeof(disk_key)
4425			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4426		unlock_chunks(root);
4427		return -EFBIG;
4428	}
4429
4430	ptr = super_copy->sys_chunk_array + array_size;
4431	btrfs_cpu_key_to_disk(&disk_key, key);
4432	memcpy(ptr, &disk_key, sizeof(disk_key));
4433	ptr += sizeof(disk_key);
4434	memcpy(ptr, chunk, item_size);
4435	item_size += sizeof(disk_key);
4436	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4437	unlock_chunks(root);
4438
4439	return 0;
4440}
4441
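/*
 * Illustrative sketch (not part of the original source): the system
 * chunk array filled above is a packed sequence of
 * (btrfs_disk_key, btrfs_chunk + stripes) pairs, so a reader walks it
 * with the same arithmetic, roughly:
 *
 *	u8 *ptr = super_copy->sys_chunk_array;
 *	u8 *end = ptr + btrfs_super_sys_array_size(super_copy);
 *
 *	while (ptr < end) {
 *		struct btrfs_chunk *chunk;
 *
 *		ptr += sizeof(struct btrfs_disk_key);
 *		chunk = (struct btrfs_chunk *)ptr;
 *		ptr += btrfs_chunk_item_size(
 *				btrfs_stack_chunk_num_stripes(chunk));
 *	}
 */
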
4442/*
4443 * sort the devices in descending order by max_avail, total_avail
4444 */
4445static int btrfs_cmp_device_info(const void *a, const void *b)
4446{
4447	const struct btrfs_device_info *di_a = a;
4448	const struct btrfs_device_info *di_b = b;
4449
4450	if (di_a->max_avail > di_b->max_avail)
4451		return -1;
4452	if (di_a->max_avail < di_b->max_avail)
4453		return 1;
4454	if (di_a->total_avail > di_b->total_avail)
4455		return -1;
4456	if (di_a->total_avail < di_b->total_avail)
4457		return 1;
4458	return 0;
4459}
4460
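/*
 * Illustrative example (not part of the original source): with the
 * comparator above, the sort() call in __btrfs_alloc_chunk() leaves the
 * device with the largest usable hole in devices_info[0], e.g.
 *
 *	max_avail: { 1G, 4G, 2G }  ->  { 4G, 2G, 1G }
 *
 * Ties on max_avail fall back to total_avail, still descending.
 */
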
4461static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4462{
4463	/* TODO allow them to set a preferred stripe size */
4464	return SZ_64K;
4465}
4466
4467static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4468{
4469	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4470		return;
4471
4472	btrfs_set_fs_incompat(info, RAID56);
4473}
4474
4475#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
4476			- sizeof(struct btrfs_item)		\
4477			- sizeof(struct btrfs_chunk))		\
4478			/ sizeof(struct btrfs_stripe) + 1)
4479
4480#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
4481				- 2 * sizeof(struct btrfs_disk_key)	\
4482				- 2 * sizeof(struct btrfs_chunk))	\
4483				/ sizeof(struct btrfs_stripe) + 1)
4484
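/*
 * Illustrative arithmetic (not part of the original source; the exact
 * numbers depend on the on-disk header sizes): a chunk item embeds one
 * btrfs_stripe per device, so BTRFS_MAX_DEVS is simply how many stripes
 * fit in one leaf after the item header and the chunk header.  With a
 * 16K nodesize that is roughly
 *
 *	(~16K leaf data - ~25 byte item - ~80 byte chunk) / 32 + 1 ~= 500
 *
 * devices.  BTRFS_MAX_DEVS_SYS_CHUNK is far smaller (a few dozen)
 * because the key and chunk must fit, twice over, in the 2K
 * sys_chunk_array of the super block.
 */
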
4485static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4486			       struct btrfs_root *extent_root, u64 start,
4487			       u64 type)
4488{
4489	struct btrfs_fs_info *info = extent_root->fs_info;
4490	struct btrfs_fs_devices *fs_devices = info->fs_devices;
4491	struct list_head *cur;
4492	struct map_lookup *map = NULL;
4493	struct extent_map_tree *em_tree;
4494	struct extent_map *em;
4495	struct btrfs_device_info *devices_info = NULL;
4496	u64 total_avail;
4497	int num_stripes;	/* total number of stripes to allocate */
4498	int data_stripes;	/* number of stripes that count for
4499				   block group size */
4500	int sub_stripes;	/* sub_stripes info for map */
4501	int dev_stripes;	/* stripes per dev */
4502	int devs_max;		/* max devs to use */
4503	int devs_min;		/* min devs needed */
4504	int devs_increment;	/* ndevs has to be a multiple of this */
4505	int ncopies;		/* how many copies of the data */
4506	int ret;
4507	u64 max_stripe_size;
4508	u64 max_chunk_size;
4509	u64 stripe_size;
4510	u64 num_bytes;
4511	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4512	int ndevs;
4513	int i;
4514	int j;
4515	int index;
4516
4517	BUG_ON(!alloc_profile_is_valid(type, 0));
4518
4519	if (list_empty(&fs_devices->alloc_list))
4520		return -ENOSPC;
4521
4522	index = __get_raid_index(type);
4523
4524	sub_stripes = btrfs_raid_array[index].sub_stripes;
4525	dev_stripes = btrfs_raid_array[index].dev_stripes;
4526	devs_max = btrfs_raid_array[index].devs_max;
4527	devs_min = btrfs_raid_array[index].devs_min;
4528	devs_increment = btrfs_raid_array[index].devs_increment;
4529	ncopies = btrfs_raid_array[index].ncopies;
4530
4531	if (type & BTRFS_BLOCK_GROUP_DATA) {
4532		max_stripe_size = SZ_1G;
4533		max_chunk_size = 10 * max_stripe_size;
4534		if (!devs_max)
4535			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4536	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4537		/* for larger filesystems, use larger metadata chunks */
4538		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4539			max_stripe_size = SZ_1G;
4540		else
4541			max_stripe_size = SZ_256M;
4542		max_chunk_size = max_stripe_size;
4543		if (!devs_max)
4544			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4545	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4546		max_stripe_size = SZ_32M;
4547		max_chunk_size = 2 * max_stripe_size;
4548		if (!devs_max)
4549			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4550	} else {
4551		btrfs_err(info, "invalid chunk type 0x%llx requested",
4552		       type);
4553		BUG_ON(1);
4554	}
4555
4556	/* we don't want a chunk larger than 10% of writeable space */
4557	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4558			     max_chunk_size);
4559
4560	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4561			       GFP_NOFS);
4562	if (!devices_info)
4563		return -ENOMEM;
4564
4565	cur = fs_devices->alloc_list.next;
4566
4567	/*
4568	 * in the first pass through the devices list, we gather information
4569	 * about the available holes on each device.
4570	 */
4571	ndevs = 0;
4572	while (cur != &fs_devices->alloc_list) {
4573		struct btrfs_device *device;
4574		u64 max_avail;
4575		u64 dev_offset;
4576
4577		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4578
4579		cur = cur->next;
4580
4581		if (!device->writeable) {
4582			WARN(1, KERN_ERR
4583			       "BTRFS: read-only device in alloc_list\n");
4584			continue;
4585		}
4586
4587		if (!device->in_fs_metadata ||
4588		    device->is_tgtdev_for_dev_replace)
4589			continue;
4590
4591		if (device->total_bytes > device->bytes_used)
4592			total_avail = device->total_bytes - device->bytes_used;
4593		else
4594			total_avail = 0;
4595
4596		/* If there is no space on this device, skip it. */
4597		if (total_avail == 0)
4598			continue;
4599
4600		ret = find_free_dev_extent(trans, device,
4601					   max_stripe_size * dev_stripes,
4602					   &dev_offset, &max_avail);
4603		if (ret && ret != -ENOSPC)
4604			goto error;
4605
4606		if (ret == 0)
4607			max_avail = max_stripe_size * dev_stripes;
4608
4609		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4610			continue;
4611
4612		if (ndevs == fs_devices->rw_devices) {
4613			WARN(1, "%s: found more than %llu devices\n",
4614			     __func__, fs_devices->rw_devices);
4615			break;
4616		}
4617		devices_info[ndevs].dev_offset = dev_offset;
4618		devices_info[ndevs].max_avail = max_avail;
4619		devices_info[ndevs].total_avail = total_avail;
4620		devices_info[ndevs].dev = device;
4621		++ndevs;
4622	}
4623
4624	/*
4625	 * now sort the devices by hole size / available space
4626	 */
4627	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4628	     btrfs_cmp_device_info, NULL);
4629
4630	/* round down to number of usable stripes */
4631	ndevs -= ndevs % devs_increment;
4632
4633	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4634		ret = -ENOSPC;
4635		goto error;
4636	}
4637
4638	if (devs_max && ndevs > devs_max)
4639		ndevs = devs_max;
4640	/*
4641	 * the primary goal is to maximize the number of stripes, so use as many
4642	 * devices as possible, even if the stripes are not maximum sized.
4643	 */
4644	stripe_size = devices_info[ndevs-1].max_avail;
4645	num_stripes = ndevs * dev_stripes;
4646
4647	/*
4648	 * this will have to be fixed for RAID1 and RAID10 over
4649	 * more drives
4650	 */
4651	data_stripes = num_stripes / ncopies;
4652
4653	if (type & BTRFS_BLOCK_GROUP_RAID5) {
4654		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4655				 btrfs_super_stripesize(info->super_copy));
4656		data_stripes = num_stripes - 1;
4657	}
4658	if (type & BTRFS_BLOCK_GROUP_RAID6) {
4659		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4660				 btrfs_super_stripesize(info->super_copy));
4661		data_stripes = num_stripes - 2;
4662	}
4663
4664	/*
4665	 * Use the number of data stripes to figure out how big this chunk
4666	 * is really going to be in terms of logical address space,
4667	 * and compare that answer with the max chunk size
4668	 */
4669	if (stripe_size * data_stripes > max_chunk_size) {
4670		u64 mask = (1ULL << 24) - 1;
4671
4672		stripe_size = div_u64(max_chunk_size, data_stripes);
4673
4674		/* bump the answer up to a 16MB boundary */
4675		stripe_size = (stripe_size + mask) & ~mask;
4676
4677		/* but don't go higher than the limits we found
4678		 * while searching for free extents
4679		 */
4680		if (stripe_size > devices_info[ndevs-1].max_avail)
4681			stripe_size = devices_info[ndevs-1].max_avail;
4682	}
4683
4684	stripe_size = div_u64(stripe_size, dev_stripes);
4685
4686	/* align to BTRFS_STRIPE_LEN */
4687	stripe_size = div_u64(stripe_size, raid_stripe_len);
4688	stripe_size *= raid_stripe_len;
4689
4690	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4691	if (!map) {
4692		ret = -ENOMEM;
4693		goto error;
4694	}
4695	map->num_stripes = num_stripes;
4696
4697	for (i = 0; i < ndevs; ++i) {
4698		for (j = 0; j < dev_stripes; ++j) {
4699			int s = i * dev_stripes + j;
4700			map->stripes[s].dev = devices_info[i].dev;
4701			map->stripes[s].physical = devices_info[i].dev_offset +
4702						   j * stripe_size;
4703		}
4704	}
4705	map->sector_size = extent_root->sectorsize;
4706	map->stripe_len = raid_stripe_len;
4707	map->io_align = raid_stripe_len;
4708	map->io_width = raid_stripe_len;
4709	map->type = type;
4710	map->sub_stripes = sub_stripes;
4711
4712	num_bytes = stripe_size * data_stripes;
4713
4714	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4715
4716	em = alloc_extent_map();
4717	if (!em) {
4718		kfree(map);
4719		ret = -ENOMEM;
4720		goto error;
4721	}
4722	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4723	em->map_lookup = map;
4724	em->start = start;
4725	em->len = num_bytes;
4726	em->block_start = 0;
4727	em->block_len = em->len;
4728	em->orig_block_len = stripe_size;
4729
4730	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4731	write_lock(&em_tree->lock);
4732	ret = add_extent_mapping(em_tree, em, 0);
4733	if (!ret) {
4734		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4735		atomic_inc(&em->refs);
4736	}
4737	write_unlock(&em_tree->lock);
4738	if (ret) {
4739		free_extent_map(em);
4740		goto error;
4741	}
4742
4743	ret = btrfs_make_block_group(trans, extent_root, 0, type,
4744				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4745				     start, num_bytes);
4746	if (ret)
4747		goto error_del_extent;
4748
4749	for (i = 0; i < map->num_stripes; i++) {
4750		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4751		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4752	}
4753
4754	spin_lock(&extent_root->fs_info->free_chunk_lock);
4755	extent_root->fs_info->free_chunk_space -= (stripe_size *
4756						   map->num_stripes);
4757	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4758
4759	free_extent_map(em);
4760	check_raid56_incompat_flag(extent_root->fs_info, type);
4761
4762	kfree(devices_info);
4763	return 0;
4764
4765error_del_extent:
4766	write_lock(&em_tree->lock);
4767	remove_extent_mapping(em_tree, em);
4768	write_unlock(&em_tree->lock);
4769
4770	/* One for our allocation */
4771	free_extent_map(em);
4772	/* One for the tree reference */
4773	free_extent_map(em);
4774	/* One for the pending_chunks list reference */
4775	free_extent_map(em);
4776error:
4777	kfree(devices_info);
4778	return ret;
4779}
4780
4781int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4782				struct btrfs_root *extent_root,
4783				u64 chunk_offset, u64 chunk_size)
4784{
4785	struct btrfs_key key;
4786	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4787	struct btrfs_device *device;
4788	struct btrfs_chunk *chunk;
4789	struct btrfs_stripe *stripe;
4790	struct extent_map_tree *em_tree;
4791	struct extent_map *em;
4792	struct map_lookup *map;
4793	size_t item_size;
4794	u64 dev_offset;
4795	u64 stripe_size;
4796	int i = 0;
4797	int ret = 0;
4798
4799	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4800	read_lock(&em_tree->lock);
4801	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4802	read_unlock(&em_tree->lock);
4803
4804	if (!em) {
4805		btrfs_crit(extent_root->fs_info,
4806			   "unable to find logical %Lu len %Lu", chunk_offset, chunk_size);
4807		return -EINVAL;
4808	}
4809
4810	if (em->start != chunk_offset || em->len != chunk_size) {
4811		btrfs_crit(extent_root->fs_info,
4812			   "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
4813			   chunk_offset, chunk_size, em->start, em->len);
4814		free_extent_map(em);
4815		return -EINVAL;
4816	}
4817
4818	map = em->map_lookup;
4819	item_size = btrfs_chunk_item_size(map->num_stripes);
4820	stripe_size = em->orig_block_len;
4821
4822	chunk = kzalloc(item_size, GFP_NOFS);
4823	if (!chunk) {
4824		ret = -ENOMEM;
4825		goto out;
4826	}
4827
4828	/*
4829	 * Take the device list mutex to prevent races with the final phase of
4830	 * a device replace operation that replaces the device object associated
4831	 * with the map's stripes, because the device object's id can change
4832	 * at any time during that final phase of the device replace operation
4833	 * (dev-replace.c:btrfs_dev_replace_finishing()).
4834	 */
4835	mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4836	for (i = 0; i < map->num_stripes; i++) {
4837		device = map->stripes[i].dev;
4838		dev_offset = map->stripes[i].physical;
4839
4840		ret = btrfs_update_device(trans, device);
4841		if (ret)
4842			break;
4843		ret = btrfs_alloc_dev_extent(trans, device,
4844					     chunk_root->root_key.objectid,
4845					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4846					     chunk_offset, dev_offset,
4847					     stripe_size);
4848		if (ret)
4849			break;
4850	}
4851	if (ret) {
4852		mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4853		goto out;
4854	}
4855
4856	stripe = &chunk->stripe;
4857	for (i = 0; i < map->num_stripes; i++) {
4858		device = map->stripes[i].dev;
4859		dev_offset = map->stripes[i].physical;
4860
4861		btrfs_set_stack_stripe_devid(stripe, device->devid);
4862		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4863		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4864		stripe++;
4865	}
4866	mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4867
4868	btrfs_set_stack_chunk_length(chunk, chunk_size);
4869	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4870	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4871	btrfs_set_stack_chunk_type(chunk, map->type);
4872	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4873	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4874	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4875	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4876	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4877
4878	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4879	key.type = BTRFS_CHUNK_ITEM_KEY;
4880	key.offset = chunk_offset;
4881
4882	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4883	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4884		/*
4885		 * TODO: Cleanup of inserted chunk root in case of
4886		 * failure.
4887		 */
4888		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4889					     item_size);
4890	}
4891
4892out:
4893	kfree(chunk);
4894	free_extent_map(em);
4895	return ret;
4896}
4897
4898/*
4899 * Chunk allocation falls into two parts. The first part does the work
4900 * that makes the newly allocated chunk usable, but does not do any
4901 * operation that modifies the chunk tree. The second part does the work
4902 * that requires modifying the chunk tree. This division is important for
4903 * the bootstrap process of adding storage to a seed btrfs.
4904 */
4905int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4906		      struct btrfs_root *extent_root, u64 type)
4907{
4908	u64 chunk_offset;
4909
4910	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
4911	chunk_offset = find_next_chunk(extent_root->fs_info);
4912	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4913}
4914
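/*
 * Illustrative call sequence (not part of the original source): the two
 * parts described above end up as two separate calls from the block
 * group code, roughly
 *
 *	ret = btrfs_alloc_chunk(trans, extent_root, type);
 *		(part 1: pick devices, insert the extent map, create the
 *		 in-memory block group)
 *	...
 *	ret = btrfs_finish_chunk_alloc(trans, extent_root,
 *				       chunk_offset, chunk_size);
 *		(part 2: write the dev extents and the chunk item itself)
 */
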
4915static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4916					 struct btrfs_root *root,
4917					 struct btrfs_device *device)
4918{
4919	u64 chunk_offset;
4920	u64 sys_chunk_offset;
4921	u64 alloc_profile;
4922	struct btrfs_fs_info *fs_info = root->fs_info;
4923	struct btrfs_root *extent_root = fs_info->extent_root;
4924	int ret;
4925
4926	chunk_offset = find_next_chunk(fs_info);
4927	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4928	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4929				  alloc_profile);
4930	if (ret)
4931		return ret;
4932
4933	sys_chunk_offset = find_next_chunk(root->fs_info);
4934	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4935	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4936				  alloc_profile);
4937	return ret;
4938}
4939
4940static inline int btrfs_chunk_max_errors(struct map_lookup *map)
4941{
4942	int max_errors;
4943
4944	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4945			 BTRFS_BLOCK_GROUP_RAID10 |
4946			 BTRFS_BLOCK_GROUP_RAID5 |
4947			 BTRFS_BLOCK_GROUP_DUP)) {
4948		max_errors = 1;
4949	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4950		max_errors = 2;
4951	} else {
4952		max_errors = 0;
4953	}
4954
4955	return max_errors;
4956}
4957
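/*
 * Illustrative summary (not part of the original source) of the mapping
 * above, i.e. how many device failures a write can absorb per profile:
 *
 *	SINGLE/RAID0 -> 0,  DUP/RAID1/RAID10/RAID5 -> 1,  RAID6 -> 2
 *
 * For the striped profiles this matches the tolerated_failures column
 * of btrfs_raid_array; DUP tolerates losing one copy, not a whole
 * device, so it still counts as max_errors == 1 here.
 */
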
4958int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4959{
4960	struct extent_map *em;
4961	struct map_lookup *map;
4962	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4963	int readonly = 0;
4964	int miss_ndevs = 0;
4965	int i;
4966
4967	read_lock(&map_tree->map_tree.lock);
4968	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4969	read_unlock(&map_tree->map_tree.lock);
4970	if (!em)
4971		return 1;
4972
4973	map = em->map_lookup;
4974	for (i = 0; i < map->num_stripes; i++) {
4975		if (map->stripes[i].dev->missing) {
4976			miss_ndevs++;
4977			continue;
4978		}
4979
4980		if (!map->stripes[i].dev->writeable) {
4981			readonly = 1;
4982			goto end;
4983		}
4984	}
4985
4986	/*
4987	 * If the number of missing devices is larger than max errors,
4988	 * we cannot write the data into that chunk successfully, so
4989	 * set it readonly.
4990	 */
4991	if (miss_ndevs > btrfs_chunk_max_errors(map))
4992		readonly = 1;
4993end:
4994	free_extent_map(em);
4995	return readonly;
4996}
4997
4998void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4999{
5000	extent_map_tree_init(&tree->map_tree);
5001}
5002
5003void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
5004{
5005	struct extent_map *em;
5006
5007	while (1) {
5008		write_lock(&tree->map_tree.lock);
5009		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
5010		if (em)
5011			remove_extent_mapping(&tree->map_tree, em);
5012		write_unlock(&tree->map_tree.lock);
5013		if (!em)
5014			break;
5015		/* once for us */
5016		free_extent_map(em);
5017		/* once for the tree */
5018		free_extent_map(em);
5019	}
5020}
5021
5022int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5023{
5024	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5025	struct extent_map *em;
5026	struct map_lookup *map;
5027	struct extent_map_tree *em_tree = &map_tree->map_tree;
5028	int ret;
5029
5030	read_lock(&em_tree->lock);
5031	em = lookup_extent_mapping(em_tree, logical, len);
5032	read_unlock(&em_tree->lock);
5033
5034	/*
5035	 * We could return errors for these cases, but that could get ugly and
5036	 * we'd probably do the same thing, which is to just do nothing else
5037	 * and exit, so return 1 so the callers don't try to use other copies.
5038	 */
5039	if (!em) {
5040		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
5041			    logical+len);
5042		return 1;
5043	}
5044
5045	if (em->start > logical || em->start + em->len < logical) {
5046		btrfs_crit(fs_info,
5047			   "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
5048			   logical, logical + len, em->start, em->start + em->len);
5049		free_extent_map(em);
5050		return 1;
5051	}
5052
5053	map = em->map_lookup;
5054	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5055		ret = map->num_stripes;
5056	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5057		ret = map->sub_stripes;
5058	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5059		ret = 2;
5060	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5061		ret = 3;
5062	else
5063		ret = 1;
5064	free_extent_map(em);
5065
5066	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
5067	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
5068		ret++;
5069	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
5070
5071	return ret;
5072}
5073
5074unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
5075				    struct btrfs_mapping_tree *map_tree,
5076				    u64 logical)
5077{
5078	struct extent_map *em;
5079	struct map_lookup *map;
5080	struct extent_map_tree *em_tree = &map_tree->map_tree;
5081	unsigned long len = root->sectorsize;
5082
5083	read_lock(&em_tree->lock);
5084	em = lookup_extent_mapping(em_tree, logical, len);
5085	read_unlock(&em_tree->lock);
5086	BUG_ON(!em);
5087
5088	BUG_ON(em->start > logical || em->start + em->len < logical);
5089	map = em->map_lookup;
5090	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5091		len = map->stripe_len * nr_data_stripes(map);
5092	free_extent_map(em);
5093	return len;
5094}
5095
5096int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
5097			   u64 logical, u64 len, int mirror_num)
5098{
5099	struct extent_map *em;
5100	struct map_lookup *map;
5101	struct extent_map_tree *em_tree = &map_tree->map_tree;
5102	int ret = 0;
5103
5104	read_lock(&em_tree->lock);
5105	em = lookup_extent_mapping(em_tree, logical, len);
5106	read_unlock(&em_tree->lock);
5107	BUG_ON(!em);
5108
5109	BUG_ON(em->start > logical || em->start + em->len < logical);
5110	map = em->map_lookup;
5111	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5112		ret = 1;
5113	free_extent_map(em);
5114	return ret;
5115}
5116
5117static int find_live_mirror(struct btrfs_fs_info *fs_info,
5118			    struct map_lookup *map, int first, int num,
5119			    int optimal, int dev_replace_is_ongoing)
5120{
5121	int i;
5122	int tolerance;
5123	struct btrfs_device *srcdev;
5124
5125	if (dev_replace_is_ongoing &&
5126	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5127	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5128		srcdev = fs_info->dev_replace.srcdev;
5129	else
5130		srcdev = NULL;
5131
5132	/*
5133	 * try to avoid the drive that is the source drive for a
5134	 * dev-replace procedure, only choose it if no other non-missing
5135	 * mirror is available
5136	 */
5137	for (tolerance = 0; tolerance < 2; tolerance++) {
5138		if (map->stripes[optimal].dev->bdev &&
5139		    (tolerance || map->stripes[optimal].dev != srcdev))
5140			return optimal;
5141		for (i = first; i < first + num; i++) {
5142			if (map->stripes[i].dev->bdev &&
5143			    (tolerance || map->stripes[i].dev != srcdev))
5144				return i;
5145		}
5146	}
5147
5148	/* we couldn't find one that doesn't fail.  Just return something
5149	 * and the io error handling code will clean up eventually
5150	 */
5151	return optimal;
5152}
5153
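/*
 * Illustrative walk-through (not part of the original source) of the
 * tolerance loop above:
 *
 *	pass 0 (tolerance == 0): accept a stripe only if its bdev is
 *		present and it is not the dev-replace source drive;
 *	pass 1 (tolerance == 1): accept any stripe whose bdev is present,
 *		the source drive included.
 *
 * The source drive of a running replace is therefore used only as a
 * last resort, and if every stripe is missing the function still
 * returns 'optimal' and lets the bio error path clean up.
 */
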
5154static inline int parity_smaller(u64 a, u64 b)
5155{
5156	return a > b;
5157}
5158
5159/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5160static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5161{
5162	struct btrfs_bio_stripe s;
5163	int i;
5164	u64 l;
5165	int again = 1;
5166
5167	while (again) {
5168		again = 0;
5169		for (i = 0; i < num_stripes - 1; i++) {
5170			if (parity_smaller(bbio->raid_map[i],
5171					   bbio->raid_map[i+1])) {
5172				s = bbio->stripes[i];
5173				l = bbio->raid_map[i];
5174				bbio->stripes[i] = bbio->stripes[i+1];
5175				bbio->raid_map[i] = bbio->raid_map[i+1];
5176				bbio->stripes[i+1] = s;
5177				bbio->raid_map[i+1] = l;
5178
5179				again = 1;
5180			}
5181		}
5182	}
5183}
5184
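/*
 * Illustrative example (not part of the original source): raid_map
 * holds the logical address of each stripe, with the parity/syndrome
 * stripes set to RAID5_P_STRIPE/RAID6_Q_STRIPE, which are the two
 * largest u64 values.  Sorting ascending therefore pushes them to the
 * end; for one RAID5 stripe set
 *
 *	raid_map: { P, 0, 64K }  ->  { 0, 64K, P }
 *
 * and the stripes[] array is permuted in lock step with raid_map.
 */
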
5185static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5186{
5187	struct btrfs_bio *bbio = kzalloc(
5188		 /* the size of the btrfs_bio */
5189		sizeof(struct btrfs_bio) +
5190		/* plus the variable array for the stripes */
5191		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5192		/* plus the variable array for the tgt dev */
5193		sizeof(int) * (real_stripes) +
5194		/*
5195		 * plus the raid_map, which includes both the tgt dev
5196		 * and the stripes
5197		 */
5198		sizeof(u64) * (total_stripes),
5199		GFP_NOFS|__GFP_NOFAIL);
5200
5201	atomic_set(&bbio->error, 0);
5202	atomic_set(&bbio->refs, 1);
5203
5204	return bbio;
5205}
5206
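/*
 * Illustrative layout note (not part of the original source): the
 * single kzalloc() above yields one contiguous block,
 *
 *	[ btrfs_bio | stripes[total] | tgtdev_map[real] | raid_map[total] ]
 *
 * and __btrfs_map_block() later derives the tgtdev_map and raid_map
 * pointers from bbio->stripes with the same sizeof() arithmetic, so the
 * two places must stay in sync.
 */
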
5207void btrfs_get_bbio(struct btrfs_bio *bbio)
5208{
5209	WARN_ON(!atomic_read(&bbio->refs));
5210	atomic_inc(&bbio->refs);
5211}
5212
5213void btrfs_put_bbio(struct btrfs_bio *bbio)
5214{
5215	if (!bbio)
5216		return;
5217	if (atomic_dec_and_test(&bbio->refs))
5218		kfree(bbio);
5219}
5220
5221static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5222			     u64 logical, u64 *length,
5223			     struct btrfs_bio **bbio_ret,
5224			     int mirror_num, int need_raid_map)
5225{
5226	struct extent_map *em;
5227	struct map_lookup *map;
5228	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5229	struct extent_map_tree *em_tree = &map_tree->map_tree;
5230	u64 offset;
5231	u64 stripe_offset;
5232	u64 stripe_end_offset;
5233	u64 stripe_nr;
5234	u64 stripe_nr_orig;
5235	u64 stripe_nr_end;
5236	u64 stripe_len;
5237	u32 stripe_index;
5238	int i;
5239	int ret = 0;
5240	int num_stripes;
5241	int max_errors = 0;
5242	int tgtdev_indexes = 0;
5243	struct btrfs_bio *bbio = NULL;
5244	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5245	int dev_replace_is_ongoing = 0;
5246	int num_alloc_stripes;
5247	int patch_the_first_stripe_for_dev_replace = 0;
5248	u64 physical_to_patch_in_first_stripe = 0;
5249	u64 raid56_full_stripe_start = (u64)-1;
5250
5251	read_lock(&em_tree->lock);
5252	em = lookup_extent_mapping(em_tree, logical, *length);
5253	read_unlock(&em_tree->lock);
5254
5255	if (!em) {
5256		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
5257			logical, *length);
5258		return -EINVAL;
5259	}
5260
5261	if (em->start > logical || em->start + em->len < logical) {
5262		btrfs_crit(fs_info,
5263			   "found a bad mapping, wanted %Lu, found %Lu-%Lu",
5264			   logical, em->start, em->start + em->len);
5265		free_extent_map(em);
5266		return -EINVAL;
5267	}
5268
5269	map = em->map_lookup;
5270	offset = logical - em->start;
5271
5272	stripe_len = map->stripe_len;
5273	stripe_nr = offset;
5274	/*
5275	 * stripe_nr counts the total number of stripes we have to stride
5276	 * to get to this block
5277	 */
5278	stripe_nr = div64_u64(stripe_nr, stripe_len);
5279
5280	stripe_offset = stripe_nr * stripe_len;
5281	BUG_ON(offset < stripe_offset);
5282
5283	/* stripe_offset is the offset of this block in its stripe */
5284	stripe_offset = offset - stripe_offset;
5285
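	/*
	 * Illustrative numbers (not part of the original source): with
	 * stripe_len == 64K and offset == 300K,
	 *
	 *	stripe_nr     = 300K / 64K     = 4
	 *	stripe_offset = 300K - 4 * 64K = 44K
	 *
	 * i.e. the block lives 44K into the fifth stripe.
	 */
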
5286	/* if we're here for raid56, we need to know the stripe aligned start */
5287	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5288		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5289		raid56_full_stripe_start = offset;
5290
5291		/* allow a write of a full stripe, but make sure we don't
5292		 * allow straddling of stripes
5293		 */
5294		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5295				full_stripe_len);
5296		raid56_full_stripe_start *= full_stripe_len;
5297	}
5298
5299	if (rw & REQ_DISCARD) {
5300		/* we don't discard raid56 yet */
5301		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5302			ret = -EOPNOTSUPP;
5303			goto out;
5304		}
5305		*length = min_t(u64, em->len - offset, *length);
5306	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5307		u64 max_len;
5308		/* For writes to RAID[56], allow a full stripeset across all
5309		 * disks. For other RAID types and for RAID[56] reads, just
5310		 * allow a single stripe (on a single disk). */
5311		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5312		    (rw & REQ_WRITE)) {
5313			max_len = stripe_len * nr_data_stripes(map) -
5314				(offset - raid56_full_stripe_start);
5315		} else {
5316			/* we limit the length of each bio to what fits in a stripe */
5317			max_len = stripe_len - stripe_offset;
5318		}
5319		*length = min_t(u64, em->len - offset, max_len);
5320	} else {
5321		*length = em->len - offset;
5322	}
5323
5324	/* This is for when we're called from btrfs_merge_bio_hook() and
5325	 * all it cares about is the length */
5326	if (!bbio_ret)
5327		goto out;
5328
5329	btrfs_dev_replace_lock(dev_replace, 0);
5330	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5331	if (!dev_replace_is_ongoing)
5332		btrfs_dev_replace_unlock(dev_replace, 0);
5333	else
5334		btrfs_dev_replace_set_lock_blocking(dev_replace);
5335
5336	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5337	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
5338	    dev_replace->tgtdev != NULL) {
5339		/*
5340		 * in dev-replace case, for repair case (that's the only
5341		 * case where the mirror is selected explicitly when
5342		 * calling btrfs_map_block), blocks left of the left cursor
5343		 * can also be read from the target drive.
5344		 * For REQ_GET_READ_MIRRORS, the target drive is added as
5345		 * the last one to the array of stripes. For READ, it also
5346		 * needs to be supported using the same mirror number.
5347		 * If the requested block is not left of the left cursor,
5348		 * EIO is returned. This can happen because btrfs_num_copies()
5349		 * returns one more in the dev-replace case.
5350		 */
5351		u64 tmp_length = *length;
5352		struct btrfs_bio *tmp_bbio = NULL;
5353		int tmp_num_stripes;
5354		u64 srcdev_devid = dev_replace->srcdev->devid;
5355		int index_srcdev = 0;
5356		int found = 0;
5357		u64 physical_of_found = 0;
5358
5359		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5360			     logical, &tmp_length, &tmp_bbio, 0, 0);
5361		if (ret) {
5362			WARN_ON(tmp_bbio != NULL);
5363			goto out;
5364		}
5365
5366		tmp_num_stripes = tmp_bbio->num_stripes;
5367		if (mirror_num > tmp_num_stripes) {
5368			/*
5369			 * REQ_GET_READ_MIRRORS does not contain this
5370			 * mirror, that means that the requested area
5371			 * is not left of the left cursor
5372			 */
5373			ret = -EIO;
5374			btrfs_put_bbio(tmp_bbio);
5375			goto out;
5376		}
5377
5378		/*
5379		 * process the rest of the function using the mirror_num
5380		 * of the source drive. Therefore look it up first.
5381		 * At the end, patch the device pointer to the one of the
5382		 * target drive.
5383		 */
5384		for (i = 0; i < tmp_num_stripes; i++) {
5385			if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
5386				continue;
5387
5388			/*
5389			 * In case of DUP, in order to keep it simple, only add
5390			 * the mirror with the lowest physical address
5391			 */
5392			if (found &&
5393			    physical_of_found <= tmp_bbio->stripes[i].physical)
5394				continue;
5395
5396			index_srcdev = i;
5397			found = 1;
5398			physical_of_found = tmp_bbio->stripes[i].physical;
5399		}
5400
5401		btrfs_put_bbio(tmp_bbio);
5402
5403		if (!found) {
5404			WARN_ON(1);
5405			ret = -EIO;
5406			goto out;
5407		}
5408
5409		mirror_num = index_srcdev + 1;
5410		patch_the_first_stripe_for_dev_replace = 1;
5411		physical_to_patch_in_first_stripe = physical_of_found;
5412	} else if (mirror_num > map->num_stripes) {
5413		mirror_num = 0;
5414	}
5415
5416	num_stripes = 1;
5417	stripe_index = 0;
5418	stripe_nr_orig = stripe_nr;
5419	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5420	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
5421	stripe_end_offset = stripe_nr_end * map->stripe_len -
5422			    (offset + *length);
5423
5424	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5425		if (rw & REQ_DISCARD)
5426			num_stripes = min_t(u64, map->num_stripes,
5427					    stripe_nr_end - stripe_nr_orig);
5428		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5429				&stripe_index);
5430		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
5431			mirror_num = 1;
5432	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5433		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
5434			num_stripes = map->num_stripes;
5435		else if (mirror_num)
5436			stripe_index = mirror_num - 1;
5437		else {
5438			stripe_index = find_live_mirror(fs_info, map, 0,
5439					    map->num_stripes,
5440					    current->pid % map->num_stripes,
5441					    dev_replace_is_ongoing);
5442			mirror_num = stripe_index + 1;
5443		}
5444
5445	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5446		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
5447			num_stripes = map->num_stripes;
5448		} else if (mirror_num) {
5449			stripe_index = mirror_num - 1;
5450		} else {
5451			mirror_num = 1;
5452		}
5453
5454	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5455		u32 factor = map->num_stripes / map->sub_stripes;
5456
5457		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5458		stripe_index *= map->sub_stripes;
5459
5460		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5461			num_stripes = map->sub_stripes;
5462		else if (rw & REQ_DISCARD)
5463			num_stripes = min_t(u64, map->sub_stripes *
5464					    (stripe_nr_end - stripe_nr_orig),
5465					    map->num_stripes);
5466		else if (mirror_num)
5467			stripe_index += mirror_num - 1;
5468		else {
5469			int old_stripe_index = stripe_index;
5470			stripe_index = find_live_mirror(fs_info, map,
5471					      stripe_index,
5472					      map->sub_stripes, stripe_index +
5473					      current->pid % map->sub_stripes,
5474					      dev_replace_is_ongoing);
5475			mirror_num = stripe_index - old_stripe_index + 1;
5476		}
5477
5478	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5479		if (need_raid_map &&
5480		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5481		     mirror_num > 1)) {
5482			/* push stripe_nr back to the start of the full stripe */
5483			stripe_nr = div_u64(raid56_full_stripe_start,
5484					stripe_len * nr_data_stripes(map));
5485
5486			/* RAID[56] write or recovery. Return all stripes */
5487			num_stripes = map->num_stripes;
5488			max_errors = nr_parity_stripes(map);
5489
5490			*length = map->stripe_len;
5491			stripe_index = 0;
5492			stripe_offset = 0;
5493		} else {
5494			/*
5495			 * Mirror #0 or #1 means the original data block.
5496			 * Mirror #2 is RAID5 parity block.
5497			 * Mirror #3 is RAID6 Q block.
5498			 */
5499			stripe_nr = div_u64_rem(stripe_nr,
5500					nr_data_stripes(map), &stripe_index);
5501			if (mirror_num > 1)
5502				stripe_index = nr_data_stripes(map) +
5503						mirror_num - 2;
5504
5505			/* We distribute the parity blocks across stripes */
5506			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5507					&stripe_index);
5508			if (!(rw & (REQ_WRITE | REQ_DISCARD |
5509				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
5510				mirror_num = 1;
5511		}
5512	} else {
5513		/*
5514		 * after this, stripe_nr is the number of stripes on this
5515		 * device we have to walk to find the data, and stripe_index is
5516		 * the number of our device in the stripe array
5517		 */
5518		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5519				&stripe_index);
5520		mirror_num = stripe_index + 1;
5521	}
5522	BUG_ON(stripe_index >= map->num_stripes);
5523
5524	num_alloc_stripes = num_stripes;
5525	if (dev_replace_is_ongoing) {
5526		if (rw & (REQ_WRITE | REQ_DISCARD))
5527			num_alloc_stripes <<= 1;
5528		if (rw & REQ_GET_READ_MIRRORS)
5529			num_alloc_stripes++;
5530		tgtdev_indexes = num_stripes;
5531	}
5532
5533	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5534	if (!bbio) {
5535		ret = -ENOMEM;
5536		goto out;
5537	}
5538	if (dev_replace_is_ongoing)
5539		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5540
5541	/* build raid_map */
5542	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
5543	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5544	    mirror_num > 1)) {
5545		u64 tmp;
5546		unsigned rot;
5547
5548		bbio->raid_map = (u64 *)((void *)bbio->stripes +
5549				 sizeof(struct btrfs_bio_stripe) *
5550				 num_alloc_stripes +
5551				 sizeof(int) * tgtdev_indexes);
5552
5553		/* Work out the disk rotation on this stripe-set */
5554		div_u64_rem(stripe_nr, num_stripes, &rot);
5555
5556		/* Fill in the logical address of each stripe */
5557		tmp = stripe_nr * nr_data_stripes(map);
5558		for (i = 0; i < nr_data_stripes(map); i++)
5559			bbio->raid_map[(i+rot) % num_stripes] =
5560				em->start + (tmp + i) * map->stripe_len;
5561
5562		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
5563		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5564			bbio->raid_map[(i+rot+1) % num_stripes] =
5565				RAID6_Q_STRIPE;
5566	}
5567
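	/*
	 * Illustrative rotation (not part of the original source): on a
	 * 3-device RAID5 (two data stripes plus P), rot walks the parity
	 * across the devices as stripe_nr grows:
	 *
	 *	stripe_nr 0, rot 0: raid_map = { D0, D1, P  }
	 *	stripe_nr 1, rot 1: raid_map = { P,  D2, D3 }
	 *	stripe_nr 2, rot 2: raid_map = { D5, P,  D4 }
	 *
	 * sort_parity_stripes() below then reorders stripes[] so the
	 * data stripes come first in logical order.
	 */
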
5568	if (rw & REQ_DISCARD) {
5569		u32 factor = 0;
5570		u32 sub_stripes = 0;
5571		u64 stripes_per_dev = 0;
5572		u32 remaining_stripes = 0;
5573		u32 last_stripe = 0;
5574
5575		if (map->type &
5576		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
5577			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5578				sub_stripes = 1;
5579			else
5580				sub_stripes = map->sub_stripes;
5581
5582			factor = map->num_stripes / sub_stripes;
5583			stripes_per_dev = div_u64_rem(stripe_nr_end -
5584						      stripe_nr_orig,
5585						      factor,
5586						      &remaining_stripes);
5587			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5588			last_stripe *= sub_stripes;
5589		}
5590
5591		for (i = 0; i < num_stripes; i++) {
5592			bbio->stripes[i].physical =
5593				map->stripes[stripe_index].physical +
5594				stripe_offset + stripe_nr * map->stripe_len;
5595			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5596
5597			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5598					 BTRFS_BLOCK_GROUP_RAID10)) {
5599				bbio->stripes[i].length = stripes_per_dev *
5600							  map->stripe_len;
5601
5602				if (i / sub_stripes < remaining_stripes)
5603					bbio->stripes[i].length +=
5604						map->stripe_len;
5605
5606				/*
5607				 * Special for the first stripe and
5608				 * the last stripe:
5609				 *
5610				 * |-------|...|-------|
5611				 *     |----------|
5612				 *    off     end_off
5613				 */
5614				if (i < sub_stripes)
5615					bbio->stripes[i].length -=
5616						stripe_offset;
5617
5618				if (stripe_index >= last_stripe &&
5619				    stripe_index <= (last_stripe +
5620						     sub_stripes - 1))
5621					bbio->stripes[i].length -=
5622						stripe_end_offset;
5623
5624				if (i == sub_stripes - 1)
5625					stripe_offset = 0;
5626			} else
5627				bbio->stripes[i].length = *length;
5628
5629			stripe_index++;
5630			if (stripe_index == map->num_stripes) {
5631				/* This could only happen for RAID0/10 */
5632				stripe_index = 0;
5633				stripe_nr++;
5634			}
5635		}
5636	} else {
5637		for (i = 0; i < num_stripes; i++) {
5638			bbio->stripes[i].physical =
5639				map->stripes[stripe_index].physical +
5640				stripe_offset +
5641				stripe_nr * map->stripe_len;
5642			bbio->stripes[i].dev =
5643				map->stripes[stripe_index].dev;
5644			stripe_index++;
5645		}
5646	}
5647
5648	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5649		max_errors = btrfs_chunk_max_errors(map);
5650
5651	if (bbio->raid_map)
5652		sort_parity_stripes(bbio, num_stripes);
5653
5654	tgtdev_indexes = 0;
5655	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5656	    dev_replace->tgtdev != NULL) {
5657		int index_where_to_add;
5658		u64 srcdev_devid = dev_replace->srcdev->devid;
5659
5660		/*
5661		 * duplicate the write operations while the dev replace
5662		 * procedure is running. Since the copying of the old disk
5663		 * to the new disk takes place at run time while the
5664		 * filesystem is mounted writable, the regular write
5665		 * operations to the old disk have to be duplicated to go
5666		 * to the new disk as well.
5667		 * Note that device->missing is handled by the caller, and
5668		 * that the write to the old disk is already set up in the
5669		 * stripes array.
5670		 */
5671		index_where_to_add = num_stripes;
5672		for (i = 0; i < num_stripes; i++) {
5673			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5674				/* write to new disk, too */
5675				struct btrfs_bio_stripe *new =
5676					bbio->stripes + index_where_to_add;
5677				struct btrfs_bio_stripe *old =
5678					bbio->stripes + i;
5679
5680				new->physical = old->physical;
5681				new->length = old->length;
5682				new->dev = dev_replace->tgtdev;
5683				bbio->tgtdev_map[i] = index_where_to_add;
5684				index_where_to_add++;
5685				max_errors++;
5686				tgtdev_indexes++;
5687			}
5688		}
5689		num_stripes = index_where_to_add;
5690	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5691		   dev_replace->tgtdev != NULL) {
5692		u64 srcdev_devid = dev_replace->srcdev->devid;
5693		int index_srcdev = 0;
5694		int found = 0;
5695		u64 physical_of_found = 0;
5696
5697		/*
5698		 * During the dev-replace procedure, the target drive can
5699		 * also be used to read data in case it is needed to repair
5700		 * a corrupt block elsewhere. This is possible if the
5701			 * requested area is to the left of the left cursor. In this area,
5702		 * the target drive is a full copy of the source drive.
5703		 */
5704		for (i = 0; i < num_stripes; i++) {
5705			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5706				/*
5707				 * In case of DUP, in order to keep it
5708				 * simple, only add the mirror with the
5709				 * lowest physical address
5710				 */
5711				if (found &&
5712				    physical_of_found <=
5713				     bbio->stripes[i].physical)
5714					continue;
5715				index_srcdev = i;
5716				found = 1;
5717				physical_of_found = bbio->stripes[i].physical;
5718			}
5719		}
5720		if (found) {
5721			if (physical_of_found + map->stripe_len <=
5722			    dev_replace->cursor_left) {
5723				struct btrfs_bio_stripe *tgtdev_stripe =
5724					bbio->stripes + num_stripes;
5725
5726				tgtdev_stripe->physical = physical_of_found;
5727				tgtdev_stripe->length =
5728					bbio->stripes[index_srcdev].length;
5729				tgtdev_stripe->dev = dev_replace->tgtdev;
5730				bbio->tgtdev_map[index_srcdev] = num_stripes;
5731
5732				tgtdev_indexes++;
5733				num_stripes++;
5734			}
5735		}
5736	}
5737
5738	*bbio_ret = bbio;
5739	bbio->map_type = map->type;
5740	bbio->num_stripes = num_stripes;
5741	bbio->max_errors = max_errors;
5742	bbio->mirror_num = mirror_num;
5743	bbio->num_tgtdevs = tgtdev_indexes;
5744
5745	/*
5746	 * This is the case where the request is a read, dev_replace_is_ongoing,
5747	 * mirror_num == num_stripes + 1, and the dev_replace target drive is
5748	 * available as a mirror.
5749	 */
5750	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5751		WARN_ON(num_stripes > 1);
5752		bbio->stripes[0].dev = dev_replace->tgtdev;
5753		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5754		bbio->mirror_num = map->num_stripes + 1;
5755	}
5756out:
5757	if (dev_replace_is_ongoing) {
5758		btrfs_dev_replace_clear_lock_blocking(dev_replace);
5759		btrfs_dev_replace_unlock(dev_replace, 0);
5760	}
5761	free_extent_map(em);
5762	return ret;
5763}
5764
5765int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5766		      u64 logical, u64 *length,
5767		      struct btrfs_bio **bbio_ret, int mirror_num)
5768{
5769	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5770				 mirror_num, 0);
5771}
5772
5773/* For Scrub/replace */
5774int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
5775		     u64 logical, u64 *length,
5776		     struct btrfs_bio **bbio_ret, int mirror_num,
5777		     int need_raid_map)
5778{
5779	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5780				 mirror_num, need_raid_map);
5781}
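
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * translate one logical range into its physical copies.  On return,
 * *length is clamped to what is contiguous within the chunk, and the
 * bbio lists one stripe per device that holds (part of) the range.  The
 * example_map_one_block() helper below is hypothetical.
 */
static void __maybe_unused example_map_one_block(struct btrfs_fs_info *fs_info,
						 u64 logical, u64 len)
{
	struct btrfs_bio *bbio = NULL;
	int ret, i;

	ret = btrfs_map_block(fs_info, READ, logical, &len, &bbio, 0);
	if (ret)
		return;

	for (i = 0; i < bbio->num_stripes; i++)
		pr_debug("stripe %d: devid %llu physical %llu\n", i,
			 bbio->stripes[i].dev->devid,
			 bbio->stripes[i].physical);

	btrfs_put_bbio(bbio);	/* drop the reference alloc_btrfs_bio took */
}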
5782
5783int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5784		     u64 chunk_start, u64 physical, u64 devid,
5785		     u64 **logical, int *naddrs, int *stripe_len)
5786{
5787	struct extent_map_tree *em_tree = &map_tree->map_tree;
5788	struct extent_map *em;
5789	struct map_lookup *map;
5790	u64 *buf;
5791	u64 bytenr;
5792	u64 length;
5793	u64 stripe_nr;
5794	u64 rmap_len;
5795	int i, j, nr = 0;
5796
5797	read_lock(&em_tree->lock);
5798	em = lookup_extent_mapping(em_tree, chunk_start, 1);
5799	read_unlock(&em_tree->lock);
5800
5801	if (!em) {
5802		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
5803		       chunk_start);
5804		return -EIO;
5805	}
5806
5807	if (em->start != chunk_start) {
5808		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
5809		       em->start, chunk_start);
5810		free_extent_map(em);
5811		return -EIO;
5812	}
5813	map = em->map_lookup;
5814
5815	length = em->len;
5816	rmap_len = map->stripe_len;
5817
5818	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5819		length = div_u64(length, map->num_stripes / map->sub_stripes);
5820	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5821		length = div_u64(length, map->num_stripes);
5822	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5823		length = div_u64(length, nr_data_stripes(map));
5824		rmap_len = map->stripe_len * nr_data_stripes(map);
5825	}
5826
5827	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5828	BUG_ON(!buf); /* -ENOMEM */
5829
5830	for (i = 0; i < map->num_stripes; i++) {
5831		if (devid && map->stripes[i].dev->devid != devid)
5832			continue;
5833		if (map->stripes[i].physical > physical ||
5834		    map->stripes[i].physical + length <= physical)
5835			continue;
5836
5837		stripe_nr = physical - map->stripes[i].physical;
5838		stripe_nr = div_u64(stripe_nr, map->stripe_len);
5839
5840		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5841			stripe_nr = stripe_nr * map->num_stripes + i;
5842			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5843		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5844			stripe_nr = stripe_nr * map->num_stripes + i;
5845		} /* else if RAID[56], multiply by nr_data_stripes().
5846		   * Alternatively, just use rmap_len below instead of
5847		   * map->stripe_len */
5848
5849		bytenr = chunk_start + stripe_nr * rmap_len;
5850		WARN_ON(nr >= map->num_stripes);
5851		for (j = 0; j < nr; j++) {
5852			if (buf[j] == bytenr)
5853				break;
5854		}
5855		if (j == nr) {
5856			WARN_ON(nr >= map->num_stripes);
5857			buf[nr++] = bytenr;
5858		}
5859	}
5860
5861	*logical = buf;
5862	*naddrs = nr;
5863	*stripe_len = rmap_len;
5864
5865	free_extent_map(em);
5866	return 0;
5867}
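
/*
 * Illustrative sketch, not part of the original file: the reverse lookup
 * above in use.  Given a physical byte offset on a device inside a chunk,
 * btrfs_rmap_block() returns the logical address(es) that map onto it;
 * passing devid == 0 matches any device.  The caller owns the returned
 * array.  The example_rmap() helper below is hypothetical.
 */
static void __maybe_unused example_rmap(struct btrfs_mapping_tree *map_tree,
					u64 chunk_start, u64 physical)
{
	u64 *logical = NULL;
	int naddrs = 0, stripe_len = 0, i;

	if (btrfs_rmap_block(map_tree, chunk_start, physical, 0,
			     &logical, &naddrs, &stripe_len))
		return;

	for (i = 0; i < naddrs; i++)
		pr_debug("physical %llu -> logical %llu (stripe_len %d)\n",
			 physical, logical[i], stripe_len);

	kfree(logical);	/* the buffer was allocated with kcalloc() above */
}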
5868
5869static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
5870{
5871	bio->bi_private = bbio->private;
5872	bio->bi_end_io = bbio->end_io;
5873	bio_endio(bio);
5874
5875	btrfs_put_bbio(bbio);
5876}
5877
5878static void btrfs_end_bio(struct bio *bio)
5879{
5880	struct btrfs_bio *bbio = bio->bi_private;
5881	int is_orig_bio = 0;
5882
5883	if (bio->bi_error) {
5884		atomic_inc(&bbio->error);
5885		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
5886			unsigned int stripe_index =
5887				btrfs_io_bio(bio)->stripe_index;
5888			struct btrfs_device *dev;
5889
5890			BUG_ON(stripe_index >= bbio->num_stripes);
5891			dev = bbio->stripes[stripe_index].dev;
5892			if (dev->bdev) {
5893				if (bio->bi_rw & WRITE)
5894					btrfs_dev_stat_inc(dev,
5895						BTRFS_DEV_STAT_WRITE_ERRS);
5896				else
5897					btrfs_dev_stat_inc(dev,
5898						BTRFS_DEV_STAT_READ_ERRS);
5899				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5900					btrfs_dev_stat_inc(dev,
5901						BTRFS_DEV_STAT_FLUSH_ERRS);
5902				btrfs_dev_stat_print_on_error(dev);
5903			}
5904		}
5905	}
5906
5907	if (bio == bbio->orig_bio)
5908		is_orig_bio = 1;
5909
5910	btrfs_bio_counter_dec(bbio->fs_info);
5911
5912	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5913		if (!is_orig_bio) {
5914			bio_put(bio);
5915			bio = bbio->orig_bio;
5916		}
5917
5918		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5919		/* only send an error to the higher layers if it is
5920		 * beyond the tolerance of the btrfs bio
5921		 */
5922		if (atomic_read(&bbio->error) > bbio->max_errors) {
5923			bio->bi_error = -EIO;
5924		} else {
5925			/*
5926			 * this bio is actually up to date, we didn't
5927			 * go over the max number of errors
5928			 */
5929			bio->bi_error = 0;
5930		}
5931
5932		btrfs_end_bbio(bbio, bio);
5933	} else if (!is_orig_bio) {
5934		bio_put(bio);
5935	}
5936}
5937
5938/*
5939 * see run_scheduled_bios for a description of why bios are collected for
5940 * async submit.
5941 *
5942 * This will add one bio to the pending list for a device and make sure
5943 * the work struct is scheduled.
5944 */
5945static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5946					struct btrfs_device *device,
5947					int rw, struct bio *bio)
5948{
5949	int should_queue = 1;
5950	struct btrfs_pending_bios *pending_bios;
5951
5952	if (device->missing || !device->bdev) {
5953		bio_io_error(bio);
5954		return;
5955	}
5956
5957	/* don't bother with additional async steps for reads, right now */
5958	if (!(rw & REQ_WRITE)) {
5959		bio_get(bio);
5960		btrfsic_submit_bio(rw, bio);
5961		bio_put(bio);
5962		return;
5963	}
5964
5965	/*
5966	 * nr_async_bios allows us to reliably return congestion to the
5967	 * higher layers.  Otherwise, the async bio makes it appear we have
5968	 * made progress against dirty pages when we've really just put it
5969	 * on a queue for later
5970	 */
5971	atomic_inc(&root->fs_info->nr_async_bios);
5972	WARN_ON(bio->bi_next);
5973	bio->bi_next = NULL;
5974	bio->bi_rw |= rw;
5975
5976	spin_lock(&device->io_lock);
5977	if (bio->bi_rw & REQ_SYNC)
5978		pending_bios = &device->pending_sync_bios;
5979	else
5980		pending_bios = &device->pending_bios;
5981
5982	if (pending_bios->tail)
5983		pending_bios->tail->bi_next = bio;
5984
5985	pending_bios->tail = bio;
5986	if (!pending_bios->head)
5987		pending_bios->head = bio;
5988	if (device->running_pending)
5989		should_queue = 0;
5990
5991	spin_unlock(&device->io_lock);
5992
5993	if (should_queue)
5994		btrfs_queue_work(root->fs_info->submit_workers,
5995				 &device->work);
5996}
5997
5998static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5999			      struct bio *bio, u64 physical, int dev_nr,
6000			      int rw, int async)
6001{
6002	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6003
6004	bio->bi_private = bbio;
6005	btrfs_io_bio(bio)->stripe_index = dev_nr;
6006	bio->bi_end_io = btrfs_end_bio;
6007	bio->bi_iter.bi_sector = physical >> 9;
6008#ifdef DEBUG
6009	{
6010		struct rcu_string *name;
6011
6012		rcu_read_lock();
6013		name = rcu_dereference(dev->name);
6014		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
6015			 "(%s id %llu), size=%u\n", rw,
6016			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
6017			 name->str, dev->devid, bio->bi_iter.bi_size);
6018		rcu_read_unlock();
6019	}
6020#endif
6021	bio->bi_bdev = dev->bdev;
6022
6023	btrfs_bio_counter_inc_noblocked(root->fs_info);
6024
6025	if (async)
6026		btrfs_schedule_bio(root, dev, rw, bio);
6027	else
6028		btrfsic_submit_bio(rw, bio);
6029}
6030
6031static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6032{
6033	atomic_inc(&bbio->error);
6034	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6035		/* Should be the original bio. */
6036		WARN_ON(bio != bbio->orig_bio);
6037
6038		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6039		bio->bi_iter.bi_sector = logical >> 9;
6040		bio->bi_error = -EIO;
6041		btrfs_end_bbio(bbio, bio);
6042	}
6043}
6044
6045int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
6046		  int mirror_num, int async_submit)
6047{
6048	struct btrfs_device *dev;
6049	struct bio *first_bio = bio;
6050	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6051	u64 length = 0;
6052	u64 map_length;
6053	int ret;
6054	int dev_nr;
6055	int total_devs;
6056	struct btrfs_bio *bbio = NULL;
6057
6058	length = bio->bi_iter.bi_size;
6059	map_length = length;
6060
6061	btrfs_bio_counter_inc_blocked(root->fs_info);
6062	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
6063			      mirror_num, 1);
6064	if (ret) {
6065		btrfs_bio_counter_dec(root->fs_info);
6066		return ret;
6067	}
6068
6069	total_devs = bbio->num_stripes;
6070	bbio->orig_bio = first_bio;
6071	bbio->private = first_bio->bi_private;
6072	bbio->end_io = first_bio->bi_end_io;
6073	bbio->fs_info = root->fs_info;
6074	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6075
6076	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6077	    ((rw & WRITE) || (mirror_num > 1))) {
6078		/* In this case, map_length has been set to the length of
6079		   a single stripe, not the whole write */
6080		if (rw & WRITE) {
6081			ret = raid56_parity_write(root, bio, bbio, map_length);
6082		} else {
6083			ret = raid56_parity_recover(root, bio, bbio, map_length,
6084						    mirror_num, 1);
6085		}
6086
6087		btrfs_bio_counter_dec(root->fs_info);
6088		return ret;
6089	}
6090
6091	if (map_length < length) {
6092		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
6093			logical, length, map_length);
6094		BUG();
6095	}
6096
6097	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6098		dev = bbio->stripes[dev_nr].dev;
6099		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
6100			bbio_error(bbio, first_bio, logical);
6101			continue;
6102		}
6103
6104		if (dev_nr < total_devs - 1) {
6105			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
6106			BUG_ON(!bio); /* -ENOMEM */
6107		} else
6108			bio = first_bio;
6109
6110		submit_stripe_bio(root, bbio, bio,
6111				  bbio->stripes[dev_nr].physical, dev_nr, rw,
6112				  async_submit);
6113	}
6114	btrfs_bio_counter_dec(root->fs_info);
6115	return 0;
6116}
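
/*
 * Illustrative sketch, not part of the original file: what a submission
 * path boils down to.  The caller builds a bio against the *logical*
 * address space and btrfs_map_bio() fans it out to the physical stripes,
 * cloning the bio when more than one device must be written.  The
 * example_submit() helper below is hypothetical.
 */
static int __maybe_unused example_submit(struct btrfs_root *root,
					 struct bio *bio, int rw)
{
	/*
	 * bio->bi_iter.bi_sector holds a logical sector here, not a
	 * device sector; __btrfs_map_block() does the translation.
	 */
	return btrfs_map_bio(root, rw, bio, 0, 1);
}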
6117
6118struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
6119				       u8 *uuid, u8 *fsid)
6120{
6121	struct btrfs_device *device;
6122	struct btrfs_fs_devices *cur_devices;
6123
6124	cur_devices = fs_info->fs_devices;
6125	while (cur_devices) {
6126		if (!fsid ||
6127		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
6128			device = __find_device(&cur_devices->devices,
6129					       devid, uuid);
6130			if (device)
6131				return device;
6132		}
6133		cur_devices = cur_devices->seed;
6134	}
6135	return NULL;
6136}
6137
6138static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
6139					    struct btrfs_fs_devices *fs_devices,
6140					    u64 devid, u8 *dev_uuid)
6141{
6142	struct btrfs_device *device;
6143
6144	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6145	if (IS_ERR(device))
6146		return NULL;
6147
6148	list_add(&device->dev_list, &fs_devices->devices);
6149	device->fs_devices = fs_devices;
6150	fs_devices->num_devices++;
6151
6152	device->missing = 1;
6153	fs_devices->missing_devices++;
6154
6155	return device;
6156}
6157
6158/**
6159 * btrfs_alloc_device - allocate struct btrfs_device
6160 * @fs_info:	used only for generating a new devid, can be NULL if
6161 *		devid is provided (i.e. @devid != NULL).
6162 * @devid:	a pointer to devid for this device.  If NULL a new devid
6163 *		is generated.
6164 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6165 *		is generated.
6166 *
6167 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6168 * on error.  Returned struct is not linked onto any lists and can be
6169 * destroyed with kfree() right away.
6170 */
6171struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6172					const u64 *devid,
6173					const u8 *uuid)
6174{
6175	struct btrfs_device *dev;
6176	u64 tmp;
6177
6178	if (WARN_ON(!devid && !fs_info))
6179		return ERR_PTR(-EINVAL);
6180
6181	dev = __alloc_device();
6182	if (IS_ERR(dev))
6183		return dev;
6184
6185	if (devid)
6186		tmp = *devid;
6187	else {
6188		int ret;
6189
6190		ret = find_next_devid(fs_info, &tmp);
6191		if (ret) {
6192			kfree(dev);
6193			return ERR_PTR(ret);
6194		}
6195	}
6196	dev->devid = tmp;
6197
6198	if (uuid)
6199		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6200	else
6201		generate_random_uuid(dev->uuid);
6202
6203	btrfs_init_work(&dev->work, btrfs_submit_helper,
6204			pending_bios_fn, NULL, NULL);
6205
6206	return dev;
6207}
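
/*
 * Illustrative sketch, not part of the original file: the two ways
 * btrfs_alloc_device() is meant to be called, per the kernel-doc above.
 * The example_alloc() helper below is hypothetical.
 */
static void __maybe_unused example_alloc(struct btrfs_fs_info *fs_info)
{
	u64 devid = 42;	/* arbitrary example devid */
	struct btrfs_device *dev;

	/* explicit devid, random uuid: fs_info may be NULL */
	dev = btrfs_alloc_device(NULL, &devid, NULL);
	if (!IS_ERR(dev))
		kfree(dev);	/* not on any list yet, kfree() is enough */

	/* no devid given: fs_info is required to find the next free one */
	dev = btrfs_alloc_device(fs_info, NULL, NULL);
	if (!IS_ERR(dev))
		kfree(dev);
}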
6208
6209static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6210			  struct extent_buffer *leaf,
6211			  struct btrfs_chunk *chunk)
6212{
6213	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6214	struct map_lookup *map;
6215	struct extent_map *em;
6216	u64 logical;
6217	u64 length;
6218	u64 stripe_len;
6219	u64 devid;
6220	u8 uuid[BTRFS_UUID_SIZE];
6221	int num_stripes;
6222	int ret;
6223	int i;
6224
6225	logical = key->offset;
6226	length = btrfs_chunk_length(leaf, chunk);
6227	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6228	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6229	/* Validation check */
6230	if (!num_stripes) {
6231		btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
6232			  num_stripes);
6233		return -EIO;
6234	}
6235	if (!IS_ALIGNED(logical, root->sectorsize)) {
6236		btrfs_err(root->fs_info,
6237			  "invalid chunk logical %llu", logical);
6238		return -EIO;
6239	}
6240	if (!length || !IS_ALIGNED(length, root->sectorsize)) {
6241		btrfs_err(root->fs_info,
6242			"invalid chunk length %llu", length);
6243		return -EIO;
6244	}
6245	if (!is_power_of_2(stripe_len)) {
6246		btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
6247			  stripe_len);
6248		return -EIO;
6249	}
6250	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6251	    btrfs_chunk_type(leaf, chunk)) {
6252		btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
6253			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
6254			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6255			  btrfs_chunk_type(leaf, chunk));
6256		return -EIO;
6257	}
6258
6259	read_lock(&map_tree->map_tree.lock);
6260	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6261	read_unlock(&map_tree->map_tree.lock);
6262
6263	/* already mapped? */
6264	if (em && em->start <= logical && em->start + em->len > logical) {
6265		free_extent_map(em);
6266		return 0;
6267	} else if (em) {
6268		free_extent_map(em);
6269	}
6270
6271	em = alloc_extent_map();
6272	if (!em)
6273		return -ENOMEM;
6274	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6275	if (!map) {
6276		free_extent_map(em);
6277		return -ENOMEM;
6278	}
6279
6280	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6281	em->map_lookup = map;
6282	em->start = logical;
6283	em->len = length;
6284	em->orig_start = 0;
6285	em->block_start = 0;
6286	em->block_len = em->len;
6287
6288	map->num_stripes = num_stripes;
6289	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6290	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6291	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
6292	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6293	map->type = btrfs_chunk_type(leaf, chunk);
6294	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6295	for (i = 0; i < num_stripes; i++) {
6296		map->stripes[i].physical =
6297			btrfs_stripe_offset_nr(leaf, chunk, i);
6298		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6299		read_extent_buffer(leaf, uuid, (unsigned long)
6300				   btrfs_stripe_dev_uuid_nr(chunk, i),
6301				   BTRFS_UUID_SIZE);
6302		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
6303							uuid, NULL);
6304		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
6305			free_extent_map(em);
6306			return -EIO;
6307		}
6308		if (!map->stripes[i].dev) {
6309			map->stripes[i].dev =
6310				add_missing_dev(root, root->fs_info->fs_devices,
6311						devid, uuid);
6312			if (!map->stripes[i].dev) {
6313				free_extent_map(em);
6314				return -EIO;
6315			}
6316			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
6317						devid, uuid);
6318		}
6319		map->stripes[i].dev->in_fs_metadata = 1;
6320	}
6321
6322	write_lock(&map_tree->map_tree.lock);
6323	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6324	write_unlock(&map_tree->map_tree.lock);
6325	BUG_ON(ret); /* Tree corruption */
6326	free_extent_map(em);
6327
6328	return 0;
6329}
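
/*
 * Illustrative sketch, not part of the original file: the sanity rules
 * read_one_chunk() enforces, collected into a single predicate.  The
 * example_chunk_is_sane() helper below is hypothetical; sectorsize
 * stands in for root->sectorsize.
 */
static bool __maybe_unused example_chunk_is_sane(u64 logical, u64 length,
						 u64 stripe_len,
						 u16 num_stripes, u64 type,
						 u32 sectorsize)
{
	if (!num_stripes)
		return false;			/* at least one stripe */
	if (!IS_ALIGNED(logical, sectorsize))
		return false;			/* aligned start */
	if (!length || !IS_ALIGNED(length, sectorsize))
		return false;			/* non-empty, aligned length */
	if (!is_power_of_2(stripe_len))
		return false;			/* power-of-two stripe */
	if (type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
		     BTRFS_BLOCK_GROUP_PROFILE_MASK))
		return false;			/* no unknown type bits */
	return true;
}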
6330
6331static void fill_device_from_item(struct extent_buffer *leaf,
6332				 struct btrfs_dev_item *dev_item,
6333				 struct btrfs_device *device)
6334{
6335	unsigned long ptr;
6336
6337	device->devid = btrfs_device_id(leaf, dev_item);
6338	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6339	device->total_bytes = device->disk_total_bytes;
6340	device->commit_total_bytes = device->disk_total_bytes;
6341	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6342	device->commit_bytes_used = device->bytes_used;
6343	device->type = btrfs_device_type(leaf, dev_item);
6344	device->io_align = btrfs_device_io_align(leaf, dev_item);
6345	device->io_width = btrfs_device_io_width(leaf, dev_item);
6346	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6347	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6348	device->is_tgtdev_for_dev_replace = 0;
6349
6350	ptr = btrfs_device_uuid(dev_item);
6351	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6352}
6353
6354static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
6355						  u8 *fsid)
6356{
6357	struct btrfs_fs_devices *fs_devices;
6358	int ret;
6359
6360	BUG_ON(!mutex_is_locked(&uuid_mutex));
6361
6362	fs_devices = root->fs_info->fs_devices->seed;
6363	while (fs_devices) {
6364		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
6365			return fs_devices;
6366
6367		fs_devices = fs_devices->seed;
6368	}
6369
6370	fs_devices = find_fsid(fsid);
6371	if (!fs_devices) {
6372		if (!btrfs_test_opt(root, DEGRADED))
6373			return ERR_PTR(-ENOENT);
6374
6375		fs_devices = alloc_fs_devices(fsid);
6376		if (IS_ERR(fs_devices))
6377			return fs_devices;
6378
6379		fs_devices->seeding = 1;
6380		fs_devices->opened = 1;
6381		return fs_devices;
6382	}
6383
6384	fs_devices = clone_fs_devices(fs_devices);
6385	if (IS_ERR(fs_devices))
6386		return fs_devices;
6387
6388	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6389				   root->fs_info->bdev_holder);
6390	if (ret) {
6391		free_fs_devices(fs_devices);
6392		fs_devices = ERR_PTR(ret);
6393		goto out;
6394	}
6395
6396	if (!fs_devices->seeding) {
6397		__btrfs_close_devices(fs_devices);
6398		free_fs_devices(fs_devices);
6399		fs_devices = ERR_PTR(-EINVAL);
6400		goto out;
6401	}
6402
6403	fs_devices->seed = root->fs_info->fs_devices->seed;
6404	root->fs_info->fs_devices->seed = fs_devices;
6405out:
6406	return fs_devices;
6407}
6408
6409static int read_one_dev(struct btrfs_root *root,
6410			struct extent_buffer *leaf,
6411			struct btrfs_dev_item *dev_item)
6412{
6413	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6414	struct btrfs_device *device;
6415	u64 devid;
6416	int ret;
6417	u8 fs_uuid[BTRFS_UUID_SIZE];
6418	u8 dev_uuid[BTRFS_UUID_SIZE];
6419
6420	devid = btrfs_device_id(leaf, dev_item);
6421	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6422			   BTRFS_UUID_SIZE);
6423	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6424			   BTRFS_UUID_SIZE);
6425
6426	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
6427		fs_devices = open_seed_devices(root, fs_uuid);
6428		if (IS_ERR(fs_devices))
6429			return PTR_ERR(fs_devices);
6430	}
6431
6432	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
6433	if (!device) {
6434		if (!btrfs_test_opt(root, DEGRADED))
6435			return -EIO;
6436
6437		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
6438		if (!device)
6439			return -ENOMEM;
6440		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
6441				devid, dev_uuid);
6442	} else {
6443		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
6444			return -EIO;
6445
6446		if (!device->bdev && !device->missing) {
6447			/*
6448			 * this happens when a device that was properly set up
6449			 * in the device info lists suddenly goes bad.
6450			 * device->bdev is NULL, and so we have to set
6451			 * device->missing to one here
6452			 */
6453			device->fs_devices->missing_devices++;
6454			device->missing = 1;
6455		}
6456
6457		/* Move the device to its own fs_devices */
6458		if (device->fs_devices != fs_devices) {
6459			ASSERT(device->missing);
6460
6461			list_move(&device->dev_list, &fs_devices->devices);
6462			device->fs_devices->num_devices--;
6463			fs_devices->num_devices++;
6464
6465			device->fs_devices->missing_devices--;
6466			fs_devices->missing_devices++;
6467
6468			device->fs_devices = fs_devices;
6469		}
6470	}
6471
6472	if (device->fs_devices != root->fs_info->fs_devices) {
6473		BUG_ON(device->writeable);
6474		if (device->generation !=
6475		    btrfs_device_generation(leaf, dev_item))
6476			return -EINVAL;
6477	}
6478
6479	fill_device_from_item(leaf, dev_item, device);
6480	device->in_fs_metadata = 1;
6481	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
6482		device->fs_devices->total_rw_bytes += device->total_bytes;
6483		spin_lock(&root->fs_info->free_chunk_lock);
6484		root->fs_info->free_chunk_space += device->total_bytes -
6485			device->bytes_used;
6486		spin_unlock(&root->fs_info->free_chunk_lock);
6487	}
6488	ret = 0;
6489	return ret;
6490}
6491
6492int btrfs_read_sys_array(struct btrfs_root *root)
6493{
6494	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
6495	struct extent_buffer *sb;
6496	struct btrfs_disk_key *disk_key;
6497	struct btrfs_chunk *chunk;
6498	u8 *array_ptr;
6499	unsigned long sb_array_offset;
6500	int ret = 0;
6501	u32 num_stripes;
6502	u32 array_size;
6503	u32 len = 0;
6504	u32 cur_offset;
6505	struct btrfs_key key;
6506
6507	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
6508	/*
6509	 * This will create an extent buffer of nodesize; the superblock size is
6510	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6511	 * overallocate, but we can keep it as-is: only the first page is used.
6512	 */
6513	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6514	if (!sb)
6515		return -ENOMEM;
6516	set_extent_buffer_uptodate(sb);
6517	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6518	/*
6519	 * The sb extent buffer is artificial and just used to read the system array.
6520	 * The set_extent_buffer_uptodate() call does not properly mark all its
6521	 * pages up-to-date when the page is larger: the extent does not cover the
6522	 * whole page and consequently check_page_uptodate does not find all
6523	 * the page's extents up-to-date (the hole beyond the sb), so
6524	 * write_extent_buffer then triggers a WARN_ON.
6525	 *
6526	 * Regular short extents go through the mark_extent_buffer_dirty/writeback
6527	 * cycle, but the sb spans only this function. Add an explicit
6528	 * SetPageUptodate call to silence the warning, e.g. on PowerPC 64.
6529	 */
6530	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6531		SetPageUptodate(sb->pages[0]);
6532
6533	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6534	array_size = btrfs_super_sys_array_size(super_copy);
6535
6536	array_ptr = super_copy->sys_chunk_array;
6537	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6538	cur_offset = 0;
6539
6540	while (cur_offset < array_size) {
6541		disk_key = (struct btrfs_disk_key *)array_ptr;
6542		len = sizeof(*disk_key);
6543		if (cur_offset + len > array_size)
6544			goto out_short_read;
6545
6546		btrfs_disk_key_to_cpu(&key, disk_key);
6547
6548		array_ptr += len;
6549		sb_array_offset += len;
6550		cur_offset += len;
6551
6552		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6553			chunk = (struct btrfs_chunk *)sb_array_offset;
6554			/*
6555			 * At least one btrfs_chunk with one stripe must be
6556			 * present, exact stripe count check comes afterwards
6557			 */
6558			len = btrfs_chunk_item_size(1);
6559			if (cur_offset + len > array_size)
6560				goto out_short_read;
6561
6562			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6563			if (!num_stripes) {
6564				printk(KERN_ERR
6565	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
6566					num_stripes, cur_offset);
6567				ret = -EIO;
6568				break;
6569			}
6570
6571			len = btrfs_chunk_item_size(num_stripes);
6572			if (cur_offset + len > array_size)
6573				goto out_short_read;
6574
6575			ret = read_one_chunk(root, &key, sb, chunk);
6576			if (ret)
6577				break;
6578		} else {
6579			printk(KERN_ERR
6580		"BTRFS: unexpected item type %u in sys_array at offset %u\n",
6581				(u32)key.type, cur_offset);
6582			ret = -EIO;
6583			break;
6584		}
6585		array_ptr += len;
6586		sb_array_offset += len;
6587		cur_offset += len;
6588	}
6589	free_extent_buffer(sb);
6590	return ret;
6591
6592out_short_read:
6593	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6594			len, cur_offset);
6595	free_extent_buffer(sb);
6596	return -EIO;
6597}
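
/*
 * Illustrative sketch, not part of the original file: the layout that
 * btrfs_read_sys_array() walks.  sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk + extra stripes) pairs.  A
 * minimal walk over a raw, already-validated copy of the array might
 * look like the hypothetical example_walk_sys_array() below.
 */
static void __maybe_unused example_walk_sys_array(u8 *array, u32 array_size)
{
	u32 cur = 0;

	while (cur < array_size) {
		struct btrfs_disk_key *key;
		struct btrfs_chunk *chunk;
		u16 num_stripes;

		key = (struct btrfs_disk_key *)(array + cur);
		cur += sizeof(*key);
		if (key->type != BTRFS_CHUNK_ITEM_KEY)
			break;	/* only chunk items may live here */

		chunk = (struct btrfs_chunk *)(array + cur);
		num_stripes = le16_to_cpu(chunk->num_stripes);
		pr_debug("chunk at logical %llu, %u stripes\n",
			 le64_to_cpu(key->offset), num_stripes);

		/* the item size grows with the number of stripes */
		cur += btrfs_chunk_item_size(num_stripes);
	}
}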
6598
6599int btrfs_read_chunk_tree(struct btrfs_root *root)
6600{
6601	struct btrfs_path *path;
6602	struct extent_buffer *leaf;
6603	struct btrfs_key key;
6604	struct btrfs_key found_key;
6605	int ret;
6606	int slot;
6607
6608	root = root->fs_info->chunk_root;
6609
6610	path = btrfs_alloc_path();
6611	if (!path)
6612		return -ENOMEM;
6613
6614	mutex_lock(&uuid_mutex);
6615	lock_chunks(root);
6616
6617	/*
6618	 * Read all device items, and then all the chunk items. All
6619	 * device items are found before any chunk item (their object id
6620	 * is smaller than the lowest possible object id for a chunk
6621	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6622	 */
6623	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6624	key.offset = 0;
6625	key.type = 0;
6626	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6627	if (ret < 0)
6628		goto error;
6629	while (1) {
6630		leaf = path->nodes[0];
6631		slot = path->slots[0];
6632		if (slot >= btrfs_header_nritems(leaf)) {
6633			ret = btrfs_next_leaf(root, path);
6634			if (ret == 0)
6635				continue;
6636			if (ret < 0)
6637				goto error;
6638			break;
6639		}
6640		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6641		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6642			struct btrfs_dev_item *dev_item;
6643			dev_item = btrfs_item_ptr(leaf, slot,
6644						  struct btrfs_dev_item);
6645			ret = read_one_dev(root, leaf, dev_item);
6646			if (ret)
6647				goto error;
6648		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6649			struct btrfs_chunk *chunk;
6650			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6651			ret = read_one_chunk(root, &found_key, leaf, chunk);
6652			if (ret)
6653				goto error;
6654		}
6655		path->slots[0]++;
6656	}
6657	ret = 0;
6658error:
6659	unlock_chunks(root);
6660	mutex_unlock(&uuid_mutex);
6661
6662	btrfs_free_path(path);
6663	return ret;
6664}
6665
6666void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6667{
6668	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6669	struct btrfs_device *device;
6670
6671	while (fs_devices) {
6672		mutex_lock(&fs_devices->device_list_mutex);
6673		list_for_each_entry(device, &fs_devices->devices, dev_list)
6674			device->dev_root = fs_info->dev_root;
6675		mutex_unlock(&fs_devices->device_list_mutex);
6676
6677		fs_devices = fs_devices->seed;
6678	}
6679}
6680
6681static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6682{
6683	int i;
6684
6685	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6686		btrfs_dev_stat_reset(dev, i);
6687}
6688
6689int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6690{
6691	struct btrfs_key key;
6692	struct btrfs_key found_key;
6693	struct btrfs_root *dev_root = fs_info->dev_root;
6694	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6695	struct extent_buffer *eb;
6696	int slot;
6697	int ret = 0;
6698	struct btrfs_device *device;
6699	struct btrfs_path *path = NULL;
6700	int i;
6701
6702	path = btrfs_alloc_path();
6703	if (!path) {
6704		ret = -ENOMEM;
6705		goto out;
6706	}
6707
6708	mutex_lock(&fs_devices->device_list_mutex);
6709	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6710		int item_size;
6711		struct btrfs_dev_stats_item *ptr;
6712
6713		key.objectid = BTRFS_DEV_STATS_OBJECTID;
6714		key.type = BTRFS_PERSISTENT_ITEM_KEY;
6715		key.offset = device->devid;
6716		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6717		if (ret) {
6718			__btrfs_reset_dev_stats(device);
6719			device->dev_stats_valid = 1;
6720			btrfs_release_path(path);
6721			continue;
6722		}
6723		slot = path->slots[0];
6724		eb = path->nodes[0];
6725		btrfs_item_key_to_cpu(eb, &found_key, slot);
6726		item_size = btrfs_item_size_nr(eb, slot);
6727
6728		ptr = btrfs_item_ptr(eb, slot,
6729				     struct btrfs_dev_stats_item);
6730
6731		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6732			if (item_size >= (1 + i) * sizeof(__le64))
6733				btrfs_dev_stat_set(device, i,
6734					btrfs_dev_stats_value(eb, ptr, i));
6735			else
6736				btrfs_dev_stat_reset(device, i);
6737		}
6738
6739		device->dev_stats_valid = 1;
6740		btrfs_dev_stat_print_on_load(device);
6741		btrfs_release_path(path);
6742	}
6743	mutex_unlock(&fs_devices->device_list_mutex);
6744
6745out:
6746	btrfs_free_path(path);
6747	return ret < 0 ? ret : 0;
6748}
6749
6750static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6751				struct btrfs_root *dev_root,
6752				struct btrfs_device *device)
6753{
6754	struct btrfs_path *path;
6755	struct btrfs_key key;
6756	struct extent_buffer *eb;
6757	struct btrfs_dev_stats_item *ptr;
6758	int ret;
6759	int i;
6760
6761	key.objectid = BTRFS_DEV_STATS_OBJECTID;
6762	key.type = BTRFS_PERSISTENT_ITEM_KEY;
6763	key.offset = device->devid;
6764
6765	path = btrfs_alloc_path();
6766	BUG_ON(!path);
6767	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6768	if (ret < 0) {
6769		btrfs_warn_in_rcu(dev_root->fs_info,
6770			"error %d while searching for dev_stats item for device %s",
6771			      ret, rcu_str_deref(device->name));
6772		goto out;
6773	}
6774
6775	if (ret == 0 &&
6776	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6777		/* need to delete old one and insert a new one */
6778		ret = btrfs_del_item(trans, dev_root, path);
6779		if (ret != 0) {
6780			btrfs_warn_in_rcu(dev_root->fs_info,
6781				"delete too small dev_stats item for device %s failed %d",
6782				      rcu_str_deref(device->name), ret);
6783			goto out;
6784		}
6785		ret = 1;
6786	}
6787
6788	if (ret == 1) {
6789		/* need to insert a new item */
6790		btrfs_release_path(path);
6791		ret = btrfs_insert_empty_item(trans, dev_root, path,
6792					      &key, sizeof(*ptr));
6793		if (ret < 0) {
6794			btrfs_warn_in_rcu(dev_root->fs_info,
6795				"insert dev_stats item for device %s failed %d",
6796				rcu_str_deref(device->name), ret);
6797			goto out;
6798		}
6799	}
6800
6801	eb = path->nodes[0];
6802	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6803	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6804		btrfs_set_dev_stats_value(eb, ptr, i,
6805					  btrfs_dev_stat_read(device, i));
6806	btrfs_mark_buffer_dirty(eb);
6807
6808out:
6809	btrfs_free_path(path);
6810	return ret;
6811}
6812
6813/*
6814	 * Called from commit_transaction(). Writes all changed device stats to disk.
6815 */
6816int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6817			struct btrfs_fs_info *fs_info)
6818{
6819	struct btrfs_root *dev_root = fs_info->dev_root;
6820	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6821	struct btrfs_device *device;
6822	int stats_cnt;
6823	int ret = 0;
6824
6825	mutex_lock(&fs_devices->device_list_mutex);
6826	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6827		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
6828			continue;
6829
6830		stats_cnt = atomic_read(&device->dev_stats_ccnt);
6831		ret = update_dev_stat_item(trans, dev_root, device);
6832		if (!ret)
6833			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
6834	}
6835	mutex_unlock(&fs_devices->device_list_mutex);
6836
6837	return ret;
6838}
6839
6840void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6841{
6842	btrfs_dev_stat_inc(dev, index);
6843	btrfs_dev_stat_print_on_error(dev);
6844}
6845
6846static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6847{
6848	if (!dev->dev_stats_valid)
6849		return;
6850	btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
6851		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6852			   rcu_str_deref(dev->name),
6853			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6854			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6855			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6856			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6857			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6858}
6859
6860static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6861{
6862	int i;
6863
6864	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6865		if (btrfs_dev_stat_read(dev, i) != 0)
6866			break;
6867	if (i == BTRFS_DEV_STAT_VALUES_MAX)
6868		return; /* all values == 0, suppress message */
6869
6870	btrfs_info_in_rcu(dev->dev_root->fs_info,
6871		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6872	       rcu_str_deref(dev->name),
6873	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6874	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6875	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6876	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6877	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6878}
6879
6880int btrfs_get_dev_stats(struct btrfs_root *root,
6881			struct btrfs_ioctl_get_dev_stats *stats)
6882{
6883	struct btrfs_device *dev;
6884	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6885	int i;
6886
6887	mutex_lock(&fs_devices->device_list_mutex);
6888	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6889	mutex_unlock(&fs_devices->device_list_mutex);
6890
6891	if (!dev) {
6892		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
6893		return -ENODEV;
6894	} else if (!dev->dev_stats_valid) {
6895		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
6896		return -ENODEV;
6897	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6898		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6899			if (stats->nr_items > i)
6900				stats->values[i] =
6901					btrfs_dev_stat_read_and_reset(dev, i);
6902			else
6903				btrfs_dev_stat_reset(dev, i);
6904		}
6905	} else {
6906		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6907			if (stats->nr_items > i)
6908				stats->values[i] = btrfs_dev_stat_read(dev, i);
6909	}
6910	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6911		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6912	return 0;
6913}
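
/*
 * Illustrative sketch, not part of the original file: how an ioctl-style
 * caller might use btrfs_get_dev_stats().  stats.devid selects the
 * device, BTRFS_DEV_STATS_RESET in stats.flags clears counters as they
 * are read, and on return stats.nr_items says how many values[] entries
 * are valid.  The example_read_dev_stats() helper below is hypothetical.
 */
static int __maybe_unused example_read_dev_stats(struct btrfs_root *root,
						 u64 devid)
{
	struct btrfs_ioctl_get_dev_stats stats;
	int ret, i;

	memset(&stats, 0, sizeof(stats));
	stats.devid = devid;
	stats.nr_items = BTRFS_DEV_STAT_VALUES_MAX;

	ret = btrfs_get_dev_stats(root, &stats);
	if (ret)
		return ret;

	for (i = 0; i < stats.nr_items; i++)
		pr_debug("dev stat %d = %llu\n", i, stats.values[i]);
	return 0;
}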
6914
6915void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
6916{
6917	struct buffer_head *bh;
6918	struct btrfs_super_block *disk_super;
6919	int copy_num;
6920
6921	if (!bdev)
6922		return;
6923
6924	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
6925		copy_num++) {
6926
6927		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
6928			continue;
6929
6930		disk_super = (struct btrfs_super_block *)bh->b_data;
6931
6932		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6933		set_buffer_dirty(bh);
6934		sync_dirty_buffer(bh);
6935		brelse(bh);
6936	}
6937
6938	/* Notify udev that device has changed */
6939	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
6940
6941	/* Update ctime/mtime for device path for libblkid */
6942	update_dev_time(device_path);
6943}
6944
6945/*
6946 * Update the size of all devices, which is used for writing out the
6947 * super blocks.
6948 */
6949void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
6950{
6951	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6952	struct btrfs_device *curr, *next;
6953
6954	if (list_empty(&fs_devices->resized_devices))
6955		return;
6956
6957	mutex_lock(&fs_devices->device_list_mutex);
6958	lock_chunks(fs_info->dev_root);
6959	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
6960				 resized_list) {
6961		list_del_init(&curr->resized_list);
6962		curr->commit_total_bytes = curr->disk_total_bytes;
6963	}
6964	unlock_chunks(fs_info->dev_root);
6965	mutex_unlock(&fs_devices->device_list_mutex);
6966}
6967
6968/* Must be invoked during the transaction commit */
6969void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
6970					struct btrfs_transaction *transaction)
6971{
6972	struct extent_map *em;
6973	struct map_lookup *map;
6974	struct btrfs_device *dev;
6975	int i;
6976
6977	if (list_empty(&transaction->pending_chunks))
6978		return;
6979
6980	/* In order to kick the device replace finish process */
6981	lock_chunks(root);
6982	list_for_each_entry(em, &transaction->pending_chunks, list) {
6983		map = em->map_lookup;
6984
6985		for (i = 0; i < map->num_stripes; i++) {
6986			dev = map->stripes[i].dev;
6987			dev->commit_bytes_used = dev->bytes_used;
6988		}
6989	}
6990	unlock_chunks(root);
6991}
6992
6993void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
6994{
6995	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6996	while (fs_devices) {
6997		fs_devices->fs_info = fs_info;
6998		fs_devices = fs_devices->seed;
6999	}
7000}
7001
7002void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7003{
7004	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7005	while (fs_devices) {
7006		fs_devices->fs_info = NULL;
7007		fs_devices = fs_devices->seed;
7008	}
7009}
7010
7011static void btrfs_close_one_device(struct btrfs_device *device)
7012{
7013	struct btrfs_fs_devices *fs_devices = device->fs_devices;
7014	struct btrfs_device *new_device;
7015	struct rcu_string *name;
7016
7017	if (device->bdev)
7018		fs_devices->open_devices--;
7019
7020	if (device->writeable &&
7021	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
7022		list_del_init(&device->dev_alloc_list);
7023		fs_devices->rw_devices--;
7024	}
7025
7026	if (device->missing)
7027		fs_devices->missing_devices--;
7028
7029	new_device = btrfs_alloc_device(NULL, &device->devid,
7030					device->uuid);
7031	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
7032
7033	/* Safe because we are under uuid_mutex */
7034	if (device->name) {
7035		name = rcu_string_strdup(device->name->str, GFP_NOFS);
7036		BUG_ON(!name); /* -ENOMEM */
7037		rcu_assign_pointer(new_device->name, name);
7038	}
7039
7040	list_replace_rcu(&device->dev_list, &new_device->dev_list);
7041	new_device->fs_devices = device->fs_devices;
7042
7043	call_rcu(&device->rcu, free_device);
7044}
v3.5.6
   1/*
   2 * Copyright (C) 2007 Oracle.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
  18#include <linux/sched.h>
  19#include <linux/bio.h>
  20#include <linux/slab.h>
  21#include <linux/buffer_head.h>
  22#include <linux/blkdev.h>
  23#include <linux/random.h>
  24#include <linux/iocontext.h>
  25#include <linux/capability.h>
  26#include <linux/ratelimit.h>
  27#include <linux/kthread.h>
 
 
  28#include <asm/div64.h>
  29#include "compat.h"
  30#include "ctree.h"
  31#include "extent_map.h"
  32#include "disk-io.h"
  33#include "transaction.h"
  34#include "print-tree.h"
  35#include "volumes.h"
 
  36#include "async-thread.h"
  37#include "check-integrity.h"
  38#include "rcu-string.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  39
  40static int init_first_rw_device(struct btrfs_trans_handle *trans,
  41				struct btrfs_root *root,
  42				struct btrfs_device *device);
  43static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
  44static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
 
  45static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
 
  46
  47static DEFINE_MUTEX(uuid_mutex);
  48static LIST_HEAD(fs_uuids);
 
 
 
 
  49
  50static void lock_chunks(struct btrfs_root *root)
  51{
  52	mutex_lock(&root->fs_info->chunk_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
  53}
  54
  55static void unlock_chunks(struct btrfs_root *root)
 
 
 
 
 
 
 
 
 
  56{
  57	mutex_unlock(&root->fs_info->chunk_mutex);
 
 
 
 
 
 
 
 
 
 
 
  58}
  59
  60static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
  61{
  62	struct btrfs_device *device;
  63	WARN_ON(fs_devices->opened);
  64	while (!list_empty(&fs_devices->devices)) {
  65		device = list_entry(fs_devices->devices.next,
  66				    struct btrfs_device, dev_list);
  67		list_del(&device->dev_list);
  68		rcu_string_free(device->name);
  69		kfree(device);
  70	}
  71	kfree(fs_devices);
  72}
  73
 
 
 
 
 
 
 
 
 
 
 
 
 
  74void btrfs_cleanup_fs_uuids(void)
  75{
  76	struct btrfs_fs_devices *fs_devices;
  77
  78	while (!list_empty(&fs_uuids)) {
  79		fs_devices = list_entry(fs_uuids.next,
  80					struct btrfs_fs_devices, list);
  81		list_del(&fs_devices->list);
  82		free_fs_devices(fs_devices);
  83	}
  84}
  85
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  86static noinline struct btrfs_device *__find_device(struct list_head *head,
  87						   u64 devid, u8 *uuid)
  88{
  89	struct btrfs_device *dev;
  90
  91	list_for_each_entry(dev, head, dev_list) {
  92		if (dev->devid == devid &&
  93		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
  94			return dev;
  95		}
  96	}
  97	return NULL;
  98}
  99
 100static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 101{
 102	struct btrfs_fs_devices *fs_devices;
 103
 104	list_for_each_entry(fs_devices, &fs_uuids, list) {
 105		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
 106			return fs_devices;
 107	}
 108	return NULL;
 109}
 110
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 111static void requeue_list(struct btrfs_pending_bios *pending_bios,
 112			struct bio *head, struct bio *tail)
 113{
 114
 115	struct bio *old_head;
 116
 117	old_head = pending_bios->head;
 118	pending_bios->head = head;
 119	if (pending_bios->tail)
 120		tail->bi_next = old_head;
 121	else
 122		pending_bios->tail = tail;
 123}
 124
 125/*
 126 * we try to collect pending bios for a device so we don't get a large
 127 * number of procs sending bios down to the same device.  This greatly
 128 * improves the schedulers ability to collect and merge the bios.
 129 *
 130 * But, it also turns into a long list of bios to process and that is sure
 131 * to eventually make the worker thread block.  The solution here is to
 132 * make some progress and then put this work struct back at the end of
 133 * the list if the block device is congested.  This way, multiple devices
 134 * can make progress from a single worker thread.
 135 */
 136static noinline void run_scheduled_bios(struct btrfs_device *device)
 137{
 138	struct bio *pending;
 139	struct backing_dev_info *bdi;
 140	struct btrfs_fs_info *fs_info;
 141	struct btrfs_pending_bios *pending_bios;
 142	struct bio *tail;
 143	struct bio *cur;
 144	int again = 0;
 145	unsigned long num_run;
 146	unsigned long batch_run = 0;
 147	unsigned long limit;
 148	unsigned long last_waited = 0;
 149	int force_reg = 0;
 150	int sync_pending = 0;
 151	struct blk_plug plug;
 152
 153	/*
 154	 * this function runs all the bios we've collected for
 155	 * a particular device.  We don't want to wander off to
 156	 * another device without first sending all of these down.
 157	 * So, setup a plug here and finish it off before we return
 158	 */
 159	blk_start_plug(&plug);
 160
 161	bdi = blk_get_backing_dev_info(device->bdev);
 162	fs_info = device->dev_root->fs_info;
 163	limit = btrfs_async_submit_limit(fs_info);
 164	limit = limit * 2 / 3;
 165
 166loop:
 167	spin_lock(&device->io_lock);
 168
 169loop_lock:
 170	num_run = 0;
 171
 172	/* take all the bios off the list at once and process them
 173	 * later on (without the lock held).  But, remember the
 174	 * tail and other pointers so the bios can be properly reinserted
 175	 * into the list if we hit congestion
 176	 */
 177	if (!force_reg && device->pending_sync_bios.head) {
 178		pending_bios = &device->pending_sync_bios;
 179		force_reg = 1;
 180	} else {
 181		pending_bios = &device->pending_bios;
 182		force_reg = 0;
 183	}
 184
 185	pending = pending_bios->head;
 186	tail = pending_bios->tail;
 187	WARN_ON(pending && !tail);
 188
 189	/*
 190	 * if pending was null this time around, no bios need processing
 191	 * at all and we can stop.  Otherwise it'll loop back up again
 192	 * and do an additional check so no bios are missed.
 193	 *
 194	 * device->running_pending is used to synchronize with the
 195	 * schedule_bio code.
 196	 */
 197	if (device->pending_sync_bios.head == NULL &&
 198	    device->pending_bios.head == NULL) {
 199		again = 0;
 200		device->running_pending = 0;
 201	} else {
 202		again = 1;
 203		device->running_pending = 1;
 204	}
 205
 206	pending_bios->head = NULL;
 207	pending_bios->tail = NULL;
 208
 209	spin_unlock(&device->io_lock);
 210
 211	while (pending) {
 212
 213		rmb();
 214		/* we want to work on both lists, but do more bios on the
 215		 * sync list than the regular list
 216		 */
 217		if ((num_run > 32 &&
 218		    pending_bios != &device->pending_sync_bios &&
 219		    device->pending_sync_bios.head) ||
 220		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
 221		    device->pending_bios.head)) {
 222			spin_lock(&device->io_lock);
 223			requeue_list(pending_bios, pending, tail);
 224			goto loop_lock;
 225		}
 226
 227		cur = pending;
 228		pending = pending->bi_next;
 229		cur->bi_next = NULL;
 230		atomic_dec(&fs_info->nr_async_bios);
 231
 232		if (atomic_read(&fs_info->nr_async_bios) < limit &&
 
 
 
 233		    waitqueue_active(&fs_info->async_submit_wait))
 234			wake_up(&fs_info->async_submit_wait);
 235
 236		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 237
 238		/*
 239		 * if we're doing the sync list, record that our
 240		 * plug has some sync requests on it
 241		 *
 242		 * If we're doing the regular list and there are
 243		 * sync requests sitting around, unplug before
 244		 * we add more
 245		 */
 246		if (pending_bios == &device->pending_sync_bios) {
 247			sync_pending = 1;
 248		} else if (sync_pending) {
 249			blk_finish_plug(&plug);
 250			blk_start_plug(&plug);
 251			sync_pending = 0;
 252		}
 253
 254		btrfsic_submit_bio(cur->bi_rw, cur);
 255		num_run++;
 256		batch_run++;
 257		if (need_resched())
 258			cond_resched();
 259
 260		/*
 261		 * we made progress, there is more work to do and the bdi
 262		 * is now congested.  Back off and let other work structs
 263		 * run instead
 264		 */
 265		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
 266		    fs_info->fs_devices->open_devices > 1) {
 267			struct io_context *ioc;
 268
 269			ioc = current->io_context;
 270
 271			/*
 272			 * the main goal here is that we don't want to
 273			 * block if we're going to be able to submit
 274			 * more requests without blocking.
 275			 *
 276			 * This code does two great things, it pokes into
 277			 * the elevator code from a filesystem _and_
 278			 * it makes assumptions about how batching works.
 279			 */
 280			if (ioc && ioc->nr_batch_requests > 0 &&
 281			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
 282			    (last_waited == 0 ||
 283			     ioc->last_waited == last_waited)) {
 284				/*
 285				 * we want to go through our batch of
 286				 * requests and stop.  So, we copy out
 287				 * the ioc->last_waited time and test
 288				 * against it before looping
 289				 */
 290				last_waited = ioc->last_waited;
 291				if (need_resched())
 292					cond_resched();
 293				continue;
 294			}
 295			spin_lock(&device->io_lock);
 296			requeue_list(pending_bios, pending, tail);
 297			device->running_pending = 1;
 298
 299			spin_unlock(&device->io_lock);
 300			btrfs_requeue_work(&device->work);
 
 301			goto done;
 302		}
 303		/* unplug every 64 requests just for good measure */
 304		if (batch_run % 64 == 0) {
 305			blk_finish_plug(&plug);
 306			blk_start_plug(&plug);
 307			sync_pending = 0;
 308		}
 309	}
 310
 311	cond_resched();
 312	if (again)
 313		goto loop;
 314
 315	spin_lock(&device->io_lock);
 316	if (device->pending_bios.head || device->pending_sync_bios.head)
 317		goto loop_lock;
 318	spin_unlock(&device->io_lock);
 319
 320done:
 321	blk_finish_plug(&plug);
 322}
 323
 324static void pending_bios_fn(struct btrfs_work *work)
 325{
 326	struct btrfs_device *device;
 327
 328	device = container_of(work, struct btrfs_device, work);
 329	run_scheduled_bios(device);
 330}
 331
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 332static noinline int device_list_add(const char *path,
 333			   struct btrfs_super_block *disk_super,
 334			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
 335{
 336	struct btrfs_device *device;
 337	struct btrfs_fs_devices *fs_devices;
 338	struct rcu_string *name;
 
 339	u64 found_transid = btrfs_super_generation(disk_super);
 340
 341	fs_devices = find_fsid(disk_super->fsid);
 342	if (!fs_devices) {
 343		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
 344		if (!fs_devices)
 345			return -ENOMEM;
 346		INIT_LIST_HEAD(&fs_devices->devices);
 347		INIT_LIST_HEAD(&fs_devices->alloc_list);
 348		list_add(&fs_devices->list, &fs_uuids);
 349		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
 350		fs_devices->latest_devid = devid;
 351		fs_devices->latest_trans = found_transid;
 352		mutex_init(&fs_devices->device_list_mutex);
 353		device = NULL;
 354	} else {
 355		device = __find_device(&fs_devices->devices, devid,
 356				       disk_super->dev_item.uuid);
 357	}
 358	if (!device) {
 359		if (fs_devices->opened)
 360			return -EBUSY;
 361
 362		device = kzalloc(sizeof(*device), GFP_NOFS);
 363		if (!device) {
 364			/* we can safely leave the fs_devices entry around */
 365			return -ENOMEM;
 366		}
 367		device->devid = devid;
 368		device->dev_stats_valid = 0;
 369		device->work.func = pending_bios_fn;
 370		memcpy(device->uuid, disk_super->dev_item.uuid,
 371		       BTRFS_UUID_SIZE);
 372		spin_lock_init(&device->io_lock);
 373
 374		name = rcu_string_strdup(path, GFP_NOFS);
 375		if (!name) {
 376			kfree(device);
 377			return -ENOMEM;
 378		}
 379		rcu_assign_pointer(device->name, name);
 380		INIT_LIST_HEAD(&device->dev_alloc_list);
 381
 382		/* init readahead state */
 383		spin_lock_init(&device->reada_lock);
 384		device->reada_curr_zone = NULL;
 385		atomic_set(&device->reada_in_flight, 0);
 386		device->reada_next = 0;
 387		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
 388		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
 389
 390		mutex_lock(&fs_devices->device_list_mutex);
 391		list_add_rcu(&device->dev_list, &fs_devices->devices);
 392		mutex_unlock(&fs_devices->device_list_mutex);
 393
 394		device->fs_devices = fs_devices;
 395		fs_devices->num_devices++;
 396	} else if (!device->name || strcmp(device->name->str, path)) {
 397		name = rcu_string_strdup(path, GFP_NOFS);
 398		if (!name)
 399			return -ENOMEM;
 400		rcu_string_free(device->name);
 401		rcu_assign_pointer(device->name, name);
 402		if (device->missing) {
 403			fs_devices->missing_devices--;
 404			device->missing = 0;
 405		}
 406	}
 407
 408	if (found_transid > fs_devices->latest_trans) {
 409		fs_devices->latest_devid = devid;
 410		fs_devices->latest_trans = found_transid;
 411	}
 412	*fs_devices_ret = fs_devices;
 413	return 0;
 414}
 415
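/*
 * Make an in-memory copy of @orig and all of its devices.  Used when
 * sprouting, so the original fsid stays registered while the mounted
 * filesystem moves to a new one.
 */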
 416static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 417{
 418	struct btrfs_fs_devices *fs_devices;
 419	struct btrfs_device *device;
 420	struct btrfs_device *orig_dev;
 421
 422	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
 423	if (!fs_devices)
 424		return ERR_PTR(-ENOMEM);
 425
 426	INIT_LIST_HEAD(&fs_devices->devices);
 427	INIT_LIST_HEAD(&fs_devices->alloc_list);
 428	INIT_LIST_HEAD(&fs_devices->list);
 429	mutex_init(&fs_devices->device_list_mutex);
 430	fs_devices->latest_devid = orig->latest_devid;
 431	fs_devices->latest_trans = orig->latest_trans;
 432	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
 433
 434	/* We have held the volume lock, so it is safe to get the devices. */
 435	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
 436		struct rcu_string *name;
 437
 438		device = kzalloc(sizeof(*device), GFP_NOFS);
 439		if (!device)
 440			goto error;
 441
 442		/*
 443		 * This is ok to do without rcu read locked because we hold the
 444		 * uuid mutex so nothing we touch in here is going to disappear.
 445		 */
 446		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
 447		if (!name) {
 448			kfree(device);
 449			goto error;
 450		}
 451		rcu_assign_pointer(device->name, name);
 452
 453		device->devid = orig_dev->devid;
 454		device->work.func = pending_bios_fn;
 455		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
 456		spin_lock_init(&device->io_lock);
 457		INIT_LIST_HEAD(&device->dev_list);
 458		INIT_LIST_HEAD(&device->dev_alloc_list);
 459
 460		list_add(&device->dev_list, &fs_devices->devices);
 461		device->fs_devices = fs_devices;
 462		fs_devices->num_devices++;
 463	}
 464	return fs_devices;
 465error:
 466	free_fs_devices(fs_devices);
 467	return ERR_PTR(-ENOMEM);
 468}
 469
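/*
 * Release the devices that were scanned but turned out not to be part of
 * the filesystem metadata, and record the device with the highest
 * generation as latest_bdev.
 */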
 470void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
 471{
 472	struct btrfs_device *device, *next;
 473
 474	struct block_device *latest_bdev = NULL;
 475	u64 latest_devid = 0;
 476	u64 latest_transid = 0;
 477
 478	mutex_lock(&uuid_mutex);
 479again:
 480	/* This is the initialized path, so it is safe to release the devices. */
 481	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
 482		if (device->in_fs_metadata) {
 483			if (!latest_transid ||
 484			    device->generation > latest_transid) {
 485				latest_devid = device->devid;
 486				latest_transid = device->generation;
 487				latest_bdev = device->bdev;
 488			}
 489			continue;
 490		}
 491
 492		if (device->bdev) {
 493			blkdev_put(device->bdev, device->mode);
 494			device->bdev = NULL;
 495			fs_devices->open_devices--;
 496		}
 497		if (device->writeable) {
 498			list_del_init(&device->dev_alloc_list);
 499			device->writeable = 0;
 500			fs_devices->rw_devices--;
 501		}
 502		list_del_init(&device->dev_list);
 503		fs_devices->num_devices--;
 504		rcu_string_free(device->name);
 505		kfree(device);
 506	}
 507
 508	if (fs_devices->seed) {
 509		fs_devices = fs_devices->seed;
 510		goto again;
 511	}
 512
 513	fs_devices->latest_bdev = latest_bdev;
 514	fs_devices->latest_devid = latest_devid;
 515	fs_devices->latest_trans = latest_transid;
 516
 517	mutex_unlock(&uuid_mutex);
 518}
 519
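/*
 * Device teardown happens in two deferred steps: free_device() below is the
 * RCU callback and only schedules __free_device() on a workqueue, since
 * blkdev_put() may sleep and RCU callbacks must not.
 */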
 520static void __free_device(struct work_struct *work)
 521{
 522	struct btrfs_device *device;
 523
 524	device = container_of(work, struct btrfs_device, rcu_work);
 525
 526	if (device->bdev)
 527		blkdev_put(device->bdev, device->mode);
 528
 529	rcu_string_free(device->name);
 530	kfree(device);
 531}
 532
 533static void free_device(struct rcu_head *head)
 534{
 535	struct btrfs_device *device;
 536
 537	device = container_of(head, struct btrfs_device, rcu);
 538
 539	INIT_WORK(&device->rcu_work, __free_device);
 540	schedule_work(&device->rcu_work);
 541}
 542
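/*
 * Drop one open reference on @fs_devices.  On the last close, every device
 * on the list is replaced with a bare in-memory copy (no bdev, not
 * writeable), so RCU readers still see a valid entry while the original is
 * released through call_rcu().
 */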
 543static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 544{
 545	struct btrfs_device *device;
 546
 547	if (--fs_devices->opened > 0)
 548		return 0;
 549
 550	mutex_lock(&fs_devices->device_list_mutex);
 551	list_for_each_entry(device, &fs_devices->devices, dev_list) {
 552		struct btrfs_device *new_device;
 553		struct rcu_string *name;
 554
 555		if (device->bdev)
 556			fs_devices->open_devices--;
 557
 558		if (device->writeable) {
 559			list_del_init(&device->dev_alloc_list);
 560			fs_devices->rw_devices--;
 561		}
 562
 563		if (device->can_discard)
 564			fs_devices->num_can_discard--;
 565
 566		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
 567		BUG_ON(!new_device); /* -ENOMEM */
 568		memcpy(new_device, device, sizeof(*new_device));
 569
 570		/* Safe because we are under uuid_mutex */
 571		name = rcu_string_strdup(device->name->str, GFP_NOFS);
 572		BUG_ON(device->name && !name); /* -ENOMEM */
 573		rcu_assign_pointer(new_device->name, name);
 574		new_device->bdev = NULL;
 575		new_device->writeable = 0;
 576		new_device->in_fs_metadata = 0;
 577		new_device->can_discard = 0;
 578		list_replace_rcu(&device->dev_list, &new_device->dev_list);
 579
 580		call_rcu(&device->rcu, free_device);
 581	}
 582	mutex_unlock(&fs_devices->device_list_mutex);
 583
 584	WARN_ON(fs_devices->open_devices);
 585	WARN_ON(fs_devices->rw_devices);
 586	fs_devices->opened = 0;
 587	fs_devices->seeding = 0;
 588
 589	return 0;
 590}
 591
 592int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 593{
 594	struct btrfs_fs_devices *seed_devices = NULL;
 595	int ret;
 596
 597	mutex_lock(&uuid_mutex);
 598	ret = __btrfs_close_devices(fs_devices);
 599	if (!fs_devices->opened) {
 600		seed_devices = fs_devices->seed;
 601		fs_devices->seed = NULL;
 602	}
 603	mutex_unlock(&uuid_mutex);
 604
 605	while (seed_devices) {
 606		fs_devices = seed_devices;
 607		seed_devices = fs_devices->seed;
 608		__btrfs_close_devices(fs_devices);
 609		free_fs_devices(fs_devices);
 610	}
 611	return ret;
 612}
 613
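/*
 * Open every device on the list that has a recorded name and no open bdev
 * yet, validate its super block against the cached devid/uuid, and pick the
 * bdev with the highest generation as latest_bdev.  Individual failures are
 * skipped; the call only fails if no device could be opened at all.
 */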
 614static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 615				fmode_t flags, void *holder)
 616{
 617	struct request_queue *q;
 618	struct block_device *bdev;
 619	struct list_head *head = &fs_devices->devices;
 620	struct btrfs_device *device;
 621	struct block_device *latest_bdev = NULL;
 622	struct buffer_head *bh;
 623	struct btrfs_super_block *disk_super;
 624	u64 latest_devid = 0;
 625	u64 latest_transid = 0;
 626	u64 devid;
 627	int seeding = 1;
 628	int ret = 0;
 629
 630	flags |= FMODE_EXCL;
 631
 632	list_for_each_entry(device, head, dev_list) {
 633		if (device->bdev)
 634			continue;
 635		if (!device->name)
 636			continue;
 637
 638		bdev = blkdev_get_by_path(device->name->str, flags, holder);
 639		if (IS_ERR(bdev)) {
 640			printk(KERN_INFO "open %s failed\n", device->name->str);
 641			goto error;
 642		}
 643		filemap_write_and_wait(bdev->bd_inode->i_mapping);
 644		invalidate_bdev(bdev);
 645		set_blocksize(bdev, 4096);
 646
 647		bh = btrfs_read_dev_super(bdev);
 648		if (!bh)
 649			goto error_close;
 650
 651		disk_super = (struct btrfs_super_block *)bh->b_data;
 652		devid = btrfs_stack_device_id(&disk_super->dev_item);
 653		if (devid != device->devid)
 654			goto error_brelse;
 655
 656		if (memcmp(device->uuid, disk_super->dev_item.uuid,
 657			   BTRFS_UUID_SIZE))
 658			goto error_brelse;
 659
 660		device->generation = btrfs_super_generation(disk_super);
 661		if (!latest_transid || device->generation > latest_transid) {
 662			latest_devid = devid;
 663			latest_transid = device->generation;
 664			latest_bdev = bdev;
 665		}
 666
 667		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
 668			device->writeable = 0;
 669		} else {
 670			device->writeable = !bdev_read_only(bdev);
 671			seeding = 0;
 672		}
 673
 674		q = bdev_get_queue(bdev);
 675		if (blk_queue_discard(q)) {
 676			device->can_discard = 1;
 677			fs_devices->num_can_discard++;
 678		}
 679
 680		device->bdev = bdev;
 681		device->in_fs_metadata = 0;
 682		device->mode = flags;
 683
 684		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
 685			fs_devices->rotating = 1;
 686
 687		fs_devices->open_devices++;
 688		if (device->writeable) {
 689			fs_devices->rw_devices++;
 690			list_add(&device->dev_alloc_list,
 691				 &fs_devices->alloc_list);
 692		}
 693		brelse(bh);
 694		continue;
 695
 696error_brelse:
 697		brelse(bh);
 698error_close:
 699		blkdev_put(bdev, flags);
 700error:
 701		continue;
 702	}
 703	if (fs_devices->open_devices == 0) {
 704		ret = -EINVAL;
 705		goto out;
 706	}
 707	fs_devices->seeding = seeding;
 708	fs_devices->opened = 1;
 709	fs_devices->latest_bdev = latest_bdev;
 710	fs_devices->latest_devid = latest_devid;
 711	fs_devices->latest_trans = latest_transid;
 712	fs_devices->total_rw_bytes = 0;
 713out:
 714	return ret;
 715}
 716
 717int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 718		       fmode_t flags, void *holder)
 719{
 720	int ret;
 721
 722	mutex_lock(&uuid_mutex);
 723	if (fs_devices->opened) {
 724		fs_devices->opened++;
 725		ret = 0;
 726	} else {
 727		ret = __btrfs_open_devices(fs_devices, flags, holder);
 728	}
 729	mutex_unlock(&uuid_mutex);
 730	return ret;
 731}
 732
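/*
 * Read the super block of the device at @path, print what was found, and
 * register the device in the global list via device_list_add() so a later
 * mount can assemble the whole filesystem.
 */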
 733int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 734			  struct btrfs_fs_devices **fs_devices_ret)
 735{
 736	struct btrfs_super_block *disk_super;
 737	struct block_device *bdev;
 738	struct buffer_head *bh;
 739	int ret;
 740	u64 devid;
 741	u64 transid;
 742
 743	flags |= FMODE_EXCL;
 744	bdev = blkdev_get_by_path(path, flags, holder);
 745
 746	if (IS_ERR(bdev)) {
 747		ret = PTR_ERR(bdev);
 748		goto error;
 749	}
 750
 751	mutex_lock(&uuid_mutex);
 752	ret = set_blocksize(bdev, 4096);
 753	if (ret)
 754		goto error_close;
 755	bh = btrfs_read_dev_super(bdev);
 756	if (!bh) {
 757		ret = -EINVAL;
 758		goto error_close;
 759	}
 760	disk_super = (struct btrfs_super_block *)bh->b_data;
 761	devid = btrfs_stack_device_id(&disk_super->dev_item);
 762	transid = btrfs_super_generation(disk_super);
 763	if (disk_super->label[0])
 764		printk(KERN_INFO "device label %s ", disk_super->label);
 765	else
 766		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
 767	printk(KERN_CONT "devid %llu transid %llu %s\n",
 768	       (unsigned long long)devid, (unsigned long long)transid, path);
 769	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
 770
 771	brelse(bh);
 772error_close:
 773	mutex_unlock(&uuid_mutex);
 774	blkdev_put(bdev, flags);
 775error:
 776	return ret;
 777}
 778
 779/* helper to account the used device space in the range */
 780int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 781				   u64 end, u64 *length)
 782{
 783	struct btrfs_key key;
 784	struct btrfs_root *root = device->dev_root;
 785	struct btrfs_dev_extent *dev_extent;
 786	struct btrfs_path *path;
 787	u64 extent_end;
 788	int ret;
 789	int slot;
 790	struct extent_buffer *l;
 791
 792	*length = 0;
 793
 794	if (start >= device->total_bytes)
 795		return 0;
 796
 797	path = btrfs_alloc_path();
 798	if (!path)
 799		return -ENOMEM;
 800	path->reada = 2;
 801
 802	key.objectid = device->devid;
 803	key.offset = start;
 804	key.type = BTRFS_DEV_EXTENT_KEY;
 805
 806	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 807	if (ret < 0)
 808		goto out;
 809	if (ret > 0) {
 810		ret = btrfs_previous_item(root, path, key.objectid, key.type);
 811		if (ret < 0)
 812			goto out;
 813	}
 814
 815	while (1) {
 816		l = path->nodes[0];
 817		slot = path->slots[0];
 818		if (slot >= btrfs_header_nritems(l)) {
 819			ret = btrfs_next_leaf(root, path);
 820			if (ret == 0)
 821				continue;
 822			if (ret < 0)
 823				goto out;
 824
 825			break;
 826		}
 827		btrfs_item_key_to_cpu(l, &key, slot);
 828
 829		if (key.objectid < device->devid)
 830			goto next;
 831
 832		if (key.objectid > device->devid)
 833			break;
 834
 835		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
 836			goto next;
 837
 838		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
 839		extent_end = key.offset + btrfs_dev_extent_length(l,
 840								  dev_extent);
 841		if (key.offset <= start && extent_end > end) {
 842			*length = end - start + 1;
 843			break;
 844		} else if (key.offset <= start && extent_end > start)
 845			*length += extent_end - start;
 846		else if (key.offset > start && extent_end <= end)
 847			*length += extent_end - key.offset;
 848		else if (key.offset > start && key.offset <= end) {
 849			*length += end - key.offset + 1;
 850			break;
 851		} else if (key.offset > end)
 852			break;
 853
 854next:
 855		path->slots[0]++;
 856	}
 857	ret = 0;
 858out:
 859	btrfs_free_path(path);
 860	return ret;
 861}
 862
 863/*
 864 * find_free_dev_extent - find free space in the specified device
 865 * @device:	the device which we search the free space in
 866 * @num_bytes:	the size of the free space that we need
 867 * @start:	store the start of the free space.
 868 * @len:	the size of the free space that we find, or the size of the max
 869 * 		free space if we don't find suitable free space
 870 *
 871 * this uses a pretty simple search, the expectation is that it is
 872 * called very infrequently and that a given device has a small number
 873 * of extents
 874 *
 875 * @start is used to store the start of the free space if we find it. But if
 876 * we don't find suitable free space, it will be used to store the start
 877 * position of the max free space.
 878 *
 879 * @len is used to store the size of the free space that we find.
 880 * But if we don't find suitable free space, it is used to store the size of
 881 * the max free space.
 882 */
 883int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
 884			 u64 *start, u64 *len)
 885{
 886	struct btrfs_key key;
 887	struct btrfs_root *root = device->dev_root;
 888	struct btrfs_dev_extent *dev_extent;
 889	struct btrfs_path *path;
 890	u64 hole_size;
 891	u64 max_hole_start;
 892	u64 max_hole_size;
 893	u64 extent_end;
 894	u64 search_start;
 895	u64 search_end = device->total_bytes;
 896	int ret;
 897	int slot;
 898	struct extent_buffer *l;
 899
 900	/* FIXME use last free of some kind */
 901
 902	/* we don't want to overwrite the superblock on the drive,
 903	 * so we make sure to start at an offset of at least 1MB
 904	 */
 905	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
 906
 907	max_hole_start = search_start;
 908	max_hole_size = 0;
 909	hole_size = 0;
 910
 911	if (search_start >= search_end) {
 912		ret = -ENOSPC;
 913		goto error;
 914	}
 915
 916	path = btrfs_alloc_path();
 917	if (!path) {
 918		ret = -ENOMEM;
 919		goto error;
 920	}
 921	path->reada = 2;
 922
 923	key.objectid = device->devid;
 924	key.offset = search_start;
 925	key.type = BTRFS_DEV_EXTENT_KEY;
 926
 927	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 928	if (ret < 0)
 929		goto out;
 930	if (ret > 0) {
 931		ret = btrfs_previous_item(root, path, key.objectid, key.type);
 932		if (ret < 0)
 933			goto out;
 934	}
 935
 936	while (1) {
 937		l = path->nodes[0];
 938		slot = path->slots[0];
 939		if (slot >= btrfs_header_nritems(l)) {
 940			ret = btrfs_next_leaf(root, path);
 941			if (ret == 0)
 942				continue;
 943			if (ret < 0)
 944				goto out;
 945
 946			break;
 947		}
 948		btrfs_item_key_to_cpu(l, &key, slot);
 949
 950		if (key.objectid < device->devid)
 951			goto next;
 952
 953		if (key.objectid > device->devid)
 954			break;
 955
 956		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
 957			goto next;
 958
 959		if (key.offset > search_start) {
 960			hole_size = key.offset - search_start;
 961
 962			if (hole_size > max_hole_size) {
 963				max_hole_start = search_start;
 964				max_hole_size = hole_size;
 965			}
 966
 967			/*
 968			 * If this free space is greater than what we need,
 969			 * it must be the max free space that we have found
 970			 * until now, so max_hole_start must point to the start
 971			 * of this free space and the length of this free space
 972			 * is stored in max_hole_size. Thus, we return
 973			 * max_hole_start and max_hole_size and go back to the
 974			 * caller.
 975			 */
 976			if (hole_size >= num_bytes) {
 977				ret = 0;
 978				goto out;
 979			}
 980		}
 981
 982		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
 983		extent_end = key.offset + btrfs_dev_extent_length(l,
 984								  dev_extent);
 985		if (extent_end > search_start)
 986			search_start = extent_end;
 987next:
 988		path->slots[0]++;
 989		cond_resched();
 990	}
 991
 992	/*
 993	 * At this point, search_start should be the end of
 994	 * allocated dev extents, and when shrinking the device,
 995	 * search_end may be smaller than search_start.
 996	 */
 997	if (search_end > search_start)
 998		hole_size = search_end - search_start;
 999
1000	if (hole_size > max_hole_size) {
1001		max_hole_start = search_start;
1002		max_hole_size = hole_size;
1003	}
1004
1005	/* See above. */
1006	if (hole_size < num_bytes)
1007		ret = -ENOSPC;
1008	else
1009		ret = 0;
1010
1011out:
1012	btrfs_free_path(path);
1013error:
1014	*start = max_hole_start;
1015	if (len)
1016		*len = max_hole_size;
1017	return ret;
1018}
1019
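/*
 * Delete the dev extent item covering @start on @device and return the
 * freed bytes to the free_chunk_space accounting.
 */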
1020static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1021			  struct btrfs_device *device,
1022			  u64 start)
1023{
1024	int ret;
1025	struct btrfs_path *path;
1026	struct btrfs_root *root = device->dev_root;
1027	struct btrfs_key key;
1028	struct btrfs_key found_key;
1029	struct extent_buffer *leaf = NULL;
1030	struct btrfs_dev_extent *extent = NULL;
1031
1032	path = btrfs_alloc_path();
1033	if (!path)
1034		return -ENOMEM;
1035
1036	key.objectid = device->devid;
1037	key.offset = start;
1038	key.type = BTRFS_DEV_EXTENT_KEY;
1039again:
1040	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1041	if (ret > 0) {
1042		ret = btrfs_previous_item(root, path, key.objectid,
1043					  BTRFS_DEV_EXTENT_KEY);
1044		if (ret)
1045			goto out;
1046		leaf = path->nodes[0];
1047		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1048		extent = btrfs_item_ptr(leaf, path->slots[0],
1049					struct btrfs_dev_extent);
1050		BUG_ON(found_key.offset > start || found_key.offset +
1051		       btrfs_dev_extent_length(leaf, extent) < start);
1052		key = found_key;
1053		btrfs_release_path(path);
1054		goto again;
1055	} else if (ret == 0) {
1056		leaf = path->nodes[0];
1057		extent = btrfs_item_ptr(leaf, path->slots[0],
1058					struct btrfs_dev_extent);
1059	} else {
1060		btrfs_error(root->fs_info, ret, "Slot search failed");
1061		goto out;
1062	}
1063
1064	if (device->bytes_used > 0) {
1065		u64 len = btrfs_dev_extent_length(leaf, extent);
1066		device->bytes_used -= len;
1067		spin_lock(&root->fs_info->free_chunk_lock);
1068		root->fs_info->free_chunk_space += len;
1069		spin_unlock(&root->fs_info->free_chunk_lock);
1070	}
1071	ret = btrfs_del_item(trans, root, path);
1072	if (ret) {
1073		btrfs_error(root->fs_info, ret,
1074			    "Failed to remove dev extent item");
1075	}
1076out:
1077	btrfs_free_path(path);
1078	return ret;
1079}
1080
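/*
 * Insert a dev extent item for [@start, @start + @num_bytes) on @device,
 * recording which chunk the extent backs.
 */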
1081int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1082			   struct btrfs_device *device,
1083			   u64 chunk_tree, u64 chunk_objectid,
1084			   u64 chunk_offset, u64 start, u64 num_bytes)
1085{
1086	int ret;
1087	struct btrfs_path *path;
1088	struct btrfs_root *root = device->dev_root;
1089	struct btrfs_dev_extent *extent;
1090	struct extent_buffer *leaf;
1091	struct btrfs_key key;
1092
1093	WARN_ON(!device->in_fs_metadata);
1094	path = btrfs_alloc_path();
1095	if (!path)
1096		return -ENOMEM;
1097
1098	key.objectid = device->devid;
1099	key.offset = start;
1100	key.type = BTRFS_DEV_EXTENT_KEY;
1101	ret = btrfs_insert_empty_item(trans, root, path, &key,
1102				      sizeof(*extent));
1103	if (ret)
1104		goto out;
1105
1106	leaf = path->nodes[0];
1107	extent = btrfs_item_ptr(leaf, path->slots[0],
1108				struct btrfs_dev_extent);
1109	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1110	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1111	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1112
1113	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1114		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1115		    BTRFS_UUID_SIZE);
1116
1117	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1118	btrfs_mark_buffer_dirty(leaf);
1119out:
1120	btrfs_free_path(path);
1121	return ret;
1122}
1123
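/*
 * Find the offset just past the last chunk item owned by @objectid, i.e.
 * the first logical offset where a new chunk can be placed.
 */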
1124static noinline int find_next_chunk(struct btrfs_root *root,
1125				    u64 objectid, u64 *offset)
1126{
1127	struct btrfs_path *path;
1128	int ret;
1129	struct btrfs_key key;
1130	struct btrfs_chunk *chunk;
1131	struct btrfs_key found_key;
1132
1133	path = btrfs_alloc_path();
1134	if (!path)
1135		return -ENOMEM;
1136
1137	key.objectid = objectid;
1138	key.offset = (u64)-1;
1139	key.type = BTRFS_CHUNK_ITEM_KEY;
1140
1141	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1142	if (ret < 0)
1143		goto error;
1144
1145	BUG_ON(ret == 0); /* Corruption */
1146
1147	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1148	if (ret) {
1149		*offset = 0;
1150	} else {
1151		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1152				      path->slots[0]);
1153		if (found_key.objectid != objectid)
1154			*offset = 0;
1155		else {
1156			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1157					       struct btrfs_chunk);
1158			*offset = found_key.offset +
1159				btrfs_chunk_length(path->nodes[0], chunk);
1160		}
1161	}
1162	ret = 0;
1163error:
1164	btrfs_free_path(path);
1165	return ret;
1166}
1167
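/* Pick the next unused devid: one past the highest existing dev item. */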
1168static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1169{
1170	int ret;
1171	struct btrfs_key key;
1172	struct btrfs_key found_key;
1173	struct btrfs_path *path;
1174
1175	root = root->fs_info->chunk_root;
1176
1177	path = btrfs_alloc_path();
1178	if (!path)
1179		return -ENOMEM;
1180
1181	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1182	key.type = BTRFS_DEV_ITEM_KEY;
1183	key.offset = (u64)-1;
1184
1185	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1186	if (ret < 0)
1187		goto error;
1188
1189	BUG_ON(ret == 0); /* Corruption */
1190
1191	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1192				  BTRFS_DEV_ITEM_KEY);
1193	if (ret) {
1194		*objectid = 1;
1195	} else {
1196		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1197				      path->slots[0]);
1198		*objectid = found_key.offset + 1;
1199	}
1200	ret = 0;
1201error:
1202	btrfs_free_path(path);
1203	return ret;
1204}
1205
1206/*
1207 * the device information is stored in the chunk root
1208 * the btrfs_device struct should be fully filled in
1209 */
1210int btrfs_add_device(struct btrfs_trans_handle *trans,
1211		     struct btrfs_root *root,
1212		     struct btrfs_device *device)
1213{
1214	int ret;
1215	struct btrfs_path *path;
1216	struct btrfs_dev_item *dev_item;
1217	struct extent_buffer *leaf;
1218	struct btrfs_key key;
1219	unsigned long ptr;
1220
1221	root = root->fs_info->chunk_root;
1222
1223	path = btrfs_alloc_path();
1224	if (!path)
1225		return -ENOMEM;
1226
1227	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1228	key.type = BTRFS_DEV_ITEM_KEY;
1229	key.offset = device->devid;
1230
1231	ret = btrfs_insert_empty_item(trans, root, path, &key,
1232				      sizeof(*dev_item));
1233	if (ret)
1234		goto out;
1235
1236	leaf = path->nodes[0];
1237	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1238
1239	btrfs_set_device_id(leaf, dev_item, device->devid);
1240	btrfs_set_device_generation(leaf, dev_item, 0);
1241	btrfs_set_device_type(leaf, dev_item, device->type);
1242	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1243	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1244	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1245	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1246	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1247	btrfs_set_device_group(leaf, dev_item, 0);
1248	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1249	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1250	btrfs_set_device_start_offset(leaf, dev_item, 0);
1251
1252	ptr = (unsigned long)btrfs_device_uuid(dev_item);
1253	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1254	ptr = (unsigned long)btrfs_device_fsid(dev_item);
1255	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1256	btrfs_mark_buffer_dirty(leaf);
1257
1258	ret = 0;
1259out:
1260	btrfs_free_path(path);
1261	return ret;
1262}
1263
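/*
 * Delete the dev item of @device from the chunk tree, in a transaction of
 * its own that is committed before returning.
 */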
1264static int btrfs_rm_dev_item(struct btrfs_root *root,
1265			     struct btrfs_device *device)
1266{
1267	int ret;
1268	struct btrfs_path *path;
1269	struct btrfs_key key;
1270	struct btrfs_trans_handle *trans;
1271
1272	root = root->fs_info->chunk_root;
1273
1274	path = btrfs_alloc_path();
1275	if (!path)
1276		return -ENOMEM;
1277
1278	trans = btrfs_start_transaction(root, 0);
1279	if (IS_ERR(trans)) {
1280		btrfs_free_path(path);
1281		return PTR_ERR(trans);
1282	}
1283	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1284	key.type = BTRFS_DEV_ITEM_KEY;
1285	key.offset = device->devid;
1286	lock_chunks(root);
1287
1288	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1289	if (ret < 0)
1290		goto out;
1291
1292	if (ret > 0) {
1293		ret = -ENOENT;
1294		goto out;
1295	}
1296
1297	ret = btrfs_del_item(trans, root, path);
1298	if (ret)
1299		goto out;
1300out:
1301	btrfs_free_path(path);
1302	unlock_chunks(root);
1303	btrfs_commit_transaction(trans, root);
1304	return ret;
1305}
1306
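/*
 * Remove a device from a mounted filesystem.  Passing "missing" as
 * @device_path selects a device that is present in the metadata but has no
 * block device attached.  The device is shrunk to zero, its items are
 * deleted and, for a writable device, the super block magic is wiped so the
 * device is no longer detected as part of the filesystem.
 */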
1307int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1308{
1309	struct btrfs_device *device;
1310	struct btrfs_device *next_device;
1311	struct block_device *bdev;
1312	struct buffer_head *bh = NULL;
1313	struct btrfs_super_block *disk_super;
1314	struct btrfs_fs_devices *cur_devices;
1315	u64 all_avail;
1316	u64 devid;
1317	u64 num_devices;
1318	u8 *dev_uuid;
1319	int ret = 0;
1320	bool clear_super = false;
1321
1322	mutex_lock(&uuid_mutex);
1323
1324	all_avail = root->fs_info->avail_data_alloc_bits |
1325		root->fs_info->avail_system_alloc_bits |
1326		root->fs_info->avail_metadata_alloc_bits;
1327
1328	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1329	    root->fs_info->fs_devices->num_devices <= 4) {
1330		printk(KERN_ERR "btrfs: unable to go below four devices "
1331		       "on raid10\n");
1332		ret = -EINVAL;
1333		goto out;
1334	}
1335
1336	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1337	    root->fs_info->fs_devices->num_devices <= 2) {
1338		printk(KERN_ERR "btrfs: unable to go below two "
1339		       "devices on raid1\n");
1340		ret = -EINVAL;
1341		goto out;
1342	}
1343
1344	if (strcmp(device_path, "missing") == 0) {
1345		struct list_head *devices;
1346		struct btrfs_device *tmp;
1347
1348		device = NULL;
1349		devices = &root->fs_info->fs_devices->devices;
1350		/*
1351		 * It is safe to read the devices since the volume_mutex
1352		 * is held.
1353		 */
1354		list_for_each_entry(tmp, devices, dev_list) {
1355			if (tmp->in_fs_metadata && !tmp->bdev) {
1356				device = tmp;
1357				break;
1358			}
1359		}
1360		bdev = NULL;
1361		bh = NULL;
1362		disk_super = NULL;
1363		if (!device) {
1364			printk(KERN_ERR "btrfs: no missing devices found to "
1365			       "remove\n");
1366			goto out;
1367		}
1368	} else {
1369		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
1370					  root->fs_info->bdev_holder);
1371		if (IS_ERR(bdev)) {
1372			ret = PTR_ERR(bdev);
1373			goto out;
1374		}
1375
1376		set_blocksize(bdev, 4096);
1377		invalidate_bdev(bdev);
1378		bh = btrfs_read_dev_super(bdev);
1379		if (!bh) {
1380			ret = -EINVAL;
1381			goto error_close;
1382		}
1383		disk_super = (struct btrfs_super_block *)bh->b_data;
1384		devid = btrfs_stack_device_id(&disk_super->dev_item);
1385		dev_uuid = disk_super->dev_item.uuid;
1386		device = btrfs_find_device(root, devid, dev_uuid,
1387					   disk_super->fsid);
1388		if (!device) {
1389			ret = -ENOENT;
1390			goto error_brelse;
1391		}
1392	}
1393
1394	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1395		printk(KERN_ERR "btrfs: unable to remove the only writeable "
1396		       "device\n");
1397		ret = -EINVAL;
1398		goto error_brelse;
1399	}
1400
1401	if (device->writeable) {
1402		lock_chunks(root);
1403		list_del_init(&device->dev_alloc_list);
1404		unlock_chunks(root);
1405		root->fs_info->fs_devices->rw_devices--;
1406		clear_super = true;
1407	}
1408
1409	ret = btrfs_shrink_device(device, 0);
1410	if (ret)
1411		goto error_undo;
1412
1413	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1414	if (ret)
1415		goto error_undo;
1416
1417	spin_lock(&root->fs_info->free_chunk_lock);
1418	root->fs_info->free_chunk_space = device->total_bytes -
1419		device->bytes_used;
1420	spin_unlock(&root->fs_info->free_chunk_lock);
1421
1422	device->in_fs_metadata = 0;
1423	btrfs_scrub_cancel_dev(root, device);
1424
1425	/*
1426	 * the device list mutex makes sure that we don't change
1427	 * the device list while someone else is writing out all
1428	 * the device supers.
1429	 */
1430
1431	cur_devices = device->fs_devices;
1432	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1433	list_del_rcu(&device->dev_list);
1434
1435	device->fs_devices->num_devices--;
1436
1437	if (device->missing)
1438		root->fs_info->fs_devices->missing_devices--;
1439
1440	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1441				 struct btrfs_device, dev_list);
1442	if (device->bdev == root->fs_info->sb->s_bdev)
1443		root->fs_info->sb->s_bdev = next_device->bdev;
1444	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1445		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1446
1447	if (device->bdev)
1448		device->fs_devices->open_devices--;
1449
1450	call_rcu(&device->rcu, free_device);
1451	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1452
1453	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1454	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1455
1456	if (cur_devices->open_devices == 0) {
1457		struct btrfs_fs_devices *fs_devices;
1458		fs_devices = root->fs_info->fs_devices;
1459		while (fs_devices) {
1460			if (fs_devices->seed == cur_devices)
1461				break;
1462			fs_devices = fs_devices->seed;
1463		}
1464		fs_devices->seed = cur_devices->seed;
1465		cur_devices->seed = NULL;
1466		lock_chunks(root);
1467		__btrfs_close_devices(cur_devices);
1468		unlock_chunks(root);
1469		free_fs_devices(cur_devices);
1470	}
1471
1472	/*
1473	 * at this point, the device is zero sized.  We want to
1474	 * remove it from the devices list and zero out the old super
1475	 */
1476	if (clear_super) {
1477		/* make sure this device isn't detected as part of
1478		 * the FS anymore
1479		 */
1480		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1481		set_buffer_dirty(bh);
1482		sync_dirty_buffer(bh);
1483	}
1484
1485	ret = 0;
1486
1487error_brelse:
1488	brelse(bh);
1489error_close:
1490	if (bdev)
1491		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1492out:
1493	mutex_unlock(&uuid_mutex);
1494	return ret;
1495error_undo:
1496	if (device->writeable) {
1497		lock_chunks(root);
1498		list_add(&device->dev_alloc_list,
1499			 &root->fs_info->fs_devices->alloc_list);
1500		unlock_chunks(root);
1501		root->fs_info->fs_devices->rw_devices++;
1502	}
1503	goto error_brelse;
1504}
1505
1506/*
1507 * does all the dirty work required for changing file system's UUID.
1508 */
1509static int btrfs_prepare_sprout(struct btrfs_root *root)
1510{
1511	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1512	struct btrfs_fs_devices *old_devices;
1513	struct btrfs_fs_devices *seed_devices;
1514	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1515	struct btrfs_device *device;
1516	u64 super_flags;
1517
1518	BUG_ON(!mutex_is_locked(&uuid_mutex));
1519	if (!fs_devices->seeding)
1520		return -EINVAL;
1521
1522	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1523	if (!seed_devices)
1524		return -ENOMEM;
1525
1526	old_devices = clone_fs_devices(fs_devices);
1527	if (IS_ERR(old_devices)) {
1528		kfree(seed_devices);
1529		return PTR_ERR(old_devices);
1530	}
1531
1532	list_add(&old_devices->list, &fs_uuids);
1533
1534	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1535	seed_devices->opened = 1;
1536	INIT_LIST_HEAD(&seed_devices->devices);
1537	INIT_LIST_HEAD(&seed_devices->alloc_list);
1538	mutex_init(&seed_devices->device_list_mutex);
1539
1540	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1541	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1542			      synchronize_rcu);
1543	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1544
1545	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1546	list_for_each_entry(device, &seed_devices->devices, dev_list) {
1547		device->fs_devices = seed_devices;
1548	}
1549
1550	fs_devices->seeding = 0;
1551	fs_devices->num_devices = 0;
1552	fs_devices->open_devices = 0;
1553	fs_devices->seed = seed_devices;
1554
1555	generate_random_uuid(fs_devices->fsid);
1556	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1557	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1558	super_flags = btrfs_super_flags(disk_super) &
1559		      ~BTRFS_SUPER_FLAG_SEEDING;
1560	btrfs_set_super_flags(disk_super, super_flags);
1561
1562	return 0;
1563}
1564
1565/*
1566 * store the expected generation for seed devices in device items.
1567 */
1568static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1569			       struct btrfs_root *root)
1570{
1571	struct btrfs_path *path;
1572	struct extent_buffer *leaf;
1573	struct btrfs_dev_item *dev_item;
1574	struct btrfs_device *device;
1575	struct btrfs_key key;
1576	u8 fs_uuid[BTRFS_UUID_SIZE];
1577	u8 dev_uuid[BTRFS_UUID_SIZE];
1578	u64 devid;
1579	int ret;
1580
1581	path = btrfs_alloc_path();
1582	if (!path)
1583		return -ENOMEM;
1584
1585	root = root->fs_info->chunk_root;
1586	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1587	key.offset = 0;
1588	key.type = BTRFS_DEV_ITEM_KEY;
1589
1590	while (1) {
1591		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1592		if (ret < 0)
1593			goto error;
1594
1595		leaf = path->nodes[0];
1596next_slot:
1597		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1598			ret = btrfs_next_leaf(root, path);
1599			if (ret > 0)
1600				break;
1601			if (ret < 0)
1602				goto error;
1603			leaf = path->nodes[0];
1604			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1605			btrfs_release_path(path);
1606			continue;
1607		}
1608
1609		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1610		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1611		    key.type != BTRFS_DEV_ITEM_KEY)
1612			break;
1613
1614		dev_item = btrfs_item_ptr(leaf, path->slots[0],
1615					  struct btrfs_dev_item);
1616		devid = btrfs_device_id(leaf, dev_item);
1617		read_extent_buffer(leaf, dev_uuid,
1618				   (unsigned long)btrfs_device_uuid(dev_item),
1619				   BTRFS_UUID_SIZE);
1620		read_extent_buffer(leaf, fs_uuid,
1621				   (unsigned long)btrfs_device_fsid(dev_item),
1622				   BTRFS_UUID_SIZE);
1623		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1624		BUG_ON(!device); /* Logic error */
1625
1626		if (device->fs_devices->seeding) {
1627			btrfs_set_device_generation(leaf, dev_item,
1628						    device->generation);
1629			btrfs_mark_buffer_dirty(leaf);
1630		}
1631
1632		path->slots[0]++;
1633		goto next_slot;
1634	}
1635	ret = 0;
1636error:
1637	btrfs_free_path(path);
1638	return ret;
1639}
1640
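/*
 * Add the block device at @device_path to a mounted filesystem: allocate
 * and register a btrfs_device, grow the super block totals and, when adding
 * to a seed filesystem, sprout a new writable fsid on top of the read-only
 * seed.
 */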
1641int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1642{
1643	struct request_queue *q;
1644	struct btrfs_trans_handle *trans;
1645	struct btrfs_device *device;
1646	struct block_device *bdev;
1647	struct list_head *devices;
1648	struct super_block *sb = root->fs_info->sb;
1649	struct rcu_string *name;
1650	u64 total_bytes;
1651	int seeding_dev = 0;
1652	int ret = 0;
1653
1654	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1655		return -EROFS;
1656
1657	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1658				  root->fs_info->bdev_holder);
1659	if (IS_ERR(bdev))
1660		return PTR_ERR(bdev);
1661
1662	if (root->fs_info->fs_devices->seeding) {
1663		seeding_dev = 1;
1664		down_write(&sb->s_umount);
1665		mutex_lock(&uuid_mutex);
1666	}
1667
1668	filemap_write_and_wait(bdev->bd_inode->i_mapping);
1669
1670	devices = &root->fs_info->fs_devices->devices;
1671	/*
1672	 * we have the volume lock, so we don't need the extra
1673	 * device list mutex while reading the list here.
1674	 */
1675	list_for_each_entry(device, devices, dev_list) {
1676		if (device->bdev == bdev) {
1677			ret = -EEXIST;
1678			goto error;
1679		}
1680	}
1681
1682	device = kzalloc(sizeof(*device), GFP_NOFS);
1683	if (!device) {
1684		/* we can safely leave the fs_devices entry around */
1685		ret = -ENOMEM;
1686		goto error;
1687	}
1688
1689	name = rcu_string_strdup(device_path, GFP_NOFS);
1690	if (!name) {
1691		kfree(device);
1692		ret = -ENOMEM;
1693		goto error;
1694	}
1695	rcu_assign_pointer(device->name, name);
1696
1697	ret = find_next_devid(root, &device->devid);
1698	if (ret) {
1699		rcu_string_free(device->name);
1700		kfree(device);
1701		goto error;
1702	}
1703
1704	trans = btrfs_start_transaction(root, 0);
1705	if (IS_ERR(trans)) {
1706		rcu_string_free(device->name);
1707		kfree(device);
1708		ret = PTR_ERR(trans);
1709		goto error;
1710	}
1711
1712	lock_chunks(root);
1713
1714	q = bdev_get_queue(bdev);
1715	if (blk_queue_discard(q))
1716		device->can_discard = 1;
1717	device->writeable = 1;
1718	device->work.func = pending_bios_fn;
1719	generate_random_uuid(device->uuid);
1720	spin_lock_init(&device->io_lock);
1721	device->generation = trans->transid;
1722	device->io_width = root->sectorsize;
1723	device->io_align = root->sectorsize;
1724	device->sector_size = root->sectorsize;
1725	device->total_bytes = i_size_read(bdev->bd_inode);
1726	device->disk_total_bytes = device->total_bytes;
1727	device->dev_root = root->fs_info->dev_root;
1728	device->bdev = bdev;
1729	device->in_fs_metadata = 1;
1730	device->mode = FMODE_EXCL;
1731	set_blocksize(device->bdev, 4096);
1732
1733	if (seeding_dev) {
1734		sb->s_flags &= ~MS_RDONLY;
1735		ret = btrfs_prepare_sprout(root);
1736		BUG_ON(ret); /* -ENOMEM */
1737	}
1738
1739	device->fs_devices = root->fs_info->fs_devices;
1740
1741	/*
1742	 * we don't want write_supers to jump in here with our device
1743	 * half setup
1744	 */
1745	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1746	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1747	list_add(&device->dev_alloc_list,
1748		 &root->fs_info->fs_devices->alloc_list);
1749	root->fs_info->fs_devices->num_devices++;
1750	root->fs_info->fs_devices->open_devices++;
1751	root->fs_info->fs_devices->rw_devices++;
1752	if (device->can_discard)
1753		root->fs_info->fs_devices->num_can_discard++;
1754	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1755
1756	spin_lock(&root->fs_info->free_chunk_lock);
1757	root->fs_info->free_chunk_space += device->total_bytes;
1758	spin_unlock(&root->fs_info->free_chunk_lock);
1759
1760	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1761		root->fs_info->fs_devices->rotating = 1;
1762
1763	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1764	btrfs_set_super_total_bytes(root->fs_info->super_copy,
1765				    total_bytes + device->total_bytes);
1766
1767	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1768	btrfs_set_super_num_devices(root->fs_info->super_copy,
1769				    total_bytes + 1);
1770	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1771
1772	if (seeding_dev) {
1773		ret = init_first_rw_device(trans, root, device);
1774		if (ret)
1775			goto error_trans;
1776		ret = btrfs_finish_sprout(trans, root);
1777		if (ret)
1778			goto error_trans;
1779	} else {
1780		ret = btrfs_add_device(trans, root, device);
1781		if (ret)
1782			goto error_trans;
1783	}
1784
1785	/*
1786	 * we've got more storage, clear any full flags on the space
1787	 * infos
1788	 */
1789	btrfs_clear_space_info_full(root->fs_info);
1790
1791	unlock_chunks(root);
1792	ret = btrfs_commit_transaction(trans, root);
1793
1794	if (seeding_dev) {
1795		mutex_unlock(&uuid_mutex);
1796		up_write(&sb->s_umount);
1797
1798		if (ret) /* transaction commit */
1799			return ret;
1800
1801		ret = btrfs_relocate_sys_chunks(root);
1802		if (ret < 0)
1803			btrfs_error(root->fs_info, ret,
1804				    "Failed to relocate sys chunks after "
1805				    "device initialization. This can be fixed "
1806				    "using the \"btrfs balance\" command.");
1807	}
1808
1809	return ret;
1810
1811error_trans:
1812	unlock_chunks(root);
1813	btrfs_abort_transaction(trans, root, ret);
1814	btrfs_end_transaction(trans, root);
1815	rcu_string_free(device->name);
1816	kfree(device);
1817error:
1818	blkdev_put(bdev, FMODE_EXCL);
1819	if (seeding_dev) {
1820		mutex_unlock(&uuid_mutex);
1821		up_write(&sb->s_umount);
1822	}
1823	return ret;
1824}
1825
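/*
 * Write the current in-memory state of @device (sizes and io parameters)
 * back into its dev item in the chunk tree.
 */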
1826static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1827					struct btrfs_device *device)
1828{
1829	int ret;
1830	struct btrfs_path *path;
1831	struct btrfs_root *root;
1832	struct btrfs_dev_item *dev_item;
1833	struct extent_buffer *leaf;
1834	struct btrfs_key key;
1835
1836	root = device->dev_root->fs_info->chunk_root;
1837
1838	path = btrfs_alloc_path();
1839	if (!path)
1840		return -ENOMEM;
1841
1842	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1843	key.type = BTRFS_DEV_ITEM_KEY;
1844	key.offset = device->devid;
1845
1846	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1847	if (ret < 0)
1848		goto out;
1849
1850	if (ret > 0) {
1851		ret = -ENOENT;
1852		goto out;
1853	}
1854
1855	leaf = path->nodes[0];
1856	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1857
1858	btrfs_set_device_id(leaf, dev_item, device->devid);
1859	btrfs_set_device_type(leaf, dev_item, device->type);
1860	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1861	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1862	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1863	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1864	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1865	btrfs_mark_buffer_dirty(leaf);
1866
1867out:
1868	btrfs_free_path(path);
1869	return ret;
1870}
1871
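/*
 * Grow @device to @new_size, updating the super block total and the dev
 * item.  Callers must hold the chunk mutex; btrfs_grow_device() below is
 * the locking wrapper.
 */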
1872static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1873		      struct btrfs_device *device, u64 new_size)
1874{
1875	struct btrfs_super_block *super_copy =
1876		device->dev_root->fs_info->super_copy;
1877	u64 old_total = btrfs_super_total_bytes(super_copy);
1878	u64 diff = new_size - device->total_bytes;
1879
1880	if (!device->writeable)
1881		return -EACCES;
1882	if (new_size <= device->total_bytes)
1883		return -EINVAL;
1884
1885	btrfs_set_super_total_bytes(super_copy, old_total + diff);
1886	device->fs_devices->total_rw_bytes += diff;
1887
1888	device->total_bytes = new_size;
1889	device->disk_total_bytes = new_size;
1890	btrfs_clear_space_info_full(device->dev_root->fs_info);
1891
1892	return btrfs_update_device(trans, device);
1893}
1894
1895int btrfs_grow_device(struct btrfs_trans_handle *trans,
1896		      struct btrfs_device *device, u64 new_size)
1897{
1898	int ret;
1899	lock_chunks(device->dev_root);
1900	ret = __btrfs_grow_device(trans, device, new_size);
1901	unlock_chunks(device->dev_root);
1902	return ret;
1903}
1904
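/* Delete the chunk item for @chunk_offset from the chunk tree. */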
1905static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1906			    struct btrfs_root *root,
1907			    u64 chunk_tree, u64 chunk_objectid,
1908			    u64 chunk_offset)
1909{
1910	int ret;
1911	struct btrfs_path *path;
1912	struct btrfs_key key;
1913
1914	root = root->fs_info->chunk_root;
1915	path = btrfs_alloc_path();
1916	if (!path)
1917		return -ENOMEM;
1918
1919	key.objectid = chunk_objectid;
1920	key.offset = chunk_offset;
1921	key.type = BTRFS_CHUNK_ITEM_KEY;
1922
1923	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1924	if (ret < 0)
1925		goto out;
1926	else if (ret > 0) { /* Logic error or corruption */
1927		btrfs_error(root->fs_info, -ENOENT,
1928			    "Failed lookup while freeing chunk.");
1929		ret = -ENOENT;
1930		goto out;
1931	}
1932
1933	ret = btrfs_del_item(trans, root, path);
1934	if (ret < 0)
1935		btrfs_error(root->fs_info, ret,
1936			    "Failed to delete chunk item.");
1937out:
1938	btrfs_free_path(path);
1939	return ret;
1940}
1941
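/*
 * Remove the matching chunk entry from the sys_chunk_array embedded in the
 * super block copy, shifting the rest of the array down over it.
 */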
1942static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1943			chunk_offset)
1944{
1945	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
1946	struct btrfs_disk_key *disk_key;
1947	struct btrfs_chunk *chunk;
1948	u8 *ptr;
1949	int ret = 0;
1950	u32 num_stripes;
1951	u32 array_size;
1952	u32 len = 0;
1953	u32 cur;
1954	struct btrfs_key key;
1955
1956	array_size = btrfs_super_sys_array_size(super_copy);
1957
1958	ptr = super_copy->sys_chunk_array;
1959	cur = 0;
1960
1961	while (cur < array_size) {
1962		disk_key = (struct btrfs_disk_key *)ptr;
1963		btrfs_disk_key_to_cpu(&key, disk_key);
1964
1965		len = sizeof(*disk_key);
1966
1967		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1968			chunk = (struct btrfs_chunk *)(ptr + len);
1969			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1970			len += btrfs_chunk_item_size(num_stripes);
1971		} else {
1972			ret = -EIO;
1973			break;
1974		}
1975		if (key.objectid == chunk_objectid &&
1976		    key.offset == chunk_offset) {
1977			memmove(ptr, ptr + len, array_size - (cur + len));
1978			array_size -= len;
1979			btrfs_set_super_sys_array_size(super_copy, array_size);
1980		} else {
1981			ptr += len;
1982			cur += len;
1983		}
1984	}
1985	return ret;
1986}
1987
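/*
 * Relocate everything that lives in a chunk: move its extents out, then
 * delete the device extents, the chunk item (plus the sys_chunk_array copy
 * for system chunks), the block group and the cached extent mapping.
 */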
1988static int btrfs_relocate_chunk(struct btrfs_root *root,
1989			 u64 chunk_tree, u64 chunk_objectid,
1990			 u64 chunk_offset)
1991{
1992	struct extent_map_tree *em_tree;
1993	struct btrfs_root *extent_root;
1994	struct btrfs_trans_handle *trans;
1995	struct extent_map *em;
1996	struct map_lookup *map;
1997	int ret;
1998	int i;
1999
2000	root = root->fs_info->chunk_root;
2001	extent_root = root->fs_info->extent_root;
2002	em_tree = &root->fs_info->mapping_tree.map_tree;
2003
2004	ret = btrfs_can_relocate(extent_root, chunk_offset);
2005	if (ret)
2006		return -ENOSPC;
2007
2008	/* step one, relocate all the extents inside this chunk */
2009	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2010	if (ret)
2011		return ret;
2012
2013	trans = btrfs_start_transaction(root, 0);
2014	BUG_ON(IS_ERR(trans));
2015
2016	lock_chunks(root);
2017
2018	/*
2019	 * step two, delete the device extents and the
2020	 * chunk tree entries
2021	 */
2022	read_lock(&em_tree->lock);
2023	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2024	read_unlock(&em_tree->lock);
2025
2026	BUG_ON(!em || em->start > chunk_offset ||
2027	       em->start + em->len < chunk_offset);
2028	map = (struct map_lookup *)em->bdev;
2029
2030	for (i = 0; i < map->num_stripes; i++) {
2031		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2032					    map->stripes[i].physical);
2033		BUG_ON(ret);
2034
2035		if (map->stripes[i].dev) {
2036			ret = btrfs_update_device(trans, map->stripes[i].dev);
2037			BUG_ON(ret);
2038		}
2039	}
2040	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2041			       chunk_offset);
2042
2043	BUG_ON(ret);
2044
2045	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2046
2047	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2048		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2049		BUG_ON(ret);
2050	}
2051
2052	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2053	BUG_ON(ret);
2054
2055	write_lock(&em_tree->lock);
2056	remove_extent_mapping(em_tree, em);
2057	write_unlock(&em_tree->lock);
2058
2059	kfree(map);
2060	em->bdev = NULL;
2061
2062	/* once for the tree */
2063	free_extent_map(em);
2064	/* once for us */
2065	free_extent_map(em);
2066
2067	unlock_chunks(root);
2068	btrfs_end_transaction(trans, root);
2069	return 0;
2070}
2071
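/*
 * Walk all chunk items and relocate every SYSTEM chunk; chunks that fail
 * with -ENOSPC are retried once more after the rest have been moved.
 */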
2072static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2073{
2074	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2075	struct btrfs_path *path;
2076	struct extent_buffer *leaf;
2077	struct btrfs_chunk *chunk;
2078	struct btrfs_key key;
2079	struct btrfs_key found_key;
2080	u64 chunk_tree = chunk_root->root_key.objectid;
2081	u64 chunk_type;
2082	bool retried = false;
2083	int failed = 0;
2084	int ret;
2085
2086	path = btrfs_alloc_path();
2087	if (!path)
2088		return -ENOMEM;
2089
2090again:
2091	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2092	key.offset = (u64)-1;
2093	key.type = BTRFS_CHUNK_ITEM_KEY;
2094
2095	while (1) {
2096		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2097		if (ret < 0)
2098			goto error;
2099		BUG_ON(ret == 0); /* Corruption */
2100
2101		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2102					  key.type);
2103		if (ret < 0)
2104			goto error;
2105		if (ret > 0)
2106			break;
2107
2108		leaf = path->nodes[0];
2109		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2110
2111		chunk = btrfs_item_ptr(leaf, path->slots[0],
2112				       struct btrfs_chunk);
2113		chunk_type = btrfs_chunk_type(leaf, chunk);
2114		btrfs_release_path(path);
2115
2116		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2117			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2118						   found_key.objectid,
2119						   found_key.offset);
2120			if (ret == -ENOSPC)
2121				failed++;
2122			else if (ret)
2123				BUG();
2124		}
2125
2126		if (found_key.offset == 0)
2127			break;
2128		key.offset = found_key.offset - 1;
2129	}
2130	ret = 0;
2131	if (failed && !retried) {
2132		failed = 0;
2133		retried = true;
2134		goto again;
2135	} else if (failed && retried) {
2136		WARN_ON(1);
2137		ret = -ENOSPC;
2138	}
2139error:
2140	btrfs_free_path(path);
2141	return ret;
2142}
2143
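/*
 * Persist the balance arguments as the single balance item
 * (BTRFS_BALANCE_OBJECTID) so that an interrupted balance can be resumed.
 */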
2144static int insert_balance_item(struct btrfs_root *root,
2145			       struct btrfs_balance_control *bctl)
2146{
2147	struct btrfs_trans_handle *trans;
2148	struct btrfs_balance_item *item;
2149	struct btrfs_disk_balance_args disk_bargs;
2150	struct btrfs_path *path;
2151	struct extent_buffer *leaf;
2152	struct btrfs_key key;
2153	int ret, err;
2154
2155	path = btrfs_alloc_path();
2156	if (!path)
2157		return -ENOMEM;
2158
2159	trans = btrfs_start_transaction(root, 0);
2160	if (IS_ERR(trans)) {
2161		btrfs_free_path(path);
2162		return PTR_ERR(trans);
2163	}
2164
2165	key.objectid = BTRFS_BALANCE_OBJECTID;
2166	key.type = BTRFS_BALANCE_ITEM_KEY;
2167	key.offset = 0;
2168
2169	ret = btrfs_insert_empty_item(trans, root, path, &key,
2170				      sizeof(*item));
2171	if (ret)
2172		goto out;
2173
2174	leaf = path->nodes[0];
2175	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2176
2177	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2178
2179	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2180	btrfs_set_balance_data(leaf, item, &disk_bargs);
2181	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2182	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2183	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2184	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2185
2186	btrfs_set_balance_flags(leaf, item, bctl->flags);
2187
2188	btrfs_mark_buffer_dirty(leaf);
2189out:
2190	btrfs_free_path(path);
2191	err = btrfs_commit_transaction(trans, root);
2192	if (err && !ret)
2193		ret = err;
2194	return ret;
2195}
2196
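/* Remove the persisted balance item again. */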
2197static int del_balance_item(struct btrfs_root *root)
2198{
2199	struct btrfs_trans_handle *trans;
2200	struct btrfs_path *path;
2201	struct btrfs_key key;
2202	int ret, err;
2203
2204	path = btrfs_alloc_path();
2205	if (!path)
2206		return -ENOMEM;
2207
2208	trans = btrfs_start_transaction(root, 0);
2209	if (IS_ERR(trans)) {
2210		btrfs_free_path(path);
2211		return PTR_ERR(trans);
2212	}
2213
2214	key.objectid = BTRFS_BALANCE_OBJECTID;
2215	key.type = BTRFS_BALANCE_ITEM_KEY;
2216	key.offset = 0;
2217
2218	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2219	if (ret < 0)
2220		goto out;
2221	if (ret > 0) {
2222		ret = -ENOENT;
2223		goto out;
2224	}
2225
2226	ret = btrfs_del_item(trans, root, path);
2227out:
2228	btrfs_free_path(path);
2229	err = btrfs_commit_transaction(trans, root);
2230	if (err && !ret)
2231		ret = err;
2232	return ret;
2233}
2234
2235/*
2236 * This is a heuristic used to reduce the number of chunks balanced on
2237 * resume after balance was interrupted.
2238 */
2239static void update_balance_args(struct btrfs_balance_control *bctl)
2240{
2241	/*
2242	 * Turn on soft mode for chunk types that were being converted.
2243	 */
2244	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2245		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2246	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2247		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2248	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2249		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2250
2251	/*
2252	 * Turn on usage filter if it is not already used.  The idea is
2253	 * that chunks that we have already balanced should be
2254	 * reasonably full.  Don't do it for chunks that are being
2255	 * converted - that will keep us from relocating unconverted
2256	 * (albeit full) chunks.
2257	 */
2258	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2259	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2260		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2261		bctl->data.usage = 90;
2262	}
2263	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2264	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2265		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2266		bctl->sys.usage = 90;
2267	}
2268	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2269	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2270		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2271		bctl->meta.usage = 90;
2272	}
2273}
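/*
 * Worked example: an interrupted convert balance resumes with
 * BTRFS_BALANCE_ARGS_SOFT set, so chunks already in the target profile
 * are skipped; a plain balance resumes with usage=90, so chunks that
 * were already relocated (and are therefore close to full) are not
 * balanced a second time.
 */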
2274
2275/*
2276 * Should be called with both balance and volume mutexes held to
2277 * serialize other volume operations (add_dev/rm_dev/resize) with
2278 * restriper.  Same goes for unset_balance_control.
2279 */
2280static void set_balance_control(struct btrfs_balance_control *bctl)
2281{
2282	struct btrfs_fs_info *fs_info = bctl->fs_info;
2283
2284	BUG_ON(fs_info->balance_ctl);
2285
2286	spin_lock(&fs_info->balance_lock);
2287	fs_info->balance_ctl = bctl;
2288	spin_unlock(&fs_info->balance_lock);
2289}
2290
2291static void unset_balance_control(struct btrfs_fs_info *fs_info)
2292{
2293	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2294
2295	BUG_ON(!fs_info->balance_ctl);
2296
2297	spin_lock(&fs_info->balance_lock);
2298	fs_info->balance_ctl = NULL;
2299	spin_unlock(&fs_info->balance_lock);
2300
2301	kfree(bctl);
2302}
2303
2304/*
2305 * Balance filters.  Return 1 if chunk should be filtered out
2306 * (should not be balanced).
2307 */
2308static int chunk_profiles_filter(u64 chunk_type,
2309				 struct btrfs_balance_args *bargs)
2310{
2311	chunk_type = chunk_to_extended(chunk_type) &
2312				BTRFS_EXTENDED_PROFILE_MASK;
2313
2314	if (bargs->profiles & chunk_type)
2315		return 0;
2316
2317	return 1;
2318}
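/*
 * Note: chunk_to_extended() maps a chunk with no profile bits (the
 * "single" profile) to BTRFS_AVAIL_ALLOC_BIT_SINGLE, so the bitwise
 * test above works for single chunks as well.
 */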
2319
2320static u64 div_factor_fine(u64 num, int factor)
2321{
2322	if (factor <= 0)
2323		return 0;
2324	if (factor >= 100)
2325		return num;
2326
2327	num *= factor;
2328	do_div(num, 100);
2329	return num;
2330}
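/*
 * Worked example: div_factor_fine(1000, 75) == 750, i.e. factor is a
 * percentage.  do_div() divides the 64-bit num in place and is used
 * because a plain 64-bit division is not available on all 32-bit
 * architectures.
 */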
2331
2332static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2333			      struct btrfs_balance_args *bargs)
2334{
2335	struct btrfs_block_group_cache *cache;
2336	u64 chunk_used, user_thresh;
2337	int ret = 1;
2338
2339	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2340	chunk_used = btrfs_block_group_used(&cache->item);
2341
2342	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2343	if (chunk_used < user_thresh)
2344		ret = 0;
2345
2346	btrfs_put_block_group(cache);
2347	return ret;
2348}
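/*
 * Example: with bargs->usage == 50 and a 1GiB chunk, user_thresh is
 * 512MiB; chunks that are at least half full are filtered out and only
 * the emptier ones are relocated.
 */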
2349
2350static int chunk_devid_filter(struct extent_buffer *leaf,
2351			      struct btrfs_chunk *chunk,
2352			      struct btrfs_balance_args *bargs)
2353{
2354	struct btrfs_stripe *stripe;
2355	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2356	int i;
2357
2358	for (i = 0; i < num_stripes; i++) {
2359		stripe = btrfs_stripe_nr(chunk, i);
2360		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2361			return 0;
2362	}
2363
2364	return 1;
2365}
2366
2367/* [pstart, pend) */
2368static int chunk_drange_filter(struct extent_buffer *leaf,
2369			       struct btrfs_chunk *chunk,
2370			       u64 chunk_offset,
2371			       struct btrfs_balance_args *bargs)
2372{
2373	struct btrfs_stripe *stripe;
2374	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2375	u64 stripe_offset;
2376	u64 stripe_length;
2377	int factor;
2378	int i;
2379
2380	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2381		return 0;
2382
2383	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2384	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2385		factor = 2;
2386	else
2387		factor = 1;
2388	factor = num_stripes / factor;
2389
2390	for (i = 0; i < num_stripes; i++) {
2391		stripe = btrfs_stripe_nr(chunk, i);
2392		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2393			continue;
2394
2395		stripe_offset = btrfs_stripe_offset(leaf, stripe);
2396		stripe_length = btrfs_chunk_length(leaf, chunk);
2397		do_div(stripe_length, factor);
2398
2399		if (stripe_offset < bargs->pend &&
2400		    stripe_offset + stripe_length > bargs->pstart)
2401			return 0;
2402	}
2403
2404	return 1;
2405}
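/*
 * The factor above converts the chunk length into the length of one
 * device extent: DUP/RAID1/RAID10 store two copies, so each of the
 * num_stripes stripes covers chunk_length / (num_stripes / 2) bytes;
 * for single and RAID0 it is chunk_length / num_stripes.
 */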
2406
2407/* [vstart, vend) */
2408static int chunk_vrange_filter(struct extent_buffer *leaf,
2409			       struct btrfs_chunk *chunk,
2410			       u64 chunk_offset,
2411			       struct btrfs_balance_args *bargs)
2412{
2413	if (chunk_offset < bargs->vend &&
2414	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2415		/* at least part of the chunk is inside this vrange */
2416		return 0;
2417
2418	return 1;
2419}
2420
2421static int chunk_soft_convert_filter(u64 chunk_type,
2422				     struct btrfs_balance_args *bargs)
2423{
2424	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2425		return 0;
2426
2427	chunk_type = chunk_to_extended(chunk_type) &
2428				BTRFS_EXTENDED_PROFILE_MASK;
2429
2430	if (bargs->target == chunk_type)
2431		return 1;
2432
2433	return 0;
2434}
2435
2436static int should_balance_chunk(struct btrfs_root *root,
2437				struct extent_buffer *leaf,
2438				struct btrfs_chunk *chunk, u64 chunk_offset)
2439{
2440	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2441	struct btrfs_balance_args *bargs = NULL;
2442	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2443
2444	/* type filter */
2445	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2446	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2447		return 0;
2448	}
2449
2450	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2451		bargs = &bctl->data;
2452	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2453		bargs = &bctl->sys;
2454	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2455		bargs = &bctl->meta;
2456
2457	/* profiles filter */
2458	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2459	    chunk_profiles_filter(chunk_type, bargs)) {
2460		return 0;
2461	}
2462
2463	/* usage filter */
2464	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2465	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2466		return 0;
2467	}
2468
2469	/* devid filter */
2470	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2471	    chunk_devid_filter(leaf, chunk, bargs)) {
2472		return 0;
2473	}
2474
2475	/* drange filter, makes sense only with devid filter */
2476	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2477	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2478		return 0;
2479	}
2480
2481	/* vrange filter */
2482	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2483	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2484		return 0;
2485	}
2486
2487	/* soft profile changing mode */
2488	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2489	    chunk_soft_convert_filter(chunk_type, bargs)) {
2490		return 0;
2491	}
2492
2493	return 1;
2494}
2495
2496static u64 div_factor(u64 num, int factor)
2497{
2498	if (factor == 10)
2499		return num;
2500	num *= factor;
2501	do_div(num, 10);
2502	return num;
2503}
2504
2505static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2506{
2507	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2508	struct btrfs_root *chunk_root = fs_info->chunk_root;
2509	struct btrfs_root *dev_root = fs_info->dev_root;
2510	struct list_head *devices;
2511	struct btrfs_device *device;
2512	u64 old_size;
2513	u64 size_to_free;
2514	struct btrfs_chunk *chunk;
2515	struct btrfs_path *path;
2516	struct btrfs_key key;
2517	struct btrfs_key found_key;
2518	struct btrfs_trans_handle *trans;
2519	struct extent_buffer *leaf;
2520	int slot;
2521	int ret;
2522	int enospc_errors = 0;
2523	bool counting = true;
2524
2525	/* step one, make some room on all the devices */
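	/*
	 * For each nearly full writeable device (less than ~1MiB free),
	 * shrink it by that amount and immediately grow it back: the
	 * shrink relocates whatever chunk sits at the very end of the
	 * device, so step two has some free room to work with.
	 */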
2526	devices = &fs_info->fs_devices->devices;
2527	list_for_each_entry(device, devices, dev_list) {
2528		old_size = device->total_bytes;
2529		size_to_free = div_factor(old_size, 1);
2530		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2531		if (!device->writeable ||
2532		    device->total_bytes - device->bytes_used > size_to_free)
2533			continue;
2534
2535		ret = btrfs_shrink_device(device, old_size - size_to_free);
2536		if (ret == -ENOSPC)
2537			break;
2538		BUG_ON(ret);
2539
2540		trans = btrfs_start_transaction(dev_root, 0);
2541		BUG_ON(IS_ERR(trans));
2542
2543		ret = btrfs_grow_device(trans, device, old_size);
2544		BUG_ON(ret);
2545
2546		btrfs_end_transaction(trans, dev_root);
2547	}
2548
2549	/* step two, relocate all the chunks */
2550	path = btrfs_alloc_path();
2551	if (!path) {
2552		ret = -ENOMEM;
2553		goto error;
2554	}
2555
2556	/* zero out stat counters */
2557	spin_lock(&fs_info->balance_lock);
2558	memset(&bctl->stat, 0, sizeof(bctl->stat));
2559	spin_unlock(&fs_info->balance_lock);
2560again:
2561	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2562	key.offset = (u64)-1;
2563	key.type = BTRFS_CHUNK_ITEM_KEY;
2564
2565	while (1) {
2566		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2567		    atomic_read(&fs_info->balance_cancel_req)) {
2568			ret = -ECANCELED;
2569			goto error;
2570		}
2571
2572		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2573		if (ret < 0)
2574			goto error;
2575
2576		/*
2577		 * this shouldn't happen, it means the last relocate
2578		 * failed
2579		 */
2580		if (ret == 0)
2581			BUG(); /* FIXME break ? */
2582
2583		ret = btrfs_previous_item(chunk_root, path, 0,
2584					  BTRFS_CHUNK_ITEM_KEY);
2585		if (ret) {
2586			ret = 0;
2587			break;
2588		}
2589
2590		leaf = path->nodes[0];
2591		slot = path->slots[0];
2592		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2593
2594		if (found_key.objectid != key.objectid)
2595			break;
2596
2597		/* chunk zero is special */
2598		if (found_key.offset == 0)
2599			break;
2600
2601		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2602
2603		if (!counting) {
2604			spin_lock(&fs_info->balance_lock);
2605			bctl->stat.considered++;
2606			spin_unlock(&fs_info->balance_lock);
2607		}
2608
2609		ret = should_balance_chunk(chunk_root, leaf, chunk,
2610					   found_key.offset);
2611		btrfs_release_path(path);
2612		if (!ret)
2613			goto loop;
2614
2615		if (counting) {
2616			spin_lock(&fs_info->balance_lock);
2617			bctl->stat.expected++;
2618			spin_unlock(&fs_info->balance_lock);
2619			goto loop;
2620		}
2621
2622		ret = btrfs_relocate_chunk(chunk_root,
2623					   chunk_root->root_key.objectid,
2624					   found_key.objectid,
2625					   found_key.offset);
2626		if (ret && ret != -ENOSPC)
2627			goto error;
2628		if (ret == -ENOSPC) {
2629			enospc_errors++;
2630		} else {
2631			spin_lock(&fs_info->balance_lock);
2632			bctl->stat.completed++;
2633			spin_unlock(&fs_info->balance_lock);
2634		}
2635loop:
2636		key.offset = found_key.offset - 1;
2637	}
2638
2639	if (counting) {
2640		btrfs_release_path(path);
2641		counting = false;
2642		goto again;
2643	}
2644error:
2645	btrfs_free_path(path);
2646	if (enospc_errors) {
2647		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2648		       enospc_errors);
2649		if (!ret)
2650			ret = -ENOSPC;
2651	}
2652
2653	return ret;
2654}
2655
2656/**
2657 * alloc_profile_is_valid - see if a given profile is valid and reduced
2658 * @flags: profile to validate
2659 * @extended: if true @flags is treated as an extended profile
2660 */
2661static int alloc_profile_is_valid(u64 flags, int extended)
2662{
2663	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2664			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
2665
2666	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2667
2668	/* 1) check that all other bits are zeroed */
2669	if (flags & ~mask)
2670		return 0;
2671
2672	/* 2) see if profile is reduced */
2673	if (flags == 0)
2674		return !extended; /* "0" is valid for usual profiles */
2675
2676	/* true if exactly one bit set */
2677	return (flags & (flags - 1)) == 0;
2678}
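/*
 * Note: (flags & (flags - 1)) clears the lowest set bit, so it is zero
 * iff at most one bit was set.  E.g. RAID1 alone is a reduced profile,
 * while RAID1|RAID10 is rejected.
 */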
2679
2680static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2681{
2682	/* cancel requested || normal exit path */
2683	return atomic_read(&fs_info->balance_cancel_req) ||
2684		(atomic_read(&fs_info->balance_pause_req) == 0 &&
2685		 atomic_read(&fs_info->balance_cancel_req) == 0);
2686}
2687
2688static void __cancel_balance(struct btrfs_fs_info *fs_info)
2689{
2690	int ret;
2691
2692	unset_balance_control(fs_info);
2693	ret = del_balance_item(fs_info->tree_root);
2694	BUG_ON(ret);
2695}
2696
2697void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2698			       struct btrfs_ioctl_balance_args *bargs);
2699
2700/*
2701 * Should be called with both balance and volume mutexes held
2702 */
2703int btrfs_balance(struct btrfs_balance_control *bctl,
2704		  struct btrfs_ioctl_balance_args *bargs)
2705{
2706	struct btrfs_fs_info *fs_info = bctl->fs_info;
2707	u64 allowed;
2708	int mixed = 0;
2709	int ret;
2710
2711	if (btrfs_fs_closing(fs_info) ||
2712	    atomic_read(&fs_info->balance_pause_req) ||
2713	    atomic_read(&fs_info->balance_cancel_req)) {
2714		ret = -EINVAL;
2715		goto out;
2716	}
2717
2718	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2719	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2720		mixed = 1;
2721
2722	/*
2723	 * In case of mixed groups both data and meta should be picked,
2724	 * and identical options should be given for both of them.
2725	 */
2726	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2727	if (mixed && (bctl->flags & allowed)) {
2728		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2729		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2730		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2731			printk(KERN_ERR "btrfs: with mixed groups data and "
2732			       "metadata balance options must be the same\n");
2733			ret = -EINVAL;
2734			goto out;
2735		}
2736	}
2737
2738	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2739	if (fs_info->fs_devices->num_devices == 1)
2740		allowed |= BTRFS_BLOCK_GROUP_DUP;
2741	else if (fs_info->fs_devices->num_devices < 4)
2742		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2743	else
2744		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2745				BTRFS_BLOCK_GROUP_RAID10);
2746
2747	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2748	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
2749	     (bctl->data.target & ~allowed))) {
2750		printk(KERN_ERR "btrfs: unable to start balance with target "
2751		       "data profile %llu\n",
2752		       (unsigned long long)bctl->data.target);
2753		ret = -EINVAL;
2754		goto out;
2755	}
2756	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2757	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2758	     (bctl->meta.target & ~allowed))) {
2759		printk(KERN_ERR "btrfs: unable to start balance with target "
2760		       "metadata profile %llu\n",
2761		       (unsigned long long)bctl->meta.target);
2762		ret = -EINVAL;
2763		goto out;
2764	}
2765	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2766	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2767	     (bctl->sys.target & ~allowed))) {
2768		printk(KERN_ERR "btrfs: unable to start balance with target "
2769		       "system profile %llu\n",
2770		       (unsigned long long)bctl->sys.target);
2771		ret = -EINVAL;
2772		goto out;
2773	}
2774
2775	/* allow dup'ed data chunks only in mixed mode */
2776	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2777	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2778		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2779		ret = -EINVAL;
2780		goto out;
2781	}
2782
2783	/* allow to reduce meta or sys integrity only if force set */
2784	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2785			BTRFS_BLOCK_GROUP_RAID10;
2786	if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2787	     (fs_info->avail_system_alloc_bits & allowed) &&
2788	     !(bctl->sys.target & allowed)) ||
2789	    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2790	     (fs_info->avail_metadata_alloc_bits & allowed) &&
2791	     !(bctl->meta.target & allowed))) {
2792		if (bctl->flags & BTRFS_BALANCE_FORCE) {
2793			printk(KERN_INFO "btrfs: force reducing metadata "
2794			       "integrity\n");
2795		} else {
2796			printk(KERN_ERR "btrfs: balance will reduce metadata "
2797			       "integrity, use force if you want this\n");
2798			ret = -EINVAL;
2799			goto out;
2800		}
2801	}
2802
2803	ret = insert_balance_item(fs_info->tree_root, bctl);
2804	if (ret && ret != -EEXIST)
2805		goto out;
2806
2807	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2808		BUG_ON(ret == -EEXIST);
2809		set_balance_control(bctl);
2810	} else {
2811		BUG_ON(ret != -EEXIST);
2812		spin_lock(&fs_info->balance_lock);
2813		update_balance_args(bctl);
2814		spin_unlock(&fs_info->balance_lock);
2815	}
2816
2817	atomic_inc(&fs_info->balance_running);
2818	mutex_unlock(&fs_info->balance_mutex);
2819
2820	ret = __btrfs_balance(fs_info);
2821
2822	mutex_lock(&fs_info->balance_mutex);
2823	atomic_dec(&fs_info->balance_running);
2824
2825	if (bargs) {
2826		memset(bargs, 0, sizeof(*bargs));
2827		update_ioctl_balance_args(fs_info, 0, bargs);
2828	}
2829
2830	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2831	    balance_need_close(fs_info)) {
2832		__cancel_balance(fs_info);
2833	}
2834
2835	wake_up(&fs_info->balance_wait_q);
2836
2837	return ret;
2838out:
2839	if (bctl->flags & BTRFS_BALANCE_RESUME)
2840		__cancel_balance(fs_info);
2841	else
2842		kfree(bctl);
2843	return ret;
2844}
2845
2846static int balance_kthread(void *data)
2847{
2848	struct btrfs_fs_info *fs_info = data;
2849	int ret = 0;
2850
2851	mutex_lock(&fs_info->volume_mutex);
2852	mutex_lock(&fs_info->balance_mutex);
2853
2854	if (fs_info->balance_ctl) {
2855		printk(KERN_INFO "btrfs: continuing balance\n");
2856		ret = btrfs_balance(fs_info->balance_ctl, NULL);
2857	}
2858
2859	mutex_unlock(&fs_info->balance_mutex);
2860	mutex_unlock(&fs_info->volume_mutex);
2861
2862	return ret;
2863}
2864
2865int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
2866{
2867	struct task_struct *tsk;
2868
2869	spin_lock(&fs_info->balance_lock);
2870	if (!fs_info->balance_ctl) {
2871		spin_unlock(&fs_info->balance_lock);
2872		return 0;
2873	}
2874	spin_unlock(&fs_info->balance_lock);
2875
2876	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2877		printk(KERN_INFO "btrfs: force skipping balance\n");
2878		return 0;
2879	}
2880
2881	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
2882	if (IS_ERR(tsk))
2883		return PTR_ERR(tsk);
2884
2885	return 0;
2886}
2887
2888int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
2889{
2890	struct btrfs_balance_control *bctl;
2891	struct btrfs_balance_item *item;
2892	struct btrfs_disk_balance_args disk_bargs;
2893	struct btrfs_path *path;
2894	struct extent_buffer *leaf;
2895	struct btrfs_key key;
2896	int ret;
2897
2898	path = btrfs_alloc_path();
2899	if (!path)
2900		return -ENOMEM;
2901
2902	key.objectid = BTRFS_BALANCE_OBJECTID;
2903	key.type = BTRFS_BALANCE_ITEM_KEY;
2904	key.offset = 0;
2905
2906	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
2907	if (ret < 0)
2908		goto out;
2909	if (ret > 0) { /* ret = -ENOENT; */
2910		ret = 0;
2911		goto out;
2912	}
2913
2914	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2915	if (!bctl) {
2916		ret = -ENOMEM;
2917		goto out;
2918	}
2919
2920	leaf = path->nodes[0];
2921	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2922
2923	bctl->fs_info = fs_info;
2924	bctl->flags = btrfs_balance_flags(leaf, item);
2925	bctl->flags |= BTRFS_BALANCE_RESUME;
2926
2927	btrfs_balance_data(leaf, item, &disk_bargs);
2928	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2929	btrfs_balance_meta(leaf, item, &disk_bargs);
2930	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2931	btrfs_balance_sys(leaf, item, &disk_bargs);
2932	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2933
2934	mutex_lock(&fs_info->volume_mutex);
2935	mutex_lock(&fs_info->balance_mutex);
2936
2937	set_balance_control(bctl);
2938
2939	mutex_unlock(&fs_info->balance_mutex);
2940	mutex_unlock(&fs_info->volume_mutex);
2941out:
2942	btrfs_free_path(path);
2943	return ret;
2944}
2945
2946int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2947{
2948	int ret = 0;
2949
2950	mutex_lock(&fs_info->balance_mutex);
2951	if (!fs_info->balance_ctl) {
2952		mutex_unlock(&fs_info->balance_mutex);
2953		return -ENOTCONN;
2954	}
2955
2956	if (atomic_read(&fs_info->balance_running)) {
2957		atomic_inc(&fs_info->balance_pause_req);
2958		mutex_unlock(&fs_info->balance_mutex);
2959
2960		wait_event(fs_info->balance_wait_q,
2961			   atomic_read(&fs_info->balance_running) == 0);
2962
2963		mutex_lock(&fs_info->balance_mutex);
2964		/* we are good with balance_ctl ripped off from under us */
2965		BUG_ON(atomic_read(&fs_info->balance_running));
2966		atomic_dec(&fs_info->balance_pause_req);
2967	} else {
2968		ret = -ENOTCONN;
2969	}
2970
2971	mutex_unlock(&fs_info->balance_mutex);
2972	return ret;
2973}
2974
2975int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
2976{
2977	mutex_lock(&fs_info->balance_mutex);
2978	if (!fs_info->balance_ctl) {
2979		mutex_unlock(&fs_info->balance_mutex);
2980		return -ENOTCONN;
2981	}
2982
2983	atomic_inc(&fs_info->balance_cancel_req);
2984	/*
2985	 * if we are running, just wait and return; the balance item is
2986	 * deleted in btrfs_balance() in this case
2987	 */
2988	if (atomic_read(&fs_info->balance_running)) {
2989		mutex_unlock(&fs_info->balance_mutex);
2990		wait_event(fs_info->balance_wait_q,
2991			   atomic_read(&fs_info->balance_running) == 0);
2992		mutex_lock(&fs_info->balance_mutex);
2993	} else {
2994		/* __cancel_balance needs volume_mutex */
2995		mutex_unlock(&fs_info->balance_mutex);
2996		mutex_lock(&fs_info->volume_mutex);
2997		mutex_lock(&fs_info->balance_mutex);
2998
2999		if (fs_info->balance_ctl)
3000			__cancel_balance(fs_info);
3001
3002		mutex_unlock(&fs_info->volume_mutex);
3003	}
3004
3005	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3006	atomic_dec(&fs_info->balance_cancel_req);
3007	mutex_unlock(&fs_info->balance_mutex);
3008	return 0;
3009}
3010
3011/*
3012 * shrinking a device means finding all of the device extents past
3013 * the new size, and then following the back refs to the chunks.
3014 * The chunk relocation code actually frees the device extents.
3015 */
3016int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3017{
3018	struct btrfs_trans_handle *trans;
3019	struct btrfs_root *root = device->dev_root;
3020	struct btrfs_dev_extent *dev_extent = NULL;
3021	struct btrfs_path *path;
3022	u64 length;
3023	u64 chunk_tree;
3024	u64 chunk_objectid;
3025	u64 chunk_offset;
3026	int ret;
3027	int slot;
3028	int failed = 0;
3029	bool retried = false;
3030	struct extent_buffer *l;
3031	struct btrfs_key key;
3032	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3033	u64 old_total = btrfs_super_total_bytes(super_copy);
3034	u64 old_size = device->total_bytes;
3035	u64 diff = device->total_bytes - new_size;
3036
3037	if (new_size >= device->total_bytes)
3038		return -EINVAL;
3039
3040	path = btrfs_alloc_path();
3041	if (!path)
3042		return -ENOMEM;
3043
3044	path->reada = 2;
3045
3046	lock_chunks(root);
3047
3048	device->total_bytes = new_size;
3049	if (device->writeable) {
3050		device->fs_devices->total_rw_bytes -= diff;
3051		spin_lock(&root->fs_info->free_chunk_lock);
3052		root->fs_info->free_chunk_space -= diff;
3053		spin_unlock(&root->fs_info->free_chunk_lock);
3054	}
3055	unlock_chunks(root);
3056
3057again:
3058	key.objectid = device->devid;
3059	key.offset = (u64)-1;
3060	key.type = BTRFS_DEV_EXTENT_KEY;
3061
3062	do {
3063		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3064		if (ret < 0)
3065			goto done;
3066
3067		ret = btrfs_previous_item(root, path, 0, key.type);
3068		if (ret < 0)
3069			goto done;
3070		if (ret) {
3071			ret = 0;
3072			btrfs_release_path(path);
3073			break;
3074		}
3075
3076		l = path->nodes[0];
3077		slot = path->slots[0];
3078		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3079
3080		if (key.objectid != device->devid) {
3081			btrfs_release_path(path);
3082			break;
3083		}
3084
3085		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3086		length = btrfs_dev_extent_length(l, dev_extent);
3087
3088		if (key.offset + length <= new_size) {
3089			btrfs_release_path(path);
3090			break;
3091		}
3092
3093		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3094		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3095		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3096		btrfs_release_path(path);
3097
3098		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3099					   chunk_offset);
3100		if (ret && ret != -ENOSPC)
3101			goto done;
3102		if (ret == -ENOSPC)
3103			failed++;
3104	} while (key.offset-- > 0);
3105
3106	if (failed && !retried) {
3107		failed = 0;
3108		retried = true;
3109		goto again;
3110	} else if (failed && retried) {
3111		ret = -ENOSPC;
3112		lock_chunks(root);
3113
3114		device->total_bytes = old_size;
3115		if (device->writeable)
3116			device->fs_devices->total_rw_bytes += diff;
3117		spin_lock(&root->fs_info->free_chunk_lock);
3118		root->fs_info->free_chunk_space += diff;
3119		spin_unlock(&root->fs_info->free_chunk_lock);
3120		unlock_chunks(root);
3121		goto done;
3122	}
3123
3124	/* Shrinking succeeded, else we would be at "done". */
3125	trans = btrfs_start_transaction(root, 0);
3126	if (IS_ERR(trans)) {
3127		ret = PTR_ERR(trans);
3128		goto done;
3129	}
3130
3131	lock_chunks(root);
3132
3133	device->disk_total_bytes = new_size;
3134	/* Now btrfs_update_device() will change the on-disk size. */
3135	ret = btrfs_update_device(trans, device);
3136	if (ret) {
3137		unlock_chunks(root);
3138		btrfs_end_transaction(trans, root);
3139		goto done;
3140	}
3141	WARN_ON(diff > old_total);
3142	btrfs_set_super_total_bytes(super_copy, old_total - diff);
3143	unlock_chunks(root);
3144	btrfs_end_transaction(trans, root);
3145done:
3146	btrfs_free_path(path);
3147	return ret;
3148}
3149
3150static int btrfs_add_system_chunk(struct btrfs_root *root,
3151			   struct btrfs_key *key,
3152			   struct btrfs_chunk *chunk, int item_size)
3153{
3154	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3155	struct btrfs_disk_key disk_key;
3156	u32 array_size;
3157	u8 *ptr;
3158
3159	array_size = btrfs_super_sys_array_size(super_copy);
3160	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3161		return -EFBIG;
3162
3163	ptr = super_copy->sys_chunk_array + array_size;
3164	btrfs_cpu_key_to_disk(&disk_key, key);
3165	memcpy(ptr, &disk_key, sizeof(disk_key));
3166	ptr += sizeof(disk_key);
3167	memcpy(ptr, chunk, item_size);
3168	item_size += sizeof(disk_key);
3169	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3170	return 0;
3171}
3172
3173/*
3174 * sort the devices in descending order by max_avail, total_avail
3175 */
3176static int btrfs_cmp_device_info(const void *a, const void *b)
3177{
3178	const struct btrfs_device_info *di_a = a;
3179	const struct btrfs_device_info *di_b = b;
3180
3181	if (di_a->max_avail > di_b->max_avail)
3182		return -1;
3183	if (di_a->max_avail < di_b->max_avail)
3184		return 1;
3185	if (di_a->total_avail > di_b->total_avail)
3186		return -1;
3187	if (di_a->total_avail < di_b->total_avail)
3188		return 1;
3189	return 0;
3190}
3191
3192static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3193			       struct btrfs_root *extent_root,
3194			       struct map_lookup **map_ret,
3195			       u64 *num_bytes_out, u64 *stripe_size_out,
3196			       u64 start, u64 type)
3197{
3198	struct btrfs_fs_info *info = extent_root->fs_info;
3199	struct btrfs_fs_devices *fs_devices = info->fs_devices;
3200	struct list_head *cur;
3201	struct map_lookup *map = NULL;
3202	struct extent_map_tree *em_tree;
3203	struct extent_map *em;
3204	struct btrfs_device_info *devices_info = NULL;
3205	u64 total_avail;
3206	int num_stripes;	/* total number of stripes to allocate */
3207	int sub_stripes;	/* sub_stripes info for map */
3208	int dev_stripes;	/* stripes per dev */
3209	int devs_max;		/* max devs to use */
3210	int devs_min;		/* min devs needed */
3211	int devs_increment;	/* ndevs has to be a multiple of this */
3212	int ncopies;		/* how many copies of the data we keep */
3213	int ret;
3214	u64 max_stripe_size;
3215	u64 max_chunk_size;
3216	u64 stripe_size;
3217	u64 num_bytes;
3218	int ndevs;
3219	int i;
3220	int j;
3221
3222	BUG_ON(!alloc_profile_is_valid(type, 0));
3223
3224	if (list_empty(&fs_devices->alloc_list))
3225		return -ENOSPC;
3226
3227	sub_stripes = 1;
3228	dev_stripes = 1;
3229	devs_increment = 1;
3230	ncopies = 1;
3231	devs_max = 0;	/* 0 == as many as possible */
3232	devs_min = 1;
3233
3234	/*
3235	 * define the properties of each RAID type.
3236	 * FIXME: move this to a global table and use it in all RAID
3237	 * calculation code
3238	 */
3239	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3240		dev_stripes = 2;
3241		ncopies = 2;
3242		devs_max = 1;
3243	} else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3244		devs_min = 2;
3245	} else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3246		devs_increment = 2;
3247		ncopies = 2;
3248		devs_max = 2;
3249		devs_min = 2;
3250	} else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3251		sub_stripes = 2;
3252		devs_increment = 2;
3253		ncopies = 2;
3254		devs_min = 4;
3255	} else {
3256		devs_max = 1;
3257	}
3258
3259	if (type & BTRFS_BLOCK_GROUP_DATA) {
3260		max_stripe_size = 1024 * 1024 * 1024;
3261		max_chunk_size = 10 * max_stripe_size;
3262	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3263		/* for larger filesystems, use larger metadata chunks */
3264		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3265			max_stripe_size = 1024 * 1024 * 1024;
3266		else
3267			max_stripe_size = 256 * 1024 * 1024;
3268		max_chunk_size = max_stripe_size;
3269	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3270		max_stripe_size = 32 * 1024 * 1024;
3271		max_chunk_size = 2 * max_stripe_size;
3272	} else {
3273		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3274		       type);
3275		BUG_ON(1);
3276	}
3277
3278	/* we don't want a chunk larger than 10% of writeable space */
3279	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3280			     max_chunk_size);
3281
3282	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3283			       GFP_NOFS);
3284	if (!devices_info)
3285		return -ENOMEM;
3286
3287	cur = fs_devices->alloc_list.next;
3288
3289	/*
3290	 * in the first pass through the devices list, we gather information
3291	 * about the available holes on each device.
3292	 */
3293	ndevs = 0;
3294	while (cur != &fs_devices->alloc_list) {
3295		struct btrfs_device *device;
3296		u64 max_avail;
3297		u64 dev_offset;
3298
3299		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3300
3301		cur = cur->next;
3302
3303		if (!device->writeable) {
3304			printk(KERN_ERR
3305			       "btrfs: read-only device in alloc_list\n");
3306			WARN_ON(1);
3307			continue;
3308		}
3309
3310		if (!device->in_fs_metadata)
3311			continue;
3312
3313		if (device->total_bytes > device->bytes_used)
3314			total_avail = device->total_bytes - device->bytes_used;
3315		else
3316			total_avail = 0;
3317
3318		/* If there is no space on this device, skip it. */
3319		if (total_avail == 0)
3320			continue;
3321
3322		ret = find_free_dev_extent(device,
3323					   max_stripe_size * dev_stripes,
3324					   &dev_offset, &max_avail);
3325		if (ret && ret != -ENOSPC)
3326			goto error;
3327
3328		if (ret == 0)
3329			max_avail = max_stripe_size * dev_stripes;
3330
3331		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3332			continue;
3333
3334		devices_info[ndevs].dev_offset = dev_offset;
3335		devices_info[ndevs].max_avail = max_avail;
3336		devices_info[ndevs].total_avail = total_avail;
3337		devices_info[ndevs].dev = device;
3338		++ndevs;
3339	}
3340
3341	/*
3342	 * now sort the devices by hole size / available space
3343	 */
3344	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3345	     btrfs_cmp_device_info, NULL);
3346
3347	/* round down to number of usable stripes */
3348	ndevs -= ndevs % devs_increment;
3349
3350	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3351		ret = -ENOSPC;
3352		goto error;
3353	}
3354
3355	if (devs_max && ndevs > devs_max)
3356		ndevs = devs_max;
3357	/*
3358	 * the primary goal is to maximize the number of stripes, so use as many
3359	 * devices as possible, even if the stripes are not maximum sized.
3360	 */
3361	stripe_size = devices_info[ndevs-1].max_avail;
3362	num_stripes = ndevs * dev_stripes;
3363
3364	if (stripe_size * ndevs > max_chunk_size * ncopies) {
3365		stripe_size = max_chunk_size * ncopies;
3366		do_div(stripe_size, ndevs);
3367	}
3368
3369	do_div(stripe_size, dev_stripes);
3370
3371	/* align to BTRFS_STRIPE_LEN */
3372	do_div(stripe_size, BTRFS_STRIPE_LEN);
3373	stripe_size *= BTRFS_STRIPE_LEN;
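	/*
	 * do_div() truncates stripe_size to a whole number of
	 * BTRFS_STRIPE_LEN units and the multiply rounds it back down,
	 * i.e. the pair floor-aligns the stripe size to the stripe
	 * boundary.
	 */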
3374
3375	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3376	if (!map) {
3377		ret = -ENOMEM;
3378		goto error;
3379	}
3380	map->num_stripes = num_stripes;
3381
3382	for (i = 0; i < ndevs; ++i) {
3383		for (j = 0; j < dev_stripes; ++j) {
3384			int s = i * dev_stripes + j;
3385			map->stripes[s].dev = devices_info[i].dev;
3386			map->stripes[s].physical = devices_info[i].dev_offset +
3387						   j * stripe_size;
3388		}
3389	}
3390	map->sector_size = extent_root->sectorsize;
3391	map->stripe_len = BTRFS_STRIPE_LEN;
3392	map->io_align = BTRFS_STRIPE_LEN;
3393	map->io_width = BTRFS_STRIPE_LEN;
3394	map->type = type;
3395	map->sub_stripes = sub_stripes;
3396
3397	*map_ret = map;
3398	num_bytes = stripe_size * (num_stripes / ncopies);
3399
3400	*stripe_size_out = stripe_size;
3401	*num_bytes_out = num_bytes;
3402
3403	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3404
3405	em = alloc_extent_map();
3406	if (!em) {
3407		ret = -ENOMEM;
3408		goto error;
3409	}
3410	em->bdev = (struct block_device *)map;
3411	em->start = start;
3412	em->len = num_bytes;
3413	em->block_start = 0;
3414	em->block_len = em->len;
3415
3416	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3417	write_lock(&em_tree->lock);
3418	ret = add_extent_mapping(em_tree, em);
3419	write_unlock(&em_tree->lock);
3420	free_extent_map(em);
3421	if (ret)
3422		goto error;
3423
3424	ret = btrfs_make_block_group(trans, extent_root, 0, type,
3425				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3426				     start, num_bytes);
3427	if (ret)
3428		goto error;
3429
3430	for (i = 0; i < map->num_stripes; ++i) {
3431		struct btrfs_device *device;
3432		u64 dev_offset;
3433
3434		device = map->stripes[i].dev;
3435		dev_offset = map->stripes[i].physical;
3436
3437		ret = btrfs_alloc_dev_extent(trans, device,
3438				info->chunk_root->root_key.objectid,
3439				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3440				start, dev_offset, stripe_size);
3441		if (ret) {
3442			btrfs_abort_transaction(trans, extent_root, ret);
3443			goto error;
3444		}
3445	}
3446
3447	kfree(devices_info);
3448	return 0;
3449
3450error:
3451	kfree(map);
3452	kfree(devices_info);
3453	return ret;
3454}
3455
3456static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3457				struct btrfs_root *extent_root,
3458				struct map_lookup *map, u64 chunk_offset,
3459				u64 chunk_size, u64 stripe_size)
3460{
3461	u64 dev_offset;
3462	struct btrfs_key key;
3463	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3464	struct btrfs_device *device;
3465	struct btrfs_chunk *chunk;
3466	struct btrfs_stripe *stripe;
3467	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3468	int index = 0;
3469	int ret;
3470
3471	chunk = kzalloc(item_size, GFP_NOFS);
3472	if (!chunk)
3473		return -ENOMEM;
3474
3475	index = 0;
3476	while (index < map->num_stripes) {
3477		device = map->stripes[index].dev;
3478		device->bytes_used += stripe_size;
3479		ret = btrfs_update_device(trans, device);
3480		if (ret)
3481			goto out_free;
3482		index++;
3483	}
3484
3485	spin_lock(&extent_root->fs_info->free_chunk_lock);
3486	extent_root->fs_info->free_chunk_space -= (stripe_size *
3487						   map->num_stripes);
3488	spin_unlock(&extent_root->fs_info->free_chunk_lock);
3489
3490	index = 0;
3491	stripe = &chunk->stripe;
3492	while (index < map->num_stripes) {
3493		device = map->stripes[index].dev;
3494		dev_offset = map->stripes[index].physical;
3495
3496		btrfs_set_stack_stripe_devid(stripe, device->devid);
3497		btrfs_set_stack_stripe_offset(stripe, dev_offset);
3498		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3499		stripe++;
3500		index++;
3501	}
3502
3503	btrfs_set_stack_chunk_length(chunk, chunk_size);
3504	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3505	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3506	btrfs_set_stack_chunk_type(chunk, map->type);
3507	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3508	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3509	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3510	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3511	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3512
3513	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3514	key.type = BTRFS_CHUNK_ITEM_KEY;
3515	key.offset = chunk_offset;
3516
3517	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3518
3519	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3520		/*
3521		 * TODO: Cleanup of inserted chunk root in case of
3522		 * failure.
3523		 */
3524		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3525					     item_size);
3526	}
3527
3528out_free:
3529	kfree(chunk);
3530	return ret;
3531}
3532
3533/*
3534 * Chunk allocation falls into two parts. The first part does the work
3535 * that makes the newly allocated chunk usable, but does not modify
3536 * the chunk tree. The second part does the work that requires
3537 * modifying the chunk tree. This division is important for the
3538 * bootstrap process of adding storage to a seed btrfs.
3539 */
3540int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3541		      struct btrfs_root *extent_root, u64 type)
3542{
3543	u64 chunk_offset;
3544	u64 chunk_size;
3545	u64 stripe_size;
3546	struct map_lookup *map;
3547	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3548	int ret;
3549
3550	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3551			      &chunk_offset);
3552	if (ret)
3553		return ret;
3554
3555	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3556				  &stripe_size, chunk_offset, type);
3557	if (ret)
3558		return ret;
3559
3560	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3561				   chunk_size, stripe_size);
3562	if (ret)
3563		return ret;
3564	return 0;
3565}
3566
3567static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3568					 struct btrfs_root *root,
3569					 struct btrfs_device *device)
3570{
3571	u64 chunk_offset;
3572	u64 sys_chunk_offset;
3573	u64 chunk_size;
3574	u64 sys_chunk_size;
3575	u64 stripe_size;
3576	u64 sys_stripe_size;
3577	u64 alloc_profile;
3578	struct map_lookup *map;
3579	struct map_lookup *sys_map;
3580	struct btrfs_fs_info *fs_info = root->fs_info;
3581	struct btrfs_root *extent_root = fs_info->extent_root;
3582	int ret;
3583
3584	ret = find_next_chunk(fs_info->chunk_root,
3585			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3586	if (ret)
3587		return ret;
3588
3589	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3590				fs_info->avail_metadata_alloc_bits;
3591	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3592
3593	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3594				  &stripe_size, chunk_offset, alloc_profile);
3595	if (ret)
3596		return ret;
3597
3598	sys_chunk_offset = chunk_offset + chunk_size;
3599
3600	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3601				fs_info->avail_system_alloc_bits;
3602	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3603
3604	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3605				  &sys_chunk_size, &sys_stripe_size,
3606				  sys_chunk_offset, alloc_profile);
3607	if (ret)
3608		goto abort;
3609
3610	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3611	if (ret)
3612		goto abort;
3613
3614	/*
3615	 * Modifying the chunk tree requires allocating new blocks from
3616	 * both the system and the metadata block groups, so we can only
3617	 * do operations that modify the chunk tree after both block
3618	 * groups have been created.
3619	 */
3620	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3621				   chunk_size, stripe_size);
3622	if (ret)
3623		goto abort;
3624
3625	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3626				   sys_chunk_offset, sys_chunk_size,
3627				   sys_stripe_size);
3628	if (ret)
3629		goto abort;
3630
3631	return 0;
3632
3633abort:
3634	btrfs_abort_transaction(trans, root, ret);
3635	return ret;
3636}
3637
3638int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3639{
3640	struct extent_map *em;
3641	struct map_lookup *map;
3642	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3643	int readonly = 0;
3644	int i;
3645
3646	read_lock(&map_tree->map_tree.lock);
3647	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3648	read_unlock(&map_tree->map_tree.lock);
3649	if (!em)
3650		return 1;
3651
3652	if (btrfs_test_opt(root, DEGRADED)) {
3653		free_extent_map(em);
3654		return 0;
3655	}
3656
3657	map = (struct map_lookup *)em->bdev;
3658	for (i = 0; i < map->num_stripes; i++) {
3659		if (!map->stripes[i].dev->writeable) {
3660			readonly = 1;
3661			break;
3662		}
3663	}
3664	free_extent_map(em);
3665	return readonly;
3666}
3667
3668void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3669{
3670	extent_map_tree_init(&tree->map_tree);
3671}
3672
3673void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3674{
3675	struct extent_map *em;
3676
3677	while (1) {
3678		write_lock(&tree->map_tree.lock);
3679		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3680		if (em)
3681			remove_extent_mapping(&tree->map_tree, em);
3682		write_unlock(&tree->map_tree.lock);
3683		if (!em)
3684			break;
3685		kfree(em->bdev);
3686		/* once for us */
3687		free_extent_map(em);
3688		/* once for the tree */
3689		free_extent_map(em);
3690	}
3691}
3692
3693int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3694{
3695	struct extent_map *em;
3696	struct map_lookup *map;
3697	struct extent_map_tree *em_tree = &map_tree->map_tree;
3698	int ret;
3699
3700	read_lock(&em_tree->lock);
3701	em = lookup_extent_mapping(em_tree, logical, len);
3702	read_unlock(&em_tree->lock);
3703	BUG_ON(!em);
3704
3705	BUG_ON(em->start > logical || em->start + em->len < logical);
3706	map = (struct map_lookup *)em->bdev;
3707	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3708		ret = map->num_stripes;
3709	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3710		ret = map->sub_stripes;
3711	else
3712		ret = 1;
3713	free_extent_map(em);
3714	return ret;
3715}
3716
3717static int find_live_mirror(struct map_lookup *map, int first, int num,
3718			    int optimal)
3719{
3720	int i;
3721	if (map->stripes[optimal].dev->bdev)
3722		return optimal;
3723	for (i = first; i < first + num; i++) {
3724		if (map->stripes[i].dev->bdev)
3725			return i;
3726	}
3727	/* we couldn't find one that doesn't fail.  Just return something
3728	 * and the io error handling code will clean up eventually
3729	 */
3730	return optimal;
3731}
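/*
 * Callers pass current->pid % num_stripes (or % sub_stripes) as the
 * preferred mirror, which spreads read I/O across the available copies
 * on a per-task basis.
 */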
3732
3733static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3734			     u64 logical, u64 *length,
3735			     struct btrfs_bio **bbio_ret,
3736			     int mirror_num)
3737{
3738	struct extent_map *em;
3739	struct map_lookup *map;
3740	struct extent_map_tree *em_tree = &map_tree->map_tree;
3741	u64 offset;
3742	u64 stripe_offset;
3743	u64 stripe_end_offset;
3744	u64 stripe_nr;
3745	u64 stripe_nr_orig;
3746	u64 stripe_nr_end;
3747	int stripe_index;
3748	int i;
3749	int ret = 0;
3750	int num_stripes;
3751	int max_errors = 0;
3752	struct btrfs_bio *bbio = NULL;
3753
3754	read_lock(&em_tree->lock);
3755	em = lookup_extent_mapping(em_tree, logical, *length);
3756	read_unlock(&em_tree->lock);
3757
3758	if (!em) {
3759		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3760		       (unsigned long long)logical,
3761		       (unsigned long long)*length);
3762		BUG();
3763	}
3764
3765	BUG_ON(em->start > logical || em->start + em->len < logical);
3766	map = (struct map_lookup *)em->bdev;
3767	offset = logical - em->start;
3768
3769	if (mirror_num > map->num_stripes)
3770		mirror_num = 0;
3771
3772	stripe_nr = offset;
3773	/*
3774	 * stripe_nr counts the total number of stripes we have to stride
3775	 * to get to this block
3776	 */
3777	do_div(stripe_nr, map->stripe_len);
3778
3779	stripe_offset = stripe_nr * map->stripe_len;
3780	BUG_ON(offset < stripe_offset);
3781
3782	/* stripe_offset is the offset of this block in its stripe */
3783	stripe_offset = offset - stripe_offset;
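	/*
	 * Worked example with a 64KiB stripe_len: for offset == 200KiB,
	 * stripe_nr is 3 (three whole stripes are strided over) and
	 * stripe_offset == 200KiB - 3 * 64KiB == 8KiB into the fourth
	 * stripe.
	 */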
3784
3785	if (rw & REQ_DISCARD)
3786		*length = min_t(u64, em->len - offset, *length);
3787	else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3788		/* we limit the length of each bio to what fits in a stripe */
3789		*length = min_t(u64, em->len - offset,
3790				map->stripe_len - stripe_offset);
3791	} else {
3792		*length = em->len - offset;
3793	}
3794
3795	if (!bbio_ret)
3796		goto out;
3797
3798	num_stripes = 1;
3799	stripe_index = 0;
3800	stripe_nr_orig = stripe_nr;
3801	stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3802			(~(map->stripe_len - 1));
3803	do_div(stripe_nr_end, map->stripe_len);
3804	stripe_end_offset = stripe_nr_end * map->stripe_len -
3805			    (offset + *length);
3806	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3807		if (rw & REQ_DISCARD)
3808			num_stripes = min_t(u64, map->num_stripes,
3809					    stripe_nr_end - stripe_nr_orig);
3810		stripe_index = do_div(stripe_nr, map->num_stripes);
3811	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3812		if (rw & (REQ_WRITE | REQ_DISCARD))
3813			num_stripes = map->num_stripes;
3814		else if (mirror_num)
3815			stripe_index = mirror_num - 1;
3816		else {
3817			stripe_index = find_live_mirror(map, 0,
3818					    map->num_stripes,
3819					    current->pid % map->num_stripes);
3820			mirror_num = stripe_index + 1;
3821		}
3822
3823	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3824		if (rw & (REQ_WRITE | REQ_DISCARD)) {
3825			num_stripes = map->num_stripes;
3826		} else if (mirror_num) {
3827			stripe_index = mirror_num - 1;
3828		} else {
3829			mirror_num = 1;
3830		}
3831
3832	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3833		int factor = map->num_stripes / map->sub_stripes;
3834
3835		stripe_index = do_div(stripe_nr, factor);
3836		stripe_index *= map->sub_stripes;
3837
3838		if (rw & REQ_WRITE)
3839			num_stripes = map->sub_stripes;
3840		else if (rw & REQ_DISCARD)
3841			num_stripes = min_t(u64, map->sub_stripes *
3842					    (stripe_nr_end - stripe_nr_orig),
3843					    map->num_stripes);
3844		else if (mirror_num)
3845			stripe_index += mirror_num - 1;
3846		else {
3847			int old_stripe_index = stripe_index;
3848			stripe_index = find_live_mirror(map, stripe_index,
3849					      map->sub_stripes, stripe_index +
3850					      current->pid % map->sub_stripes);
3851			mirror_num = stripe_index - old_stripe_index + 1;
3852		}
3853	} else {
3854		/*
3855		 * after this do_div call, stripe_nr is the number of stripes
3856		 * on this device we have to walk to find the data, and
3857		 * stripe_index is the number of our device in the stripe array
3858		 */
3859		stripe_index = do_div(stripe_nr, map->num_stripes);
3860		mirror_num = stripe_index + 1;
3861	}
3862	BUG_ON(stripe_index >= map->num_stripes);
3863
3864	bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3865	if (!bbio) {
3866		ret = -ENOMEM;
3867		goto out;
3868	}
3869	atomic_set(&bbio->error, 0);
3870
3871	if (rw & REQ_DISCARD) {
3872		int factor = 0;
3873		int sub_stripes = 0;
3874		u64 stripes_per_dev = 0;
3875		u32 remaining_stripes = 0;
3876		u32 last_stripe = 0;
3877
3878		if (map->type &
3879		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3880			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3881				sub_stripes = 1;
3882			else
3883				sub_stripes = map->sub_stripes;
3884
3885			factor = map->num_stripes / sub_stripes;
3886			stripes_per_dev = div_u64_rem(stripe_nr_end -
3887						      stripe_nr_orig,
3888						      factor,
3889						      &remaining_stripes);
3890			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3891			last_stripe *= sub_stripes;
3892		}
3893
3894		for (i = 0; i < num_stripes; i++) {
3895			bbio->stripes[i].physical =
3896				map->stripes[stripe_index].physical +
3897				stripe_offset + stripe_nr * map->stripe_len;
3898			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3899
3900			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3901					 BTRFS_BLOCK_GROUP_RAID10)) {
3902				bbio->stripes[i].length = stripes_per_dev *
3903							  map->stripe_len;
3904
3905				if (i / sub_stripes < remaining_stripes)
3906					bbio->stripes[i].length +=
3907						map->stripe_len;
3908
3909				/*
3910				 * Special for the first stripe and
3911				 * the last stripe:
3912				 *
3913				 * |-------|...|-------|
3914				 *     |----------|
3915				 *    off     end_off
3916				 */
3917				if (i < sub_stripes)
3918					bbio->stripes[i].length -=
3919						stripe_offset;
3920
3921				if (stripe_index >= last_stripe &&
3922				    stripe_index <= (last_stripe +
3923						     sub_stripes - 1))
3924					bbio->stripes[i].length -=
3925						stripe_end_offset;
3926
3927				if (i == sub_stripes - 1)
3928					stripe_offset = 0;
3929			} else
3930				bbio->stripes[i].length = *length;
3931
3932			stripe_index++;
3933			if (stripe_index == map->num_stripes) {
3934				/* This could only happen for RAID0/10 */
3935				stripe_index = 0;
3936				stripe_nr++;
3937			}
3938		}
3939	} else {
3940		for (i = 0; i < num_stripes; i++) {
3941			bbio->stripes[i].physical =
3942				map->stripes[stripe_index].physical +
3943				stripe_offset +
3944				stripe_nr * map->stripe_len;
3945			bbio->stripes[i].dev =
3946				map->stripes[stripe_index].dev;
3947			stripe_index++;
3948		}
3949	}
3950
3951	if (rw & REQ_WRITE) {
3952		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3953				 BTRFS_BLOCK_GROUP_RAID10 |
3954				 BTRFS_BLOCK_GROUP_DUP)) {
3955			max_errors = 1;
3956		}
3957	}
3958
3959	*bbio_ret = bbio;
3960	bbio->num_stripes = num_stripes;
3961	bbio->max_errors = max_errors;
3962	bbio->mirror_num = mirror_num;
3963out:
3964	free_extent_map(em);
3965	return ret;
3966}
3967
3968int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3969		      u64 logical, u64 *length,
3970		      struct btrfs_bio **bbio_ret, int mirror_num)
3971{
3972	return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3973				 mirror_num);
3974}
3975
3976int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3977		     u64 chunk_start, u64 physical, u64 devid,
3978		     u64 **logical, int *naddrs, int *stripe_len)
3979{
3980	struct extent_map_tree *em_tree = &map_tree->map_tree;
3981	struct extent_map *em;
3982	struct map_lookup *map;
3983	u64 *buf;
3984	u64 bytenr;
3985	u64 length;
3986	u64 stripe_nr;
3987	int i, j, nr = 0;
3988
3989	read_lock(&em_tree->lock);
3990	em = lookup_extent_mapping(em_tree, chunk_start, 1);
3991	read_unlock(&em_tree->lock);
3992
3993	BUG_ON(!em || em->start != chunk_start);
3994	map = (struct map_lookup *)em->bdev;
3995
3996	length = em->len;
3997	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3998		do_div(length, map->num_stripes / map->sub_stripes);
3999	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4000		do_div(length, map->num_stripes);
4001
4002	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4003	BUG_ON(!buf); /* -ENOMEM */
4004
4005	for (i = 0; i < map->num_stripes; i++) {
4006		if (devid && map->stripes[i].dev->devid != devid)
4007			continue;
4008		if (map->stripes[i].physical > physical ||
4009		    map->stripes[i].physical + length <= physical)
4010			continue;
4011
4012		stripe_nr = physical - map->stripes[i].physical;
4013		do_div(stripe_nr, map->stripe_len);
4014
4015		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4016			stripe_nr = stripe_nr * map->num_stripes + i;
4017			do_div(stripe_nr, map->sub_stripes);
4018		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4019			stripe_nr = stripe_nr * map->num_stripes + i;
4020		}
4021		bytenr = chunk_start + stripe_nr * map->stripe_len;
4022		WARN_ON(nr >= map->num_stripes);
4023		for (j = 0; j < nr; j++) {
4024			if (buf[j] == bytenr)
4025				break;
4026		}
4027		if (j == nr) {
4028			WARN_ON(nr >= map->num_stripes);
4029			buf[nr++] = bytenr;
4030		}
4031	}
4032
4033	*logical = buf;
4034	*naddrs = nr;
4035	*stripe_len = map->stripe_len;
4036
4037	free_extent_map(em);
4038	return 0;
4039}
4040
4041static void *merge_stripe_index_into_bio_private(void *bi_private,
4042						 unsigned int stripe_index)
4043{
4044	/*
4045	 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4046	 * at most 1.
4047	 * The alternative solution (instead of stealing bits from the
4048	 * pointer) would be to allocate an intermediate structure
4049	 * that contains the old private pointer plus the stripe_index.
4050	 */
4051	BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4052	BUG_ON(stripe_index > 3);
4053	return (void *)(((uintptr_t)bi_private) | stripe_index);
4054}
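/*
 * This works because bi_private points to a kmalloc()ed btrfs_bio,
 * which is at least 4-byte aligned, so the low two bits of the
 * pointer are known to be zero and can carry a stripe index of 0-3.
 */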
4055
4056static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4057{
4058	return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4059}
4060
4061static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4062{
4063	return (unsigned int)((uintptr_t)bi_private) & 3;
4064}
4065
4066static void btrfs_end_bio(struct bio *bio, int err)
4067{
4068	struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4069	int is_orig_bio = 0;
4070
4071	if (err) {
4072		atomic_inc(&bbio->error);
4073		if (err == -EIO || err == -EREMOTEIO) {
4074			unsigned int stripe_index =
4075				extract_stripe_index_from_bio_private(
4076					bio->bi_private);
4077			struct btrfs_device *dev;
4078
4079			BUG_ON(stripe_index >= bbio->num_stripes);
4080			dev = bbio->stripes[stripe_index].dev;
4081			if (dev->bdev) {
4082				if (bio->bi_rw & WRITE)
4083					btrfs_dev_stat_inc(dev,
4084						BTRFS_DEV_STAT_WRITE_ERRS);
4085				else
4086					btrfs_dev_stat_inc(dev,
4087						BTRFS_DEV_STAT_READ_ERRS);
4088				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4089					btrfs_dev_stat_inc(dev,
4090						BTRFS_DEV_STAT_FLUSH_ERRS);
4091				btrfs_dev_stat_print_on_error(dev);
4092			}
4093		}
4094	}
4095
4096	if (bio == bbio->orig_bio)
4097		is_orig_bio = 1;
4098
4099	if (atomic_dec_and_test(&bbio->stripes_pending)) {
4100		if (!is_orig_bio) {
4101			bio_put(bio);
4102			bio = bbio->orig_bio;
4103		}
4104		bio->bi_private = bbio->private;
4105		bio->bi_end_io = bbio->end_io;
4106		bio->bi_bdev = (struct block_device *)
4107					(unsigned long)bbio->mirror_num;
4108		/* only send an error to the higher layers if it is
4109		 * beyond the tolerance of the multi-bio
4110		 */
4111		if (atomic_read(&bbio->error) > bbio->max_errors) {
4112			err = -EIO;
4113		} else {
4114			/*
4115			 * this bio is actually up to date, we didn't
4116			 * go over the max number of errors
4117			 */
4118			set_bit(BIO_UPTODATE, &bio->bi_flags);
4119			err = 0;
4120		}
4121		kfree(bbio);
4122
4123		bio_endio(bio, err);
4124	} else if (!is_orig_bio) {
4125		bio_put(bio);
4126	}
4127}
4128
4129struct async_sched {
4130	struct bio *bio;
4131	int rw;
4132	struct btrfs_fs_info *info;
4133	struct btrfs_work work;
4134};
4135
4136/*
4137 * see run_scheduled_bios for a description of why bios are collected for
4138 * async submit.
4139 *
4140 * This will add one bio to the pending list for a device and make sure
4141 * the work struct is scheduled.
4142 */
4143static noinline void schedule_bio(struct btrfs_root *root,
4144				 struct btrfs_device *device,
4145				 int rw, struct bio *bio)
4146{
4147	int should_queue = 1;
4148	struct btrfs_pending_bios *pending_bios;
4149
4150	/* don't bother with additional async steps for reads, right now */
4151	if (!(rw & REQ_WRITE)) {
4152		bio_get(bio);
4153		btrfsic_submit_bio(rw, bio);
4154		bio_put(bio);
4155		return;
4156	}
4157
4158	/*
4159	 * nr_async_bios allows us to reliably return congestion to the
4160	 * higher layers.  Otherwise, the async bio makes it appear we have
4161	 * made progress against dirty pages when we've really just put it
4162	 * on a queue for later
4163	 */
4164	atomic_inc(&root->fs_info->nr_async_bios);
4165	WARN_ON(bio->bi_next);
4166	bio->bi_next = NULL;
4167	bio->bi_rw |= rw;
4168
4169	spin_lock(&device->io_lock);
4170	if (bio->bi_rw & REQ_SYNC)
4171		pending_bios = &device->pending_sync_bios;
4172	else
4173		pending_bios = &device->pending_bios;
4174
4175	if (pending_bios->tail)
4176		pending_bios->tail->bi_next = bio;
4177
4178	pending_bios->tail = bio;
4179	if (!pending_bios->head)
4180		pending_bios->head = bio;
4181	if (device->running_pending)
4182		should_queue = 0;
4183
4184	spin_unlock(&device->io_lock);
4185
4186	if (should_queue)
4187		btrfs_queue_worker(&root->fs_info->submit_workers,
4188				   &device->work);
4189}
4190
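/*
 * Main I/O entry point of the volume layer.  The bio's logical address
 * is mapped to its physical stripes, the bio is cloned once for each
 * additional stripe, each clone is tagged with its stripe index and
 * submitted (directly, or via schedule_bio() for async writes), and
 * stripes whose device is unusable are failed with -EIO.
 */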
4191int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4192		  int mirror_num, int async_submit)
4193{
4194	struct btrfs_mapping_tree *map_tree;
4195	struct btrfs_device *dev;
4196	struct bio *first_bio = bio;
4197	u64 logical = (u64)bio->bi_sector << 9;
4198	u64 length = 0;
4199	u64 map_length;
4200	int ret;
4201	int dev_nr = 0;
4202	int total_devs = 1;
4203	struct btrfs_bio *bbio = NULL;
4204
4205	length = bio->bi_size;
4206	map_tree = &root->fs_info->mapping_tree;
4207	map_length = length;
4208
4209	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4210			      mirror_num);
4211	if (ret) /* -ENOMEM */
4212		return ret;
4213
4214	total_devs = bbio->num_stripes;
4215	if (map_length < length) {
4216		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
4217		       "map len %llu\n", (unsigned long long)logical,
4218		       (unsigned long long)length,
4219		       (unsigned long long)map_length);
4220		BUG();
4221	}
4222
4223	bbio->orig_bio = first_bio;
4224	bbio->private = first_bio->bi_private;
4225	bbio->end_io = first_bio->bi_end_io;
4226	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4227
4228	while (dev_nr < total_devs) {
4229		if (dev_nr < total_devs - 1) {
4230			bio = bio_clone(first_bio, GFP_NOFS);
4231			BUG_ON(!bio); /* -ENOMEM */
4232		} else {
4233			bio = first_bio;
4234		}
4235		bio->bi_private = bbio;
4236		bio->bi_private = merge_stripe_index_into_bio_private(
4237				bio->bi_private, (unsigned int)dev_nr);
4238		bio->bi_end_io = btrfs_end_bio;
4239		bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4240		dev = bbio->stripes[dev_nr].dev;
4241		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4242#ifdef DEBUG
4243			struct rcu_string *name;
4244
4245			rcu_read_lock();
4246			name = rcu_dereference(dev->name);
4247			pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4248				 "(%s id %llu), size=%u\n", rw,
4249				 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4250				 name->str, dev->devid, bio->bi_size);
4251			rcu_read_unlock();
4252#endif
4253			bio->bi_bdev = dev->bdev;
4254			if (async_submit)
4255				schedule_bio(root, dev, rw, bio);
4256			else
4257				btrfsic_submit_bio(rw, bio);
4258		} else {
4259			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4260			bio->bi_sector = logical >> 9;
4261			bio_endio(bio, -EIO);
4262		}
4263		dev_nr++;
4264	}
4265	return 0;
4266}
4267
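/*
 * Search the filesystem's own device list, and then each seed
 * filesystem chained behind it, for the device matching @devid
 * (and @uuid/@fsid when they are given).
 */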
4268struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4269				       u8 *uuid, u8 *fsid)
4270{
4271	struct btrfs_device *device;
4272	struct btrfs_fs_devices *cur_devices;
4273
4274	cur_devices = root->fs_info->fs_devices;
4275	while (cur_devices) {
4276		if (!fsid ||
4277		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4278			device = __find_device(&cur_devices->devices,
4279					       devid, uuid);
4280			if (device)
4281				return device;
4282		}
4283		cur_devices = cur_devices->seed;
4284	}
4285	return NULL;
4286}
4287
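/*
 * Create a placeholder btrfs_device for a device that metadata
 * references but that was not present at scan time: it carries the
 * devid/uuid, is flagged ->missing and counted in missing_devices,
 * and has no backing bdev.
 */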
4288static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4289					    u64 devid, u8 *dev_uuid)
4290{
4291	struct btrfs_device *device;
4292	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4293
4294	device = kzalloc(sizeof(*device), GFP_NOFS);
4295	if (!device)
4296		return NULL;
4297	list_add(&device->dev_list,
4298		 &fs_devices->devices);
4299	device->dev_root = root->fs_info->dev_root;
4300	device->devid = devid;
4301	device->work.func = pending_bios_fn;
4302	device->fs_devices = fs_devices;
4303	device->missing = 1;
4304	fs_devices->num_devices++;
4305	fs_devices->missing_devices++;
4306	spin_lock_init(&device->io_lock);
4307	INIT_LIST_HEAD(&device->dev_alloc_list);
4308	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4309	return device;
4310}
4311
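/*
 * Turn one on-disk chunk item into an in-memory extent_map whose bdev
 * field carries a map_lookup with the per-stripe device/physical
 * layout, and insert it into the mapping tree.  A stripe on an unknown
 * device is an error unless the mount is degraded, in which case
 * add_missing_dev() stubs the device in.
 */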
4312static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4313			  struct extent_buffer *leaf,
4314			  struct btrfs_chunk *chunk)
4315{
4316	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4317	struct map_lookup *map;
4318	struct extent_map *em;
4319	u64 logical;
4320	u64 length;
4321	u64 devid;
4322	u8 uuid[BTRFS_UUID_SIZE];
4323	int num_stripes;
4324	int ret;
4325	int i;
4326
4327	logical = key->offset;
4328	length = btrfs_chunk_length(leaf, chunk);
4329
4330	read_lock(&map_tree->map_tree.lock);
4331	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4332	read_unlock(&map_tree->map_tree.lock);
4333
4334	/* already mapped? */
4335	if (em && em->start <= logical && em->start + em->len > logical) {
4336		free_extent_map(em);
4337		return 0;
4338	} else if (em) {
4339		free_extent_map(em);
4340	}
4341
4342	em = alloc_extent_map();
4343	if (!em)
4344		return -ENOMEM;
4345	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4346	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4347	if (!map) {
4348		free_extent_map(em);
4349		return -ENOMEM;
4350	}
4351
4352	em->bdev = (struct block_device *)map;
4353	em->start = logical;
4354	em->len = length;
4355	em->block_start = 0;
4356	em->block_len = em->len;
4357
4358	map->num_stripes = num_stripes;
4359	map->io_width = btrfs_chunk_io_width(leaf, chunk);
4360	map->io_align = btrfs_chunk_io_align(leaf, chunk);
4361	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4362	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4363	map->type = btrfs_chunk_type(leaf, chunk);
4364	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4365	for (i = 0; i < num_stripes; i++) {
4366		map->stripes[i].physical =
4367			btrfs_stripe_offset_nr(leaf, chunk, i);
4368		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4369		read_extent_buffer(leaf, uuid, (unsigned long)
4370				   btrfs_stripe_dev_uuid_nr(chunk, i),
4371				   BTRFS_UUID_SIZE);
4372		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4373							NULL);
4374		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4375			kfree(map);
4376			free_extent_map(em);
4377			return -EIO;
4378		}
4379		if (!map->stripes[i].dev) {
4380			map->stripes[i].dev =
4381				add_missing_dev(root, devid, uuid);
4382			if (!map->stripes[i].dev) {
4383				kfree(map);
4384				free_extent_map(em);
4385				return -EIO;
4386			}
4387		}
4388		map->stripes[i].dev->in_fs_metadata = 1;
4389	}
4390
4391	write_lock(&map_tree->map_tree.lock);
4392	ret = add_extent_mapping(&map_tree->map_tree, em);
4393	write_unlock(&map_tree->map_tree.lock);
4394	BUG_ON(ret); /* Tree corruption */
4395	free_extent_map(em);
4396
4397	return 0;
4398}
4399
4400static void fill_device_from_item(struct extent_buffer *leaf,
4401				 struct btrfs_dev_item *dev_item,
4402				 struct btrfs_device *device)
4403{
4404	unsigned long ptr;
4405
4406	device->devid = btrfs_device_id(leaf, dev_item);
4407	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4408	device->total_bytes = device->disk_total_bytes;
4409	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4410	device->type = btrfs_device_type(leaf, dev_item);
4411	device->io_align = btrfs_device_io_align(leaf, dev_item);
4412	device->io_width = btrfs_device_io_width(leaf, dev_item);
4413	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4414
4415	ptr = (unsigned long)btrfs_device_uuid(dev_item);
4416	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4417}
4418
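/*
 * Make the seed filesystem identified by @fsid usable: if it is not
 * already on the ->seed chain, find it among the scanned filesystems,
 * clone and open its device list read-only, verify that it really is
 * a seed, and link it into this filesystem's seed chain.
 */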
4419static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4420{
4421	struct btrfs_fs_devices *fs_devices;
4422	int ret;
4423
4424	BUG_ON(!mutex_is_locked(&uuid_mutex));
4425
4426	fs_devices = root->fs_info->fs_devices->seed;
4427	while (fs_devices) {
4428		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4429			ret = 0;
4430			goto out;
4431		}
4432		fs_devices = fs_devices->seed;
4433	}
4434
4435	fs_devices = find_fsid(fsid);
4436	if (!fs_devices) {
4437		ret = -ENOENT;
4438		goto out;
4439	}
4440
4441	fs_devices = clone_fs_devices(fs_devices);
4442	if (IS_ERR(fs_devices)) {
4443		ret = PTR_ERR(fs_devices);
4444		goto out;
4445	}
4446
4447	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4448				   root->fs_info->bdev_holder);
4449	if (ret) {
4450		free_fs_devices(fs_devices);
4451		goto out;
4452	}
4453
4454	if (!fs_devices->seeding) {
4455		__btrfs_close_devices(fs_devices);
4456		free_fs_devices(fs_devices);
4457		ret = -EINVAL;
4458		goto out;
4459	}
4460
4461	fs_devices->seed = root->fs_info->fs_devices->seed;
4462	root->fs_info->fs_devices->seed = fs_devices;
4463out:
4464	return ret;
4465}
4466
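/*
 * Process one on-disk dev item: open the owning seed filesystem when
 * the fsid differs from ours, look up (or, on a degraded mount,
 * fabricate) the matching btrfs_device, then refresh its fields and
 * the free-space accounting from the item.
 */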
4467static int read_one_dev(struct btrfs_root *root,
4468			struct extent_buffer *leaf,
4469			struct btrfs_dev_item *dev_item)
4470{
4471	struct btrfs_device *device;
4472	u64 devid;
4473	int ret;
4474	u8 fs_uuid[BTRFS_UUID_SIZE];
4475	u8 dev_uuid[BTRFS_UUID_SIZE];
4476
4477	devid = btrfs_device_id(leaf, dev_item);
4478	read_extent_buffer(leaf, dev_uuid,
4479			   (unsigned long)btrfs_device_uuid(dev_item),
4480			   BTRFS_UUID_SIZE);
4481	read_extent_buffer(leaf, fs_uuid,
4482			   (unsigned long)btrfs_device_fsid(dev_item),
4483			   BTRFS_UUID_SIZE);
4484
4485	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4486		ret = open_seed_devices(root, fs_uuid);
4487		if (ret && !btrfs_test_opt(root, DEGRADED))
4488			return ret;
4489	}
4490
4491	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4492	if (!device || !device->bdev) {
4493		if (!btrfs_test_opt(root, DEGRADED))
4494			return -EIO;
4495
4496		if (!device) {
4497			printk(KERN_WARNING "warning: devid %llu missing\n",
4498			       (unsigned long long)devid);
4499			device = add_missing_dev(root, devid, dev_uuid);
4500			if (!device)
4501				return -ENOMEM;
4502		} else if (!device->missing) {
4503			/*
4504			 * this happens when a device that was properly setup
4505			 * in the device info lists suddenly goes bad.
4506			 * device->bdev is NULL, and so we have to set
4507			 * device->missing to one here
4508			 */
4509			root->fs_info->fs_devices->missing_devices++;
4510			device->missing = 1;
4511		}
4512	}
4513
4514	if (device->fs_devices != root->fs_info->fs_devices) {
4515		BUG_ON(device->writeable);
4516		if (device->generation !=
4517		    btrfs_device_generation(leaf, dev_item))
4518			return -EINVAL;
4519	}
4520
4521	fill_device_from_item(leaf, dev_item, device);
4522	device->dev_root = root->fs_info->dev_root;
4523	device->in_fs_metadata = 1;
4524	if (device->writeable) {
4525		device->fs_devices->total_rw_bytes += device->total_bytes;
4526		spin_lock(&root->fs_info->free_chunk_lock);
4527		root->fs_info->free_chunk_space += device->total_bytes -
4528			device->bytes_used;
4529		spin_unlock(&root->fs_info->free_chunk_lock);
4530	}
4531	ret = 0;
4532	return ret;
4533}
4534
4535int btrfs_read_sys_array(struct btrfs_root *root)
4536{
4537	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4538	struct extent_buffer *sb;
4539	struct btrfs_disk_key *disk_key;
4540	struct btrfs_chunk *chunk;
4541	u8 *ptr;
4542	unsigned long sb_ptr;
4543	int ret = 0;
4544	u32 num_stripes;
4545	u32 array_size;
4546	u32 len = 0;
4547	u32 cur;
4548	struct btrfs_key key;
4549
4550	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4551					  BTRFS_SUPER_INFO_SIZE);
4552	if (!sb)
4553		return -ENOMEM;
4554	btrfs_set_buffer_uptodate(sb);
4555	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4556	/*
4557	 * The sb extent buffer is artificial and just used to read the system array.
4558	 * btrfs_set_buffer_uptodate() call does not properly mark all its
4559	 * pages up-to-date when the page is larger: extent does not cover the
4560	 * whole page and consequently check_page_uptodate does not find all
4561	 * the page's extents up-to-date (the hole beyond sb),
4562	 * write_extent_buffer then triggers a WARN_ON.
4563	 *
4564	 * Regular short extents go through the mark_extent_buffer_dirty/writeback cycle,
4565	 * but sb spans only this function. Add an explicit SetPageUptodate call
4566	 * to silence the warning eg. on PowerPC 64.
4567	 */
4568	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4569		SetPageUptodate(sb->pages[0]);
4570
4571	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4572	array_size = btrfs_super_sys_array_size(super_copy);
4573
4574	ptr = super_copy->sys_chunk_array;
4575	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4576	cur = 0;
4577
4578	while (cur < array_size) {
4579		disk_key = (struct btrfs_disk_key *)ptr;
4580		btrfs_disk_key_to_cpu(&key, disk_key);
4581
4582		len = sizeof(*disk_key); ptr += len;
4583		sb_ptr += len;
4584		cur += len;
4585
4586		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4587			chunk = (struct btrfs_chunk *)sb_ptr;
4588			ret = read_one_chunk(root, &key, sb, chunk);
4589			if (ret)
4590				break;
4591			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4592			len = btrfs_chunk_item_size(num_stripes);
4593		} else {
4594			ret = -EIO;
4595			break;
4596		}
4597		ptr += len;
4598		sb_ptr += len;
4599		cur += len;
4600	}
4601	free_extent_buffer(sb);
4602	return ret;
4603}
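/*
 * A hedged sketch of the cursor walk above: the sys_chunk_array is a
 * packed byte array of (disk key, chunk item) records where each chunk
 * item's size depends on its stripe count.  The record layouts below
 * are simplified stand-ins, not real btrfs structures.
 */
#if 0	/* illustrative sketch, not part of the kernel build */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_key { uint32_t type; };		/* 1 == "chunk item" */
struct demo_chunk { uint32_t num_stripes; };	/* stripes follow inline */

static size_t demo_chunk_size(uint32_t num_stripes)
{
	/* header plus a 16-byte stub per stripe */
	return sizeof(struct demo_chunk) + num_stripes * 16;
}

int main(void)
{
	uint8_t array[256];
	size_t array_size, cur = 0;

	/* build a single record: key, then a chunk with two stripes */
	struct demo_key key = { .type = 1 };
	struct demo_chunk chunk = { .num_stripes = 2 };
	memcpy(array, &key, sizeof(key));
	memcpy(array + sizeof(key), &chunk, sizeof(chunk));
	array_size = sizeof(key) + demo_chunk_size(chunk.num_stripes);

	while (cur < array_size) {
		struct demo_key k;
		struct demo_chunk c;

		memcpy(&k, array + cur, sizeof(k));
		cur += sizeof(k);
		if (k.type != 1)
			break;			/* unexpected key type */
		memcpy(&c, array + cur, sizeof(c));
		printf("chunk with %u stripes\n", c.num_stripes);
		cur += demo_chunk_size(c.num_stripes);
	}
	return 0;
}
#endif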
4604
4605int btrfs_read_chunk_tree(struct btrfs_root *root)
4606{
4607	struct btrfs_path *path;
4608	struct extent_buffer *leaf;
4609	struct btrfs_key key;
4610	struct btrfs_key found_key;
4611	int ret;
4612	int slot;
4613
4614	root = root->fs_info->chunk_root;
4615
4616	path = btrfs_alloc_path();
4617	if (!path)
4618		return -ENOMEM;
4619
4620	mutex_lock(&uuid_mutex);
4621	lock_chunks(root);
4622
4623	/* first we search for all of the device items, and then we
4624	 * read in all of the chunk items.  This way we can create chunk
4625	 * mappings that reference all of the devices that are found.
4626	 */
4627	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4628	key.offset = 0;
4629	key.type = 0;
4630again:
4631	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4632	if (ret < 0)
4633		goto error;
4634	while (1) {
4635		leaf = path->nodes[0];
4636		slot = path->slots[0];
4637		if (slot >= btrfs_header_nritems(leaf)) {
4638			ret = btrfs_next_leaf(root, path);
4639			if (ret == 0)
4640				continue;
4641			if (ret < 0)
4642				goto error;
4643			break;
4644		}
4645		btrfs_item_key_to_cpu(leaf, &found_key, slot);
4646		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4647			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4648				break;
4649			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4650				struct btrfs_dev_item *dev_item;
4651				dev_item = btrfs_item_ptr(leaf, slot,
4652						  struct btrfs_dev_item);
4653				ret = read_one_dev(root, leaf, dev_item);
4654				if (ret)
4655					goto error;
4656			}
4657		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4658			struct btrfs_chunk *chunk;
4659			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4660			ret = read_one_chunk(root, &found_key, leaf, chunk);
4661			if (ret)
4662				goto error;
4663		}
4664		path->slots[0]++;
4665	}
4666	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4667		key.objectid = 0;
4668		btrfs_release_path(path);
4669		goto again;
4670	}
4671	ret = 0;
4672error:
4673	unlock_chunks(root);
4674	mutex_unlock(&uuid_mutex);
4675
4676	btrfs_free_path(path);
4677	return ret;
4678}
4679
4680static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4681{
4682	int i;
4683
4684	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4685		btrfs_dev_stat_reset(dev, i);
4686}
4687
4688int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
4689{
4690	struct btrfs_key key;
4691	struct btrfs_key found_key;
4692	struct btrfs_root *dev_root = fs_info->dev_root;
4693	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4694	struct extent_buffer *eb;
4695	int slot;
4696	int ret = 0;
4697	struct btrfs_device *device;
4698	struct btrfs_path *path = NULL;
4699	int i;
4700
4701	path = btrfs_alloc_path();
4702	if (!path) {
4703		ret = -ENOMEM;
4704		goto out;
4705	}
4706
4707	mutex_lock(&fs_devices->device_list_mutex);
4708	list_for_each_entry(device, &fs_devices->devices, dev_list) {
4709		int item_size;
4710		struct btrfs_dev_stats_item *ptr;
4711
4712		key.objectid = 0;
4713		key.type = BTRFS_DEV_STATS_KEY;
4714		key.offset = device->devid;
4715		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
4716		if (ret) {
4717			printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
4718				      rcu_str_deref(device->name),
4719				      (unsigned long long)device->devid);
4720			__btrfs_reset_dev_stats(device);
4721			device->dev_stats_valid = 1;
4722			btrfs_release_path(path);
4723			continue;
4724		}
4725		slot = path->slots[0];
4726		eb = path->nodes[0];
4727		btrfs_item_key_to_cpu(eb, &found_key, slot);
4728		item_size = btrfs_item_size_nr(eb, slot);
4729
4730		ptr = btrfs_item_ptr(eb, slot,
4731				     struct btrfs_dev_stats_item);
4732
4733		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4734			if (item_size >= (1 + i) * sizeof(__le64))
4735				btrfs_dev_stat_set(device, i,
4736					btrfs_dev_stats_value(eb, ptr, i));
4737			else
4738				btrfs_dev_stat_reset(device, i);
4739		}
4740
4741		device->dev_stats_valid = 1;
4742		btrfs_dev_stat_print_on_load(device);
4743		btrfs_release_path(path);
4744	}
4745	mutex_unlock(&fs_devices->device_list_mutex);
4746
4747out:
4748	btrfs_free_path(path);
4749	return ret < 0 ? ret : 0;
4750}
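/*
 * A small stand-alone sketch of the size-guarded read in the loop
 * above: older filesystems may have stored fewer counters, so a slot
 * is read only when the on-disk item is large enough to contain it,
 * and reset otherwise.  All names and values are made up.
 */
#if 0	/* illustrative sketch, not part of the kernel build */
#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS 5

int main(void)
{
	uint64_t on_disk[3] = { 7, 0, 2 };	/* written by an older version */
	size_t item_size = sizeof(on_disk);	/* only 3 of 5 slots present */
	uint64_t counters[NR_COUNTERS];
	int i;

	for (i = 0; i < NR_COUNTERS; i++) {
		if (item_size >= (size_t)(1 + i) * sizeof(uint64_t))
			counters[i] = on_disk[i];	/* present on disk */
		else
			counters[i] = 0;		/* missing: reset */
	}

	for (i = 0; i < NR_COUNTERS; i++)
		printf("counter %d = %llu\n", i,
		       (unsigned long long)counters[i]);
	return 0;
}
#endif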
4751
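/*
 * Persist one device's error counters: look up its dev_stats item,
 * delete and re-create it if an older, smaller layout is found, insert
 * it if absent, and finally write the current counter values.
 */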
4752static int update_dev_stat_item(struct btrfs_trans_handle *trans,
4753				struct btrfs_root *dev_root,
4754				struct btrfs_device *device)
4755{
4756	struct btrfs_path *path;
4757	struct btrfs_key key;
4758	struct extent_buffer *eb;
4759	struct btrfs_dev_stats_item *ptr;
4760	int ret;
4761	int i;
4762
4763	key.objectid = 0;
4764	key.type = BTRFS_DEV_STATS_KEY;
4765	key.offset = device->devid;
4766
4767	path = btrfs_alloc_path();
4768	if (!path)
		return -ENOMEM;
4769	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
4770	if (ret < 0) {
4771		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
4772			      ret, rcu_str_deref(device->name));
4773		goto out;
4774	}
4775
4776	if (ret == 0 &&
4777	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
4778		/* need to delete old one and insert a new one */
4779		ret = btrfs_del_item(trans, dev_root, path);
4780		if (ret != 0) {
4781			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
4782				      rcu_str_deref(device->name), ret);
4783			goto out;
4784		}
4785		ret = 1;
4786	}
4787
4788	if (ret == 1) {
4789		/* need to insert a new item */
4790		btrfs_release_path(path);
4791		ret = btrfs_insert_empty_item(trans, dev_root, path,
4792					      &key, sizeof(*ptr));
4793		if (ret < 0) {
4794			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
4795				      rcu_str_deref(device->name), ret);
4796			goto out;
4797		}
4798	}
4799
4800	eb = path->nodes[0];
4801	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
4802	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4803		btrfs_set_dev_stats_value(eb, ptr, i,
4804					  btrfs_dev_stat_read(device, i));
4805	btrfs_mark_buffer_dirty(eb);
4806
4807out:
4808	btrfs_free_path(path);
4809	return ret;
4810}
4811
4812/*
4813 * called from commit_transaction. Writes all changed device stats to disk.
4814 */
4815int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
4816			struct btrfs_fs_info *fs_info)
4817{
4818	struct btrfs_root *dev_root = fs_info->dev_root;
4819	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4820	struct btrfs_device *device;
4821	int ret = 0;
4822
4823	mutex_lock(&fs_devices->device_list_mutex);
4824	list_for_each_entry(device, &fs_devices->devices, dev_list) {
4825		if (!device->dev_stats_valid || !device->dev_stats_dirty)
4826			continue;
4827
4828		ret = update_dev_stat_item(trans, dev_root, device);
4829		if (!ret)
4830			device->dev_stats_dirty = 0;
4831	}
4832	mutex_unlock(&fs_devices->device_list_mutex);
4833
4834	return ret;
4835}
4836
4837void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
4838{
4839	btrfs_dev_stat_inc(dev, index);
4840	btrfs_dev_stat_print_on_error(dev);
4841}
4842
4843void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
4844{
4845	if (!dev->dev_stats_valid)
4846		return;
4847	printk_ratelimited_in_rcu(KERN_ERR
4848			   "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4849			   rcu_str_deref(dev->name),
4850			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4851			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4852			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4853			   btrfs_dev_stat_read(dev,
4854					       BTRFS_DEV_STAT_CORRUPTION_ERRS),
4855			   btrfs_dev_stat_read(dev,
4856					       BTRFS_DEV_STAT_GENERATION_ERRS));
4857}
4858
4859static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
4860{
4861	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4862	       rcu_str_deref(dev->name),
4863	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4864	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4865	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4866	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
4867	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
4868}
4869
4870int btrfs_get_dev_stats(struct btrfs_root *root,
4871			struct btrfs_ioctl_get_dev_stats *stats,
4872			int reset_after_read)
4873{
4874	struct btrfs_device *dev;
4875	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4876	int i;
4877
4878	mutex_lock(&fs_devices->device_list_mutex);
4879	dev = btrfs_find_device(root, stats->devid, NULL, NULL);
4880	mutex_unlock(&fs_devices->device_list_mutex);
4881
4882	if (!dev) {
4883		printk(KERN_WARNING
4884		       "btrfs: get dev_stats failed, device not found\n");
4885		return -ENODEV;
4886	} else if (!dev->dev_stats_valid) {
4887		printk(KERN_WARNING
4888		       "btrfs: get dev_stats failed, not yet valid\n");
4889		return -ENODEV;
4890	} else if (reset_after_read) {
4891		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4892			if (stats->nr_items > i)
4893				stats->values[i] =
4894					btrfs_dev_stat_read_and_reset(dev, i);
4895			else
4896				btrfs_dev_stat_reset(dev, i);
4897		}
4898	} else {
4899		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4900			if (stats->nr_items > i)
4901				stats->values[i] = btrfs_dev_stat_read(dev, i);
4902	}
4903	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
4904		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
4905	return 0;
4906}