/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

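/*
 * chunk allocation and device state changes are serialized by the per-fs
 * chunk_mutex; these helpers just take it by root for convenience.
 */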
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

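/*
 * look up a device on @head by devid and, when @uuid is given, by device
 * uuid as well; a NULL @uuid matches on devid alone.
 */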
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device. We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, set up a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

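/*
 * called once per device found during scan: record the device under the
 * btrfs_fs_devices matching its fsid (allocating one on first sight), or
 * refresh the stored path and latest generation if it is already known.
 */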
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->dev_stats_valid = 0;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name->str, path)) {
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

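/*
 * duplicate the device list of @orig into a fresh btrfs_fs_devices, so
 * the original list can be repurposed while the copy keeps describing the
 * old state (see btrfs_prepare_sprout() below).
 */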
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We hold the volume lock, so it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
		if (!name) {
			kfree(device);
			goto error;
		}
		rcu_assign_pointer(device->name, name);

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

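/*
 * after the superblocks have been read, drop every scanned device that is
 * not referenced by the filesystem metadata and remember which remaining
 * device carries the newest generation.
 */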
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path; it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!latest_transid ||
			    device->generation > latest_transid) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

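/*
 * drop one open reference on @fs_devices. On the last close, swap each
 * device for a fresh closed copy (so RCU readers still see a valid list
 * entry) and free the old one after a grace period via free_device().
 */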
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device); /* -ENOMEM */
		memcpy(new_device, device, sizeof(*new_device));

		/* Safe because we are under uuid_mutex */
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(device->name && !name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

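/*
 * open every scanned device in @fs_devices, validate its superblock
 * against the in-memory devid/uuid, and remember the bdev with the newest
 * generation. Devices that fail to open are simply skipped; -EINVAL is
 * returned only when nothing could be opened at all.
 */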
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name->str, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name->str);
			goto error;
		}
		filemap_write_and_wait(bdev->bd_inode->i_mapping);
		invalidate_bdev(bdev);
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

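/*
 * read the superblock of a single device, print what was found and
 * register the device with the global fs_uuids list via device_list_add();
 * this is the per-path worker behind device scanning.
 */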
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	mutex_lock(&uuid_mutex);
	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	mutex_unlock(&uuid_mutex);
	blkdev_put(bdev, flags);
error:
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the
 *		max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But
 * if we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
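/*
 * Usage sketch (hypothetical caller, not taken from this file): a caller
 * such as the chunk allocator picks a per-device hole and then reserves
 * it with btrfs_alloc_dev_extent():
 *
 *	u64 start = 0, len = 0;
 *	int err = find_free_dev_extent(device, 1024 * 1024, &start, &len);
 *	if (!err)
 *		... carve a new dev extent out of [start, start + len) ...
 */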
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

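/*
 * remove the dev extent item on @device that covers @start, returning its
 * length to device->bytes_used and the free_chunk_space accounting.
 */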
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}

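/*
 * insert a dev extent item recording that @num_bytes at @start on @device
 * back the chunk at (@chunk_tree, @chunk_objectid, @chunk_offset).
 */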
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

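/*
 * find the logical offset just past the last chunk item under @objectid,
 * i.e. where the next chunk may be placed; 0 if no chunk exists yet.
 */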
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

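/*
 * pick the next unused device id by looking at the highest existing dev
 * item in the chunk tree; devids start at 1.
 */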
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

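/*
 * remove a device from the filesystem: refuse if the remaining devices
 * cannot satisfy the raid constraints, migrate all its chunks away via
 * btrfs_shrink_device(device, 0), delete its dev item and finally wipe
 * the btrfs magic from its superblock. Passing "missing" as
 * @device_path picks the first device that is present in the metadata
 * but has no bdev.
 */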
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		invalidate_bdev(bdev);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	/*
	 * at this point, the device is zero sized. We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}

/*
 * does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

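/*
 * add a new, empty device at @device_path to a mounted filesystem. When
 * the fs is currently a seed, this also sprouts: btrfs_prepare_sprout()
 * moves the old devices onto a seed list and gives the fs a new UUID.
 */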
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		rcu_string_free(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		if (ret)
			goto error_trans;
		ret = btrfs_finish_sprout(trans, root);
		if (ret)
			goto error_trans;
	} else {
		ret = btrfs_add_device(trans, root, device);
		if (ret)
			goto error_trans;
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
	}

	return ret;

error_trans:
	unlock_chunks(root);
	btrfs_abort_transaction(trans, root, ret);
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			       chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

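/*
 * relocate one chunk: move all extents it holds elsewhere, then delete
 * its dev extents, its chunk item (and the sys_chunk_array copy for
 * system chunks), its block group and its extent mapping.
 */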
1988static int btrfs_relocate_chunk(struct btrfs_root *root,
1989 u64 chunk_tree, u64 chunk_objectid,
1990 u64 chunk_offset)
1991{
1992 struct extent_map_tree *em_tree;
1993 struct btrfs_root *extent_root;
1994 struct btrfs_trans_handle *trans;
1995 struct extent_map *em;
1996 struct map_lookup *map;
1997 int ret;
1998 int i;
1999
2000 root = root->fs_info->chunk_root;
2001 extent_root = root->fs_info->extent_root;
2002 em_tree = &root->fs_info->mapping_tree.map_tree;
2003
2004 ret = btrfs_can_relocate(extent_root, chunk_offset);
2005 if (ret)
2006 return -ENOSPC;
2007
2008 /* step one, relocate all the extents inside this chunk */
2009 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2010 if (ret)
2011 return ret;
2012
2013 trans = btrfs_start_transaction(root, 0);
2014 BUG_ON(IS_ERR(trans));
2015
2016 lock_chunks(root);
2017
2018 /*
2019 * step two, delete the device extents and the
2020 * chunk tree entries
2021 */
2022 read_lock(&em_tree->lock);
2023 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2024 read_unlock(&em_tree->lock);
2025
2026 BUG_ON(!em || em->start > chunk_offset ||
2027 em->start + em->len < chunk_offset);
2028 map = (struct map_lookup *)em->bdev;
2029
2030 for (i = 0; i < map->num_stripes; i++) {
2031 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2032 map->stripes[i].physical);
2033 BUG_ON(ret);
2034
2035 if (map->stripes[i].dev) {
2036 ret = btrfs_update_device(trans, map->stripes[i].dev);
2037 BUG_ON(ret);
2038 }
2039 }
2040 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2041 chunk_offset);
2042
2043 BUG_ON(ret);
2044
2045 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2046
2047 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2048 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2049 BUG_ON(ret);
2050 }
2051
2052 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2053 BUG_ON(ret);
2054
2055 write_lock(&em_tree->lock);
2056 remove_extent_mapping(em_tree, em);
2057 write_unlock(&em_tree->lock);
2058
2059 kfree(map);
2060 em->bdev = NULL;
2061
2062 /* once for the tree */
2063 free_extent_map(em);
2064 /* once for us */
2065 free_extent_map(em);
2066
2067 unlock_chunks(root);
2068 btrfs_end_transaction(trans, root);
2069 return 0;
2070}
2071
2072static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2073{
2074 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2075 struct btrfs_path *path;
2076 struct extent_buffer *leaf;
2077 struct btrfs_chunk *chunk;
2078 struct btrfs_key key;
2079 struct btrfs_key found_key;
2080 u64 chunk_tree = chunk_root->root_key.objectid;
2081 u64 chunk_type;
2082 bool retried = false;
2083 int failed = 0;
2084 int ret;
2085
2086 path = btrfs_alloc_path();
2087 if (!path)
2088 return -ENOMEM;
2089
2090again:
2091 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2092 key.offset = (u64)-1;
2093 key.type = BTRFS_CHUNK_ITEM_KEY;
2094
2095 while (1) {
2096 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2097 if (ret < 0)
2098 goto error;
2099 BUG_ON(ret == 0); /* Corruption */
2100
2101 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2102 key.type);
2103 if (ret < 0)
2104 goto error;
2105 if (ret > 0)
2106 break;
2107
2108 leaf = path->nodes[0];
2109 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2110
2111 chunk = btrfs_item_ptr(leaf, path->slots[0],
2112 struct btrfs_chunk);
2113 chunk_type = btrfs_chunk_type(leaf, chunk);
2114 btrfs_release_path(path);
2115
2116 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2117 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2118 found_key.objectid,
2119 found_key.offset);
2120 if (ret == -ENOSPC)
2121 failed++;
2122 else if (ret)
2123 BUG();
2124 }
2125
2126 if (found_key.offset == 0)
2127 break;
2128 key.offset = found_key.offset - 1;
2129 }
2130 ret = 0;
2131 if (failed && !retried) {
2132 failed = 0;
2133 retried = true;
2134 goto again;
2135 } else if (failed && retried) {
2136 WARN_ON(1);
2137 ret = -ENOSPC;
2138 }
2139error:
2140 btrfs_free_path(path);
2141 return ret;
2142}
2143
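/*
 * Persist the balance state by writing a balance item into the tree
 * root, so an interrupted balance can be resumed after a remount.
 */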
2144static int insert_balance_item(struct btrfs_root *root,
2145 struct btrfs_balance_control *bctl)
2146{
2147 struct btrfs_trans_handle *trans;
2148 struct btrfs_balance_item *item;
2149 struct btrfs_disk_balance_args disk_bargs;
2150 struct btrfs_path *path;
2151 struct extent_buffer *leaf;
2152 struct btrfs_key key;
2153 int ret, err;
2154
2155 path = btrfs_alloc_path();
2156 if (!path)
2157 return -ENOMEM;
2158
2159 trans = btrfs_start_transaction(root, 0);
2160 if (IS_ERR(trans)) {
2161 btrfs_free_path(path);
2162 return PTR_ERR(trans);
2163 }
2164
2165 key.objectid = BTRFS_BALANCE_OBJECTID;
2166 key.type = BTRFS_BALANCE_ITEM_KEY;
2167 key.offset = 0;
2168
2169 ret = btrfs_insert_empty_item(trans, root, path, &key,
2170 sizeof(*item));
2171 if (ret)
2172 goto out;
2173
2174 leaf = path->nodes[0];
2175 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2176
2177 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2178
2179 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2180 btrfs_set_balance_data(leaf, item, &disk_bargs);
2181 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2182 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2183 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2184 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2185
2186 btrfs_set_balance_flags(leaf, item, bctl->flags);
2187
2188 btrfs_mark_buffer_dirty(leaf);
2189out:
2190 btrfs_free_path(path);
2191 err = btrfs_commit_transaction(trans, root);
2192 if (err && !ret)
2193 ret = err;
2194 return ret;
2195}
2196
2197static int del_balance_item(struct btrfs_root *root)
2198{
2199 struct btrfs_trans_handle *trans;
2200 struct btrfs_path *path;
2201 struct btrfs_key key;
2202 int ret, err;
2203
2204 path = btrfs_alloc_path();
2205 if (!path)
2206 return -ENOMEM;
2207
2208 trans = btrfs_start_transaction(root, 0);
2209 if (IS_ERR(trans)) {
2210 btrfs_free_path(path);
2211 return PTR_ERR(trans);
2212 }
2213
2214 key.objectid = BTRFS_BALANCE_OBJECTID;
2215 key.type = BTRFS_BALANCE_ITEM_KEY;
2216 key.offset = 0;
2217
2218 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2219 if (ret < 0)
2220 goto out;
2221 if (ret > 0) {
2222 ret = -ENOENT;
2223 goto out;
2224 }
2225
2226 ret = btrfs_del_item(trans, root, path);
2227out:
2228 btrfs_free_path(path);
2229 err = btrfs_commit_transaction(trans, root);
2230 if (err && !ret)
2231 ret = err;
2232 return ret;
2233}
2234
2235/*
2236 * This is a heuristic used to reduce the number of chunks balanced on
2237 * resume after balance was interrupted.
2238 */
2239static void update_balance_args(struct btrfs_balance_control *bctl)
2240{
2241 /*
2242 * Turn on soft mode for chunk types that were being converted.
2243 */
2244 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2245 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2246 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2247 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2248 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2249 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2250
	/*
	 * Turn on the usage filter if it is not already in use.  The idea
	 * is that chunks we have already balanced should be reasonably
	 * full.  Don't do it for chunks that are being converted - that
	 * will keep us from relocating unconverted (albeit full) chunks.
	 */
2258 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2259 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2260 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2261 bctl->data.usage = 90;
2262 }
2263 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2264 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2265 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2266 bctl->sys.usage = 90;
2267 }
2268 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2269 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2270 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2271 bctl->meta.usage = 90;
2272 }
2273}
2274
2275/*
2276 * Should be called with both balance and volume mutexes held to
2277 * serialize other volume operations (add_dev/rm_dev/resize) with
2278 * restriper. Same goes for unset_balance_control.
2279 */
2280static void set_balance_control(struct btrfs_balance_control *bctl)
2281{
2282 struct btrfs_fs_info *fs_info = bctl->fs_info;
2283
2284 BUG_ON(fs_info->balance_ctl);
2285
2286 spin_lock(&fs_info->balance_lock);
2287 fs_info->balance_ctl = bctl;
2288 spin_unlock(&fs_info->balance_lock);
2289}
2290
2291static void unset_balance_control(struct btrfs_fs_info *fs_info)
2292{
2293 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2294
2295 BUG_ON(!fs_info->balance_ctl);
2296
2297 spin_lock(&fs_info->balance_lock);
2298 fs_info->balance_ctl = NULL;
2299 spin_unlock(&fs_info->balance_lock);
2300
2301 kfree(bctl);
2302}
2303
2304/*
2305 * Balance filters. Return 1 if chunk should be filtered out
2306 * (should not be balanced).
2307 */
2308static int chunk_profiles_filter(u64 chunk_type,
2309 struct btrfs_balance_args *bargs)
2310{
2311 chunk_type = chunk_to_extended(chunk_type) &
2312 BTRFS_EXTENDED_PROFILE_MASK;
2313
2314 if (bargs->profiles & chunk_type)
2315 return 0;
2316
2317 return 1;
2318}
2319
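/*
 * Scale num by factor percent, e.g. div_factor_fine(1000, 90) == 900.
 * Used below to turn the usage filter's percentage into a byte threshold.
 */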
2320static u64 div_factor_fine(u64 num, int factor)
2321{
2322 if (factor <= 0)
2323 return 0;
2324 if (factor >= 100)
2325 return num;
2326
2327 num *= factor;
2328 do_div(num, 100);
2329 return num;
2330}
2331
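/*
 * Usage filter: balance only chunks that are less than bargs->usage
 * percent full.  E.g. with usage=90, a chunk using 95% of its space is
 * filtered out (returns 1) and left alone.
 */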
2332static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2333 struct btrfs_balance_args *bargs)
2334{
2335 struct btrfs_block_group_cache *cache;
2336 u64 chunk_used, user_thresh;
2337 int ret = 1;
2338
2339 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2340 chunk_used = btrfs_block_group_used(&cache->item);
2341
2342 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2343 if (chunk_used < user_thresh)
2344 ret = 0;
2345
2346 btrfs_put_block_group(cache);
2347 return ret;
2348}
2349
2350static int chunk_devid_filter(struct extent_buffer *leaf,
2351 struct btrfs_chunk *chunk,
2352 struct btrfs_balance_args *bargs)
2353{
2354 struct btrfs_stripe *stripe;
2355 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2356 int i;
2357
2358 for (i = 0; i < num_stripes; i++) {
2359 stripe = btrfs_stripe_nr(chunk, i);
2360 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2361 return 0;
2362 }
2363
2364 return 1;
2365}
2366
2367/* [pstart, pend) */
2368static int chunk_drange_filter(struct extent_buffer *leaf,
2369 struct btrfs_chunk *chunk,
2370 u64 chunk_offset,
2371 struct btrfs_balance_args *bargs)
2372{
2373 struct btrfs_stripe *stripe;
2374 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2375 u64 stripe_offset;
2376 u64 stripe_length;
2377 int factor;
2378 int i;
2379
2380 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2381 return 0;
2382
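	/*
	 * Each stripe of a DUP/RAID1/RAID10 chunk holds one of two copies,
	 * so a device extent covers chunk_length * 2 / num_stripes bytes;
	 * for single/RAID0 it covers chunk_length / num_stripes.
	 */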
2383 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2384 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2385 factor = 2;
2386 else
2387 factor = 1;
2388 factor = num_stripes / factor;
2389
2390 for (i = 0; i < num_stripes; i++) {
2391 stripe = btrfs_stripe_nr(chunk, i);
2392 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2393 continue;
2394
2395 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2396 stripe_length = btrfs_chunk_length(leaf, chunk);
2397 do_div(stripe_length, factor);
2398
2399 if (stripe_offset < bargs->pend &&
2400 stripe_offset + stripe_length > bargs->pstart)
2401 return 0;
2402 }
2403
2404 return 1;
2405}
2406
2407/* [vstart, vend) */
2408static int chunk_vrange_filter(struct extent_buffer *leaf,
2409 struct btrfs_chunk *chunk,
2410 u64 chunk_offset,
2411 struct btrfs_balance_args *bargs)
2412{
2413 if (chunk_offset < bargs->vend &&
2414 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2415 /* at least part of the chunk is inside this vrange */
2416 return 0;
2417
2418 return 1;
2419}
2420
2421static int chunk_soft_convert_filter(u64 chunk_type,
2422 struct btrfs_balance_args *bargs)
2423{
2424 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2425 return 0;
2426
2427 chunk_type = chunk_to_extended(chunk_type) &
2428 BTRFS_EXTENDED_PROFILE_MASK;
2429
2430 if (bargs->target == chunk_type)
2431 return 1;
2432
2433 return 0;
2434}
2435
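/*
 * Run a chunk through every configured balance filter.  Returns 1 if
 * the chunk should be relocated, 0 if any filter rejects it.
 */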
2436static int should_balance_chunk(struct btrfs_root *root,
2437 struct extent_buffer *leaf,
2438 struct btrfs_chunk *chunk, u64 chunk_offset)
2439{
2440 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2441 struct btrfs_balance_args *bargs = NULL;
2442 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2443
2444 /* type filter */
2445 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2446 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2447 return 0;
2448 }
2449
2450 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2451 bargs = &bctl->data;
2452 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2453 bargs = &bctl->sys;
2454 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2455 bargs = &bctl->meta;
2456
2457 /* profiles filter */
2458 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2459 chunk_profiles_filter(chunk_type, bargs)) {
2460 return 0;
2461 }
2462
2463 /* usage filter */
2464 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2465 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2466 return 0;
2467 }
2468
2469 /* devid filter */
2470 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2471 chunk_devid_filter(leaf, chunk, bargs)) {
2472 return 0;
2473 }
2474
2475 /* drange filter, makes sense only with devid filter */
2476 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2477 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2478 return 0;
2479 }
2480
2481 /* vrange filter */
2482 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2483 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2484 return 0;
2485 }
2486
2487 /* soft profile changing mode */
2488 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2489 chunk_soft_convert_filter(chunk_type, bargs)) {
2490 return 0;
2491 }
2492
2493 return 1;
2494}
2495
2496static u64 div_factor(u64 num, int factor)
2497{
2498 if (factor == 10)
2499 return num;
2500 num *= factor;
2501 do_div(num, 10);
2502 return num;
2503}
2504
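/*
 * The main balance loop makes two passes over the chunk tree: a counting
 * pass that only tallies how many chunks match the filters
 * (bctl->stat.expected), and a second pass that actually relocates them,
 * walking keys from the end of the tree back towards offset 0.
 */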
2505static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2506{
2507 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2508 struct btrfs_root *chunk_root = fs_info->chunk_root;
2509 struct btrfs_root *dev_root = fs_info->dev_root;
2510 struct list_head *devices;
2511 struct btrfs_device *device;
2512 u64 old_size;
2513 u64 size_to_free;
2514 struct btrfs_chunk *chunk;
2515 struct btrfs_path *path;
2516 struct btrfs_key key;
2517 struct btrfs_key found_key;
2518 struct btrfs_trans_handle *trans;
2519 struct extent_buffer *leaf;
2520 int slot;
2521 int ret;
2522 int enospc_errors = 0;
2523 bool counting = true;
2524
	/* step one, make some room on all the devices */
2526 devices = &fs_info->fs_devices->devices;
2527 list_for_each_entry(device, devices, dev_list) {
2528 old_size = device->total_bytes;
2529 size_to_free = div_factor(old_size, 1);
2530 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2531 if (!device->writeable ||
2532 device->total_bytes - device->bytes_used > size_to_free)
2533 continue;
2534
2535 ret = btrfs_shrink_device(device, old_size - size_to_free);
2536 if (ret == -ENOSPC)
2537 break;
2538 BUG_ON(ret);
2539
2540 trans = btrfs_start_transaction(dev_root, 0);
2541 BUG_ON(IS_ERR(trans));
2542
2543 ret = btrfs_grow_device(trans, device, old_size);
2544 BUG_ON(ret);
2545
2546 btrfs_end_transaction(trans, dev_root);
2547 }
2548
2549 /* step two, relocate all the chunks */
2550 path = btrfs_alloc_path();
2551 if (!path) {
2552 ret = -ENOMEM;
2553 goto error;
2554 }
2555
2556 /* zero out stat counters */
2557 spin_lock(&fs_info->balance_lock);
2558 memset(&bctl->stat, 0, sizeof(bctl->stat));
2559 spin_unlock(&fs_info->balance_lock);
2560again:
2561 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2562 key.offset = (u64)-1;
2563 key.type = BTRFS_CHUNK_ITEM_KEY;
2564
2565 while (1) {
2566 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2567 atomic_read(&fs_info->balance_cancel_req)) {
2568 ret = -ECANCELED;
2569 goto error;
2570 }
2571
2572 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2573 if (ret < 0)
2574 goto error;
2575
2576 /*
2577 * this shouldn't happen, it means the last relocate
2578 * failed
2579 */
2580 if (ret == 0)
2581 BUG(); /* FIXME break ? */
2582
2583 ret = btrfs_previous_item(chunk_root, path, 0,
2584 BTRFS_CHUNK_ITEM_KEY);
2585 if (ret) {
2586 ret = 0;
2587 break;
2588 }
2589
2590 leaf = path->nodes[0];
2591 slot = path->slots[0];
2592 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2593
2594 if (found_key.objectid != key.objectid)
2595 break;
2596
2597 /* chunk zero is special */
2598 if (found_key.offset == 0)
2599 break;
2600
2601 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2602
2603 if (!counting) {
2604 spin_lock(&fs_info->balance_lock);
2605 bctl->stat.considered++;
2606 spin_unlock(&fs_info->balance_lock);
2607 }
2608
2609 ret = should_balance_chunk(chunk_root, leaf, chunk,
2610 found_key.offset);
2611 btrfs_release_path(path);
2612 if (!ret)
2613 goto loop;
2614
2615 if (counting) {
2616 spin_lock(&fs_info->balance_lock);
2617 bctl->stat.expected++;
2618 spin_unlock(&fs_info->balance_lock);
2619 goto loop;
2620 }
2621
2622 ret = btrfs_relocate_chunk(chunk_root,
2623 chunk_root->root_key.objectid,
2624 found_key.objectid,
2625 found_key.offset);
2626 if (ret && ret != -ENOSPC)
2627 goto error;
2628 if (ret == -ENOSPC) {
2629 enospc_errors++;
2630 } else {
2631 spin_lock(&fs_info->balance_lock);
2632 bctl->stat.completed++;
2633 spin_unlock(&fs_info->balance_lock);
2634 }
2635loop:
2636 key.offset = found_key.offset - 1;
2637 }
2638
2639 if (counting) {
2640 btrfs_release_path(path);
2641 counting = false;
2642 goto again;
2643 }
2644error:
2645 btrfs_free_path(path);
2646 if (enospc_errors) {
2647 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2648 enospc_errors);
2649 if (!ret)
2650 ret = -ENOSPC;
2651 }
2652
2653 return ret;
2654}
2655
2656/**
2657 * alloc_profile_is_valid - see if a given profile is valid and reduced
2658 * @flags: profile to validate
2659 * @extended: if true @flags is treated as an extended profile
2660 */
2661static int alloc_profile_is_valid(u64 flags, int extended)
2662{
2663 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2664 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2665
2666 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2667
2668 /* 1) check that all other bits are zeroed */
2669 if (flags & ~mask)
2670 return 0;
2671
2672 /* 2) see if profile is reduced */
2673 if (flags == 0)
2674 return !extended; /* "0" is valid for usual profiles */
2675
2676 /* true if exactly one bit set */
2677 return (flags & (flags - 1)) == 0;
2678}
2679
2680static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2681{
2682 /* cancel requested || normal exit path */
2683 return atomic_read(&fs_info->balance_cancel_req) ||
2684 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2685 atomic_read(&fs_info->balance_cancel_req) == 0);
2686}
2687
2688static void __cancel_balance(struct btrfs_fs_info *fs_info)
2689{
2690 int ret;
2691
2692 unset_balance_control(fs_info);
2693 ret = del_balance_item(fs_info->tree_root);
2694 BUG_ON(ret);
2695}
2696
2697void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2698 struct btrfs_ioctl_balance_args *bargs);
2699
2700/*
2701 * Should be called with both balance and volume mutexes held
2702 */
2703int btrfs_balance(struct btrfs_balance_control *bctl,
2704 struct btrfs_ioctl_balance_args *bargs)
2705{
2706 struct btrfs_fs_info *fs_info = bctl->fs_info;
2707 u64 allowed;
2708 int mixed = 0;
2709 int ret;
2710
2711 if (btrfs_fs_closing(fs_info) ||
2712 atomic_read(&fs_info->balance_pause_req) ||
2713 atomic_read(&fs_info->balance_cancel_req)) {
2714 ret = -EINVAL;
2715 goto out;
2716 }
2717
2718 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2719 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2720 mixed = 1;
2721
	/*
	 * In case of mixed groups, both data and metadata should be
	 * picked, and identical options should be given for both of them.
	 */
2726 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2727 if (mixed && (bctl->flags & allowed)) {
2728 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2729 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2730 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2731 printk(KERN_ERR "btrfs: with mixed groups data and "
2732 "metadata balance options must be the same\n");
2733 ret = -EINVAL;
2734 goto out;
2735 }
2736 }
2737
2738 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2739 if (fs_info->fs_devices->num_devices == 1)
2740 allowed |= BTRFS_BLOCK_GROUP_DUP;
2741 else if (fs_info->fs_devices->num_devices < 4)
2742 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2743 else
2744 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2745 BTRFS_BLOCK_GROUP_RAID10);
2746
2747 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2748 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2749 (bctl->data.target & ~allowed))) {
2750 printk(KERN_ERR "btrfs: unable to start balance with target "
2751 "data profile %llu\n",
2752 (unsigned long long)bctl->data.target);
2753 ret = -EINVAL;
2754 goto out;
2755 }
2756 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2757 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2758 (bctl->meta.target & ~allowed))) {
2759 printk(KERN_ERR "btrfs: unable to start balance with target "
2760 "metadata profile %llu\n",
2761 (unsigned long long)bctl->meta.target);
2762 ret = -EINVAL;
2763 goto out;
2764 }
2765 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2766 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2767 (bctl->sys.target & ~allowed))) {
2768 printk(KERN_ERR "btrfs: unable to start balance with target "
2769 "system profile %llu\n",
2770 (unsigned long long)bctl->sys.target);
2771 ret = -EINVAL;
2772 goto out;
2773 }
2774
2775 /* allow dup'ed data chunks only in mixed mode */
2776 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2777 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2778 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2779 ret = -EINVAL;
2780 goto out;
2781 }
2782
	/* allow reducing meta or sys integrity only if force is set */
2784 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2785 BTRFS_BLOCK_GROUP_RAID10;
2786 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2787 (fs_info->avail_system_alloc_bits & allowed) &&
2788 !(bctl->sys.target & allowed)) ||
2789 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2790 (fs_info->avail_metadata_alloc_bits & allowed) &&
2791 !(bctl->meta.target & allowed))) {
2792 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2793 printk(KERN_INFO "btrfs: force reducing metadata "
2794 "integrity\n");
2795 } else {
2796 printk(KERN_ERR "btrfs: balance will reduce metadata "
2797 "integrity, use force if you want this\n");
2798 ret = -EINVAL;
2799 goto out;
2800 }
2801 }
2802
2803 ret = insert_balance_item(fs_info->tree_root, bctl);
2804 if (ret && ret != -EEXIST)
2805 goto out;
2806
2807 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2808 BUG_ON(ret == -EEXIST);
2809 set_balance_control(bctl);
2810 } else {
2811 BUG_ON(ret != -EEXIST);
2812 spin_lock(&fs_info->balance_lock);
2813 update_balance_args(bctl);
2814 spin_unlock(&fs_info->balance_lock);
2815 }
2816
2817 atomic_inc(&fs_info->balance_running);
2818 mutex_unlock(&fs_info->balance_mutex);
2819
2820 ret = __btrfs_balance(fs_info);
2821
2822 mutex_lock(&fs_info->balance_mutex);
2823 atomic_dec(&fs_info->balance_running);
2824
2825 if (bargs) {
2826 memset(bargs, 0, sizeof(*bargs));
2827 update_ioctl_balance_args(fs_info, 0, bargs);
2828 }
2829
2830 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2831 balance_need_close(fs_info)) {
2832 __cancel_balance(fs_info);
2833 }
2834
2835 wake_up(&fs_info->balance_wait_q);
2836
2837 return ret;
2838out:
2839 if (bctl->flags & BTRFS_BALANCE_RESUME)
2840 __cancel_balance(fs_info);
2841 else
2842 kfree(bctl);
2843 return ret;
2844}
2845
2846static int balance_kthread(void *data)
2847{
2848 struct btrfs_fs_info *fs_info = data;
2849 int ret = 0;
2850
2851 mutex_lock(&fs_info->volume_mutex);
2852 mutex_lock(&fs_info->balance_mutex);
2853
2854 if (fs_info->balance_ctl) {
2855 printk(KERN_INFO "btrfs: continuing balance\n");
2856 ret = btrfs_balance(fs_info->balance_ctl, NULL);
2857 }
2858
2859 mutex_unlock(&fs_info->balance_mutex);
2860 mutex_unlock(&fs_info->volume_mutex);
2861
2862 return ret;
2863}
2864
2865int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
2866{
2867 struct task_struct *tsk;
2868
2869 spin_lock(&fs_info->balance_lock);
2870 if (!fs_info->balance_ctl) {
2871 spin_unlock(&fs_info->balance_lock);
2872 return 0;
2873 }
2874 spin_unlock(&fs_info->balance_lock);
2875
2876 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2877 printk(KERN_INFO "btrfs: force skipping balance\n");
2878 return 0;
2879 }
2880
2881 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
2882 if (IS_ERR(tsk))
2883 return PTR_ERR(tsk);
2884
2885 return 0;
2886}
2887
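/*
 * Read the balance item left behind by an interrupted balance and
 * reconstruct the in-memory balance control so it can be resumed.
 */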
2888int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
2889{
2890 struct btrfs_balance_control *bctl;
2891 struct btrfs_balance_item *item;
2892 struct btrfs_disk_balance_args disk_bargs;
2893 struct btrfs_path *path;
2894 struct extent_buffer *leaf;
2895 struct btrfs_key key;
2896 int ret;
2897
2898 path = btrfs_alloc_path();
2899 if (!path)
2900 return -ENOMEM;
2901
2902 key.objectid = BTRFS_BALANCE_OBJECTID;
2903 key.type = BTRFS_BALANCE_ITEM_KEY;
2904 key.offset = 0;
2905
2906 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
2907 if (ret < 0)
2908 goto out;
	if (ret > 0) { /* no balance item found, nothing to resume */
2910 ret = 0;
2911 goto out;
2912 }
2913
2914 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2915 if (!bctl) {
2916 ret = -ENOMEM;
2917 goto out;
2918 }
2919
2920 leaf = path->nodes[0];
2921 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2922
2923 bctl->fs_info = fs_info;
2924 bctl->flags = btrfs_balance_flags(leaf, item);
2925 bctl->flags |= BTRFS_BALANCE_RESUME;
2926
2927 btrfs_balance_data(leaf, item, &disk_bargs);
2928 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2929 btrfs_balance_meta(leaf, item, &disk_bargs);
2930 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2931 btrfs_balance_sys(leaf, item, &disk_bargs);
2932 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2933
2934 mutex_lock(&fs_info->volume_mutex);
2935 mutex_lock(&fs_info->balance_mutex);
2936
2937 set_balance_control(bctl);
2938
2939 mutex_unlock(&fs_info->balance_mutex);
2940 mutex_unlock(&fs_info->volume_mutex);
2941out:
2942 btrfs_free_path(path);
2943 return ret;
2944}
2945
2946int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2947{
2948 int ret = 0;
2949
2950 mutex_lock(&fs_info->balance_mutex);
2951 if (!fs_info->balance_ctl) {
2952 mutex_unlock(&fs_info->balance_mutex);
2953 return -ENOTCONN;
2954 }
2955
2956 if (atomic_read(&fs_info->balance_running)) {
2957 atomic_inc(&fs_info->balance_pause_req);
2958 mutex_unlock(&fs_info->balance_mutex);
2959
2960 wait_event(fs_info->balance_wait_q,
2961 atomic_read(&fs_info->balance_running) == 0);
2962
2963 mutex_lock(&fs_info->balance_mutex);
2964 /* we are good with balance_ctl ripped off from under us */
2965 BUG_ON(atomic_read(&fs_info->balance_running));
2966 atomic_dec(&fs_info->balance_pause_req);
2967 } else {
2968 ret = -ENOTCONN;
2969 }
2970
2971 mutex_unlock(&fs_info->balance_mutex);
2972 return ret;
2973}
2974
2975int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
2976{
2977 mutex_lock(&fs_info->balance_mutex);
2978 if (!fs_info->balance_ctl) {
2979 mutex_unlock(&fs_info->balance_mutex);
2980 return -ENOTCONN;
2981 }
2982
2983 atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * If we are running, just wait and return; the balance item is
	 * deleted in btrfs_balance in that case.
	 */
2988 if (atomic_read(&fs_info->balance_running)) {
2989 mutex_unlock(&fs_info->balance_mutex);
2990 wait_event(fs_info->balance_wait_q,
2991 atomic_read(&fs_info->balance_running) == 0);
2992 mutex_lock(&fs_info->balance_mutex);
2993 } else {
2994 /* __cancel_balance needs volume_mutex */
2995 mutex_unlock(&fs_info->balance_mutex);
2996 mutex_lock(&fs_info->volume_mutex);
2997 mutex_lock(&fs_info->balance_mutex);
2998
2999 if (fs_info->balance_ctl)
3000 __cancel_balance(fs_info);
3001
3002 mutex_unlock(&fs_info->volume_mutex);
3003 }
3004
3005 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3006 atomic_dec(&fs_info->balance_cancel_req);
3007 mutex_unlock(&fs_info->balance_mutex);
3008 return 0;
3009}
3010
3011/*
3012 * shrinking a device means finding all of the device extents past
3013 * the new size, and then following the back refs to the chunks.
3014 * The chunk relocation code actually frees the device extent
3015 */
3016int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3017{
3018 struct btrfs_trans_handle *trans;
3019 struct btrfs_root *root = device->dev_root;
3020 struct btrfs_dev_extent *dev_extent = NULL;
3021 struct btrfs_path *path;
3022 u64 length;
3023 u64 chunk_tree;
3024 u64 chunk_objectid;
3025 u64 chunk_offset;
3026 int ret;
3027 int slot;
3028 int failed = 0;
3029 bool retried = false;
3030 struct extent_buffer *l;
3031 struct btrfs_key key;
3032 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3033 u64 old_total = btrfs_super_total_bytes(super_copy);
3034 u64 old_size = device->total_bytes;
3035 u64 diff = device->total_bytes - new_size;
3036
3037 if (new_size >= device->total_bytes)
3038 return -EINVAL;
3039
3040 path = btrfs_alloc_path();
3041 if (!path)
3042 return -ENOMEM;
3043
3044 path->reada = 2;
3045
3046 lock_chunks(root);
3047
3048 device->total_bytes = new_size;
3049 if (device->writeable) {
3050 device->fs_devices->total_rw_bytes -= diff;
3051 spin_lock(&root->fs_info->free_chunk_lock);
3052 root->fs_info->free_chunk_space -= diff;
3053 spin_unlock(&root->fs_info->free_chunk_lock);
3054 }
3055 unlock_chunks(root);
3056
3057again:
3058 key.objectid = device->devid;
3059 key.offset = (u64)-1;
3060 key.type = BTRFS_DEV_EXTENT_KEY;
3061
3062 do {
3063 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3064 if (ret < 0)
3065 goto done;
3066
3067 ret = btrfs_previous_item(root, path, 0, key.type);
3068 if (ret < 0)
3069 goto done;
3070 if (ret) {
3071 ret = 0;
3072 btrfs_release_path(path);
3073 break;
3074 }
3075
3076 l = path->nodes[0];
3077 slot = path->slots[0];
3078 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3079
3080 if (key.objectid != device->devid) {
3081 btrfs_release_path(path);
3082 break;
3083 }
3084
3085 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3086 length = btrfs_dev_extent_length(l, dev_extent);
3087
3088 if (key.offset + length <= new_size) {
3089 btrfs_release_path(path);
3090 break;
3091 }
3092
3093 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3094 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3095 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3096 btrfs_release_path(path);
3097
3098 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3099 chunk_offset);
3100 if (ret && ret != -ENOSPC)
3101 goto done;
3102 if (ret == -ENOSPC)
3103 failed++;
3104 } while (key.offset-- > 0);
3105
3106 if (failed && !retried) {
3107 failed = 0;
3108 retried = true;
3109 goto again;
3110 } else if (failed && retried) {
3111 ret = -ENOSPC;
3112 lock_chunks(root);
3113
3114 device->total_bytes = old_size;
3115 if (device->writeable)
3116 device->fs_devices->total_rw_bytes += diff;
3117 spin_lock(&root->fs_info->free_chunk_lock);
3118 root->fs_info->free_chunk_space += diff;
3119 spin_unlock(&root->fs_info->free_chunk_lock);
3120 unlock_chunks(root);
3121 goto done;
3122 }
3123
3124 /* Shrinking succeeded, else we would be at "done". */
3125 trans = btrfs_start_transaction(root, 0);
3126 if (IS_ERR(trans)) {
3127 ret = PTR_ERR(trans);
3128 goto done;
3129 }
3130
3131 lock_chunks(root);
3132
3133 device->disk_total_bytes = new_size;
3134 /* Now btrfs_update_device() will change the on-disk size. */
3135 ret = btrfs_update_device(trans, device);
3136 if (ret) {
3137 unlock_chunks(root);
3138 btrfs_end_transaction(trans, root);
3139 goto done;
3140 }
3141 WARN_ON(diff > old_total);
3142 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3143 unlock_chunks(root);
3144 btrfs_end_transaction(trans, root);
3145done:
3146 btrfs_free_path(path);
3147 return ret;
3148}
3149
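/*
 * Append a (disk key, chunk item) pair for a new system chunk to the
 * superblock's sys_chunk_array, failing with -EFBIG if it won't fit.
 */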
3150static int btrfs_add_system_chunk(struct btrfs_root *root,
3151 struct btrfs_key *key,
3152 struct btrfs_chunk *chunk, int item_size)
3153{
3154 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3155 struct btrfs_disk_key disk_key;
3156 u32 array_size;
3157 u8 *ptr;
3158
3159 array_size = btrfs_super_sys_array_size(super_copy);
3160 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3161 return -EFBIG;
3162
3163 ptr = super_copy->sys_chunk_array + array_size;
3164 btrfs_cpu_key_to_disk(&disk_key, key);
3165 memcpy(ptr, &disk_key, sizeof(disk_key));
3166 ptr += sizeof(disk_key);
3167 memcpy(ptr, chunk, item_size);
3168 item_size += sizeof(disk_key);
3169 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3170 return 0;
3171}
3172
3173/*
3174 * sort the devices in descending order by max_avail, total_avail
3175 */
3176static int btrfs_cmp_device_info(const void *a, const void *b)
3177{
3178 const struct btrfs_device_info *di_a = a;
3179 const struct btrfs_device_info *di_b = b;
3180
3181 if (di_a->max_avail > di_b->max_avail)
3182 return -1;
3183 if (di_a->max_avail < di_b->max_avail)
3184 return 1;
3185 if (di_a->total_avail > di_b->total_avail)
3186 return -1;
3187 if (di_a->total_avail < di_b->total_avail)
3188 return 1;
3189 return 0;
3190}
3191
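/*
 * Allocate a new chunk of the given type: choose the RAID parameters,
 * find a suitably sized hole on each writeable device, sort the devices
 * by available space, and carve equally sized stripes out of the best
 * ndevs of them.  On success *map_ret describes the stripe layout and
 * an extent_map for the chunk has been added to the mapping tree.
 */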
3192static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3193 struct btrfs_root *extent_root,
3194 struct map_lookup **map_ret,
3195 u64 *num_bytes_out, u64 *stripe_size_out,
3196 u64 start, u64 type)
3197{
3198 struct btrfs_fs_info *info = extent_root->fs_info;
3199 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3200 struct list_head *cur;
3201 struct map_lookup *map = NULL;
3202 struct extent_map_tree *em_tree;
3203 struct extent_map *em;
3204 struct btrfs_device_info *devices_info = NULL;
3205 u64 total_avail;
3206 int num_stripes; /* total number of stripes to allocate */
3207 int sub_stripes; /* sub_stripes info for map */
3208 int dev_stripes; /* stripes per dev */
3209 int devs_max; /* max devs to use */
3210 int devs_min; /* min devs needed */
3211 int devs_increment; /* ndevs has to be a multiple of this */
	int ncopies;	/* how many copies of the data we have */
3213 int ret;
3214 u64 max_stripe_size;
3215 u64 max_chunk_size;
3216 u64 stripe_size;
3217 u64 num_bytes;
3218 int ndevs;
3219 int i;
3220 int j;
3221
3222 BUG_ON(!alloc_profile_is_valid(type, 0));
3223
3224 if (list_empty(&fs_devices->alloc_list))
3225 return -ENOSPC;
3226
3227 sub_stripes = 1;
3228 dev_stripes = 1;
3229 devs_increment = 1;
3230 ncopies = 1;
3231 devs_max = 0; /* 0 == as many as possible */
3232 devs_min = 1;
3233
3234 /*
3235 * define the properties of each RAID type.
3236 * FIXME: move this to a global table and use it in all RAID
3237 * calculation code
3238 */
3239 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3240 dev_stripes = 2;
3241 ncopies = 2;
3242 devs_max = 1;
3243 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3244 devs_min = 2;
3245 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3246 devs_increment = 2;
3247 ncopies = 2;
3248 devs_max = 2;
3249 devs_min = 2;
3250 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3251 sub_stripes = 2;
3252 devs_increment = 2;
3253 ncopies = 2;
3254 devs_min = 4;
3255 } else {
3256 devs_max = 1;
3257 }
3258
3259 if (type & BTRFS_BLOCK_GROUP_DATA) {
3260 max_stripe_size = 1024 * 1024 * 1024;
3261 max_chunk_size = 10 * max_stripe_size;
3262 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3263 /* for larger filesystems, use larger metadata chunks */
3264 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3265 max_stripe_size = 1024 * 1024 * 1024;
3266 else
3267 max_stripe_size = 256 * 1024 * 1024;
3268 max_chunk_size = max_stripe_size;
3269 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3270 max_stripe_size = 32 * 1024 * 1024;
3271 max_chunk_size = 2 * max_stripe_size;
3272 } else {
3273 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3274 type);
3275 BUG_ON(1);
3276 }
3277
3278 /* we don't want a chunk larger than 10% of writeable space */
3279 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3280 max_chunk_size);
3281
3282 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3283 GFP_NOFS);
3284 if (!devices_info)
3285 return -ENOMEM;
3286
3287 cur = fs_devices->alloc_list.next;
3288
3289 /*
3290 * in the first pass through the devices list, we gather information
3291 * about the available holes on each device.
3292 */
3293 ndevs = 0;
3294 while (cur != &fs_devices->alloc_list) {
3295 struct btrfs_device *device;
3296 u64 max_avail;
3297 u64 dev_offset;
3298
3299 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3300
3301 cur = cur->next;
3302
3303 if (!device->writeable) {
3304 printk(KERN_ERR
3305 "btrfs: read-only device in alloc_list\n");
3306 WARN_ON(1);
3307 continue;
3308 }
3309
3310 if (!device->in_fs_metadata)
3311 continue;
3312
3313 if (device->total_bytes > device->bytes_used)
3314 total_avail = device->total_bytes - device->bytes_used;
3315 else
3316 total_avail = 0;
3317
3318 /* If there is no space on this device, skip it. */
3319 if (total_avail == 0)
3320 continue;
3321
3322 ret = find_free_dev_extent(device,
3323 max_stripe_size * dev_stripes,
3324 &dev_offset, &max_avail);
3325 if (ret && ret != -ENOSPC)
3326 goto error;
3327
3328 if (ret == 0)
3329 max_avail = max_stripe_size * dev_stripes;
3330
3331 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3332 continue;
3333
3334 devices_info[ndevs].dev_offset = dev_offset;
3335 devices_info[ndevs].max_avail = max_avail;
3336 devices_info[ndevs].total_avail = total_avail;
3337 devices_info[ndevs].dev = device;
3338 ++ndevs;
3339 }
3340
3341 /*
3342 * now sort the devices by hole size / available space
3343 */
3344 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3345 btrfs_cmp_device_info, NULL);
3346
3347 /* round down to number of usable stripes */
3348 ndevs -= ndevs % devs_increment;
3349
3350 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3351 ret = -ENOSPC;
3352 goto error;
3353 }
3354
3355 if (devs_max && ndevs > devs_max)
3356 ndevs = devs_max;
3357 /*
3358 * the primary goal is to maximize the number of stripes, so use as many
3359 * devices as possible, even if the stripes are not maximum sized.
3360 */
3361 stripe_size = devices_info[ndevs-1].max_avail;
3362 num_stripes = ndevs * dev_stripes;
3363
3364 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3365 stripe_size = max_chunk_size * ncopies;
3366 do_div(stripe_size, ndevs);
3367 }
3368
3369 do_div(stripe_size, dev_stripes);
3370
3371 /* align to BTRFS_STRIPE_LEN */
3372 do_div(stripe_size, BTRFS_STRIPE_LEN);
3373 stripe_size *= BTRFS_STRIPE_LEN;
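	/*
	 * E.g. for a two-device RAID1 data chunk with 1 GiB holes:
	 * stripe_size = 1 GiB, num_stripes = 2, and the chunk provides
	 * stripe_size * (num_stripes / ncopies) = 1 GiB of logical space.
	 */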
3374
3375 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3376 if (!map) {
3377 ret = -ENOMEM;
3378 goto error;
3379 }
3380 map->num_stripes = num_stripes;
3381
3382 for (i = 0; i < ndevs; ++i) {
3383 for (j = 0; j < dev_stripes; ++j) {
3384 int s = i * dev_stripes + j;
3385 map->stripes[s].dev = devices_info[i].dev;
3386 map->stripes[s].physical = devices_info[i].dev_offset +
3387 j * stripe_size;
3388 }
3389 }
3390 map->sector_size = extent_root->sectorsize;
3391 map->stripe_len = BTRFS_STRIPE_LEN;
3392 map->io_align = BTRFS_STRIPE_LEN;
3393 map->io_width = BTRFS_STRIPE_LEN;
3394 map->type = type;
3395 map->sub_stripes = sub_stripes;
3396
3397 *map_ret = map;
3398 num_bytes = stripe_size * (num_stripes / ncopies);
3399
3400 *stripe_size_out = stripe_size;
3401 *num_bytes_out = num_bytes;
3402
3403 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3404
3405 em = alloc_extent_map();
3406 if (!em) {
3407 ret = -ENOMEM;
3408 goto error;
3409 }
3410 em->bdev = (struct block_device *)map;
3411 em->start = start;
3412 em->len = num_bytes;
3413 em->block_start = 0;
3414 em->block_len = em->len;
3415
3416 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3417 write_lock(&em_tree->lock);
3418 ret = add_extent_mapping(em_tree, em);
3419 write_unlock(&em_tree->lock);
3420 free_extent_map(em);
3421 if (ret)
3422 goto error;
3423
3424 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3425 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3426 start, num_bytes);
3427 if (ret)
3428 goto error;
3429
3430 for (i = 0; i < map->num_stripes; ++i) {
3431 struct btrfs_device *device;
3432 u64 dev_offset;
3433
3434 device = map->stripes[i].dev;
3435 dev_offset = map->stripes[i].physical;
3436
3437 ret = btrfs_alloc_dev_extent(trans, device,
3438 info->chunk_root->root_key.objectid,
3439 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3440 start, dev_offset, stripe_size);
3441 if (ret) {
3442 btrfs_abort_transaction(trans, extent_root, ret);
3443 goto error;
3444 }
3445 }
3446
3447 kfree(devices_info);
3448 return 0;
3449
3450error:
3451 kfree(map);
3452 kfree(devices_info);
3453 return ret;
3454}
3455
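/*
 * Second phase of chunk allocation: charge the stripes to each device,
 * build the on-disk chunk item, and insert it into the chunk tree (and
 * into the superblock's sys_chunk_array for system chunks).
 */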
3456static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3457 struct btrfs_root *extent_root,
3458 struct map_lookup *map, u64 chunk_offset,
3459 u64 chunk_size, u64 stripe_size)
3460{
3461 u64 dev_offset;
3462 struct btrfs_key key;
3463 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3464 struct btrfs_device *device;
3465 struct btrfs_chunk *chunk;
3466 struct btrfs_stripe *stripe;
3467 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3468 int index = 0;
3469 int ret;
3470
3471 chunk = kzalloc(item_size, GFP_NOFS);
3472 if (!chunk)
3473 return -ENOMEM;
3474
3475 index = 0;
3476 while (index < map->num_stripes) {
3477 device = map->stripes[index].dev;
3478 device->bytes_used += stripe_size;
3479 ret = btrfs_update_device(trans, device);
3480 if (ret)
3481 goto out_free;
3482 index++;
3483 }
3484
3485 spin_lock(&extent_root->fs_info->free_chunk_lock);
3486 extent_root->fs_info->free_chunk_space -= (stripe_size *
3487 map->num_stripes);
3488 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3489
3490 index = 0;
3491 stripe = &chunk->stripe;
3492 while (index < map->num_stripes) {
3493 device = map->stripes[index].dev;
3494 dev_offset = map->stripes[index].physical;
3495
3496 btrfs_set_stack_stripe_devid(stripe, device->devid);
3497 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3498 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3499 stripe++;
3500 index++;
3501 }
3502
3503 btrfs_set_stack_chunk_length(chunk, chunk_size);
3504 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3505 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3506 btrfs_set_stack_chunk_type(chunk, map->type);
3507 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3508 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3509 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3510 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3511 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3512
3513 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3514 key.type = BTRFS_CHUNK_ITEM_KEY;
3515 key.offset = chunk_offset;
3516
3517 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3518
3519 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3520 /*
3521 * TODO: Cleanup of inserted chunk root in case of
3522 * failure.
3523 */
3524 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3525 item_size);
3526 }
3527
3528out_free:
3529 kfree(chunk);
3530 return ret;
3531}
3532
3533/*
3534 * Chunk allocation falls into two parts. The first part does works
3535 * that make the new allocated chunk useable, but not do any operation
3536 * that modifies the chunk tree. The second part does the works that
3537 * require modifying the chunk tree. This division is important for the
3538 * bootstrap process of adding storage to a seed btrfs.
3539 */
3540int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3541 struct btrfs_root *extent_root, u64 type)
3542{
3543 u64 chunk_offset;
3544 u64 chunk_size;
3545 u64 stripe_size;
3546 struct map_lookup *map;
3547 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3548 int ret;
3549
3550 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3551 &chunk_offset);
3552 if (ret)
3553 return ret;
3554
3555 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3556 &stripe_size, chunk_offset, type);
3557 if (ret)
3558 return ret;
3559
3560 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3561 chunk_size, stripe_size);
3562 if (ret)
3563 return ret;
3564 return 0;
3565}
3566
3567static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3568 struct btrfs_root *root,
3569 struct btrfs_device *device)
3570{
3571 u64 chunk_offset;
3572 u64 sys_chunk_offset;
3573 u64 chunk_size;
3574 u64 sys_chunk_size;
3575 u64 stripe_size;
3576 u64 sys_stripe_size;
3577 u64 alloc_profile;
3578 struct map_lookup *map;
3579 struct map_lookup *sys_map;
3580 struct btrfs_fs_info *fs_info = root->fs_info;
3581 struct btrfs_root *extent_root = fs_info->extent_root;
3582 int ret;
3583
3584 ret = find_next_chunk(fs_info->chunk_root,
3585 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3586 if (ret)
3587 return ret;
3588
3589 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3590 fs_info->avail_metadata_alloc_bits;
3591 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3592
3593 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3594 &stripe_size, chunk_offset, alloc_profile);
3595 if (ret)
3596 return ret;
3597
3598 sys_chunk_offset = chunk_offset + chunk_size;
3599
3600 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3601 fs_info->avail_system_alloc_bits;
3602 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3603
3604 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3605 &sys_chunk_size, &sys_stripe_size,
3606 sys_chunk_offset, alloc_profile);
3607 if (ret)
3608 goto abort;
3609
3610 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3611 if (ret)
3612 goto abort;
3613
	/*
	 * Modifying the chunk tree requires allocating new blocks from
	 * both the system block group and the metadata block group, so we
	 * can only perform operations that modify the chunk tree after
	 * both block groups have been created.
	 */
3620 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3621 chunk_size, stripe_size);
3622 if (ret)
3623 goto abort;
3624
3625 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3626 sys_chunk_offset, sys_chunk_size,
3627 sys_stripe_size);
3628 if (ret)
3629 goto abort;
3630
3631 return 0;
3632
3633abort:
3634 btrfs_abort_transaction(trans, root, ret);
3635 return ret;
3636}
3637
3638int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3639{
3640 struct extent_map *em;
3641 struct map_lookup *map;
3642 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3643 int readonly = 0;
3644 int i;
3645
3646 read_lock(&map_tree->map_tree.lock);
3647 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3648 read_unlock(&map_tree->map_tree.lock);
3649 if (!em)
3650 return 1;
3651
3652 if (btrfs_test_opt(root, DEGRADED)) {
3653 free_extent_map(em);
3654 return 0;
3655 }
3656
3657 map = (struct map_lookup *)em->bdev;
3658 for (i = 0; i < map->num_stripes; i++) {
3659 if (!map->stripes[i].dev->writeable) {
3660 readonly = 1;
3661 break;
3662 }
3663 }
3664 free_extent_map(em);
3665 return readonly;
3666}
3667
3668void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3669{
3670 extent_map_tree_init(&tree->map_tree);
3671}
3672
3673void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3674{
3675 struct extent_map *em;
3676
3677 while (1) {
3678 write_lock(&tree->map_tree.lock);
3679 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3680 if (em)
3681 remove_extent_mapping(&tree->map_tree, em);
3682 write_unlock(&tree->map_tree.lock);
3683 if (!em)
3684 break;
3685 kfree(em->bdev);
3686 /* once for us */
3687 free_extent_map(em);
3688 /* once for the tree */
3689 free_extent_map(em);
3690 }
3691}
3692
3693int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3694{
3695 struct extent_map *em;
3696 struct map_lookup *map;
3697 struct extent_map_tree *em_tree = &map_tree->map_tree;
3698 int ret;
3699
3700 read_lock(&em_tree->lock);
3701 em = lookup_extent_mapping(em_tree, logical, len);
3702 read_unlock(&em_tree->lock);
3703 BUG_ON(!em);
3704
3705 BUG_ON(em->start > logical || em->start + em->len < logical);
3706 map = (struct map_lookup *)em->bdev;
3707 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3708 ret = map->num_stripes;
3709 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3710 ret = map->sub_stripes;
3711 else
3712 ret = 1;
3713 free_extent_map(em);
3714 return ret;
3715}
3716
3717static int find_live_mirror(struct map_lookup *map, int first, int num,
3718 int optimal)
3719{
3720 int i;
3721 if (map->stripes[optimal].dev->bdev)
3722 return optimal;
3723 for (i = first; i < first + num; i++) {
3724 if (map->stripes[i].dev->bdev)
3725 return i;
3726 }
	/*
	 * We couldn't find one that doesn't fail.  Just return something
	 * and let the I/O error handling code clean up eventually.
	 */
3730 return optimal;
3731}
3732
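/*
 * Map a logical byte range to the physical stripes that back it.
 * stripe_nr = offset / stripe_len selects the logical stripe;
 * stripe_nr % num_stripes (or the RAID10/mirror variants below) picks
 * the device, and the remainder becomes the offset within that stripe.
 * E.g. on two-device RAID0 with 64K stripes, logical offset 192K falls
 * in stripe_nr 3, device index 1, at stripe_offset 0.
 */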
3733static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3734 u64 logical, u64 *length,
3735 struct btrfs_bio **bbio_ret,
3736 int mirror_num)
3737{
3738 struct extent_map *em;
3739 struct map_lookup *map;
3740 struct extent_map_tree *em_tree = &map_tree->map_tree;
3741 u64 offset;
3742 u64 stripe_offset;
3743 u64 stripe_end_offset;
3744 u64 stripe_nr;
3745 u64 stripe_nr_orig;
3746 u64 stripe_nr_end;
3747 int stripe_index;
3748 int i;
3749 int ret = 0;
3750 int num_stripes;
3751 int max_errors = 0;
3752 struct btrfs_bio *bbio = NULL;
3753
3754 read_lock(&em_tree->lock);
3755 em = lookup_extent_mapping(em_tree, logical, *length);
3756 read_unlock(&em_tree->lock);
3757
3758 if (!em) {
3759 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3760 (unsigned long long)logical,
3761 (unsigned long long)*length);
3762 BUG();
3763 }
3764
3765 BUG_ON(em->start > logical || em->start + em->len < logical);
3766 map = (struct map_lookup *)em->bdev;
3767 offset = logical - em->start;
3768
3769 if (mirror_num > map->num_stripes)
3770 mirror_num = 0;
3771
3772 stripe_nr = offset;
3773 /*
3774 * stripe_nr counts the total number of stripes we have to stride
3775 * to get to this block
3776 */
3777 do_div(stripe_nr, map->stripe_len);
3778
3779 stripe_offset = stripe_nr * map->stripe_len;
3780 BUG_ON(offset < stripe_offset);
3781
	/* stripe_offset is the offset of this block in its stripe */
3783 stripe_offset = offset - stripe_offset;
3784
3785 if (rw & REQ_DISCARD)
3786 *length = min_t(u64, em->len - offset, *length);
3787 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3788 /* we limit the length of each bio to what fits in a stripe */
3789 *length = min_t(u64, em->len - offset,
3790 map->stripe_len - stripe_offset);
3791 } else {
3792 *length = em->len - offset;
3793 }
3794
3795 if (!bbio_ret)
3796 goto out;
3797
3798 num_stripes = 1;
3799 stripe_index = 0;
3800 stripe_nr_orig = stripe_nr;
3801 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3802 (~(map->stripe_len - 1));
3803 do_div(stripe_nr_end, map->stripe_len);
3804 stripe_end_offset = stripe_nr_end * map->stripe_len -
3805 (offset + *length);
3806 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3807 if (rw & REQ_DISCARD)
3808 num_stripes = min_t(u64, map->num_stripes,
3809 stripe_nr_end - stripe_nr_orig);
3810 stripe_index = do_div(stripe_nr, map->num_stripes);
3811 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3812 if (rw & (REQ_WRITE | REQ_DISCARD))
3813 num_stripes = map->num_stripes;
3814 else if (mirror_num)
3815 stripe_index = mirror_num - 1;
3816 else {
3817 stripe_index = find_live_mirror(map, 0,
3818 map->num_stripes,
3819 current->pid % map->num_stripes);
3820 mirror_num = stripe_index + 1;
3821 }
3822
3823 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3824 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3825 num_stripes = map->num_stripes;
3826 } else if (mirror_num) {
3827 stripe_index = mirror_num - 1;
3828 } else {
3829 mirror_num = 1;
3830 }
3831
3832 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3833 int factor = map->num_stripes / map->sub_stripes;
3834
3835 stripe_index = do_div(stripe_nr, factor);
3836 stripe_index *= map->sub_stripes;
3837
3838 if (rw & REQ_WRITE)
3839 num_stripes = map->sub_stripes;
3840 else if (rw & REQ_DISCARD)
3841 num_stripes = min_t(u64, map->sub_stripes *
3842 (stripe_nr_end - stripe_nr_orig),
3843 map->num_stripes);
3844 else if (mirror_num)
3845 stripe_index += mirror_num - 1;
3846 else {
3847 int old_stripe_index = stripe_index;
3848 stripe_index = find_live_mirror(map, stripe_index,
3849 map->sub_stripes, stripe_index +
3850 current->pid % map->sub_stripes);
3851 mirror_num = stripe_index - old_stripe_index + 1;
3852 }
3853 } else {
3854 /*
3855 * after this do_div call, stripe_nr is the number of stripes
3856 * on this device we have to walk to find the data, and
3857 * stripe_index is the number of our device in the stripe array
3858 */
3859 stripe_index = do_div(stripe_nr, map->num_stripes);
3860 mirror_num = stripe_index + 1;
3861 }
3862 BUG_ON(stripe_index >= map->num_stripes);
3863
3864 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3865 if (!bbio) {
3866 ret = -ENOMEM;
3867 goto out;
3868 }
3869 atomic_set(&bbio->error, 0);
3870
3871 if (rw & REQ_DISCARD) {
3872 int factor = 0;
3873 int sub_stripes = 0;
3874 u64 stripes_per_dev = 0;
3875 u32 remaining_stripes = 0;
3876 u32 last_stripe = 0;
3877
3878 if (map->type &
3879 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3880 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3881 sub_stripes = 1;
3882 else
3883 sub_stripes = map->sub_stripes;
3884
3885 factor = map->num_stripes / sub_stripes;
3886 stripes_per_dev = div_u64_rem(stripe_nr_end -
3887 stripe_nr_orig,
3888 factor,
3889 &remaining_stripes);
3890 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3891 last_stripe *= sub_stripes;
3892 }
3893
3894 for (i = 0; i < num_stripes; i++) {
3895 bbio->stripes[i].physical =
3896 map->stripes[stripe_index].physical +
3897 stripe_offset + stripe_nr * map->stripe_len;
3898 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3899
3900 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3901 BTRFS_BLOCK_GROUP_RAID10)) {
3902 bbio->stripes[i].length = stripes_per_dev *
3903 map->stripe_len;
3904
3905 if (i / sub_stripes < remaining_stripes)
3906 bbio->stripes[i].length +=
3907 map->stripe_len;
3908
3909 /*
3910 * Special for the first stripe and
3911 * the last stripe:
3912 *
3913 * |-------|...|-------|
3914 * |----------|
3915 * off end_off
3916 */
3917 if (i < sub_stripes)
3918 bbio->stripes[i].length -=
3919 stripe_offset;
3920
3921 if (stripe_index >= last_stripe &&
3922 stripe_index <= (last_stripe +
3923 sub_stripes - 1))
3924 bbio->stripes[i].length -=
3925 stripe_end_offset;
3926
3927 if (i == sub_stripes - 1)
3928 stripe_offset = 0;
3929 } else
3930 bbio->stripes[i].length = *length;
3931
3932 stripe_index++;
3933 if (stripe_index == map->num_stripes) {
3934 /* This could only happen for RAID0/10 */
3935 stripe_index = 0;
3936 stripe_nr++;
3937 }
3938 }
3939 } else {
3940 for (i = 0; i < num_stripes; i++) {
3941 bbio->stripes[i].physical =
3942 map->stripes[stripe_index].physical +
3943 stripe_offset +
3944 stripe_nr * map->stripe_len;
3945 bbio->stripes[i].dev =
3946 map->stripes[stripe_index].dev;
3947 stripe_index++;
3948 }
3949 }
3950
3951 if (rw & REQ_WRITE) {
3952 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3953 BTRFS_BLOCK_GROUP_RAID10 |
3954 BTRFS_BLOCK_GROUP_DUP)) {
3955 max_errors = 1;
3956 }
3957 }
3958
3959 *bbio_ret = bbio;
3960 bbio->num_stripes = num_stripes;
3961 bbio->max_errors = max_errors;
3962 bbio->mirror_num = mirror_num;
3963out:
3964 free_extent_map(em);
3965 return ret;
3966}
3967
3968int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3969 u64 logical, u64 *length,
3970 struct btrfs_bio **bbio_ret, int mirror_num)
3971{
3972 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3973 mirror_num);
3974}
3975
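/*
 * Reverse mapping: given a physical offset (and optionally a devid),
 * collect the logical addresses inside the chunk at chunk_start that
 * map to it.  Duplicates (e.g. both copies of a mirror) are reported
 * only once.
 */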
3976int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3977 u64 chunk_start, u64 physical, u64 devid,
3978 u64 **logical, int *naddrs, int *stripe_len)
3979{
3980 struct extent_map_tree *em_tree = &map_tree->map_tree;
3981 struct extent_map *em;
3982 struct map_lookup *map;
3983 u64 *buf;
3984 u64 bytenr;
3985 u64 length;
3986 u64 stripe_nr;
3987 int i, j, nr = 0;
3988
3989 read_lock(&em_tree->lock);
3990 em = lookup_extent_mapping(em_tree, chunk_start, 1);
3991 read_unlock(&em_tree->lock);
3992
3993 BUG_ON(!em || em->start != chunk_start);
3994 map = (struct map_lookup *)em->bdev;
3995
3996 length = em->len;
3997 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3998 do_div(length, map->num_stripes / map->sub_stripes);
3999 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4000 do_div(length, map->num_stripes);
4001
4002 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4003 BUG_ON(!buf); /* -ENOMEM */
4004
4005 for (i = 0; i < map->num_stripes; i++) {
4006 if (devid && map->stripes[i].dev->devid != devid)
4007 continue;
4008 if (map->stripes[i].physical > physical ||
4009 map->stripes[i].physical + length <= physical)
4010 continue;
4011
4012 stripe_nr = physical - map->stripes[i].physical;
4013 do_div(stripe_nr, map->stripe_len);
4014
4015 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4016 stripe_nr = stripe_nr * map->num_stripes + i;
4017 do_div(stripe_nr, map->sub_stripes);
4018 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4019 stripe_nr = stripe_nr * map->num_stripes + i;
4020 }
4021 bytenr = chunk_start + stripe_nr * map->stripe_len;
4022 WARN_ON(nr >= map->num_stripes);
4023 for (j = 0; j < nr; j++) {
4024 if (buf[j] == bytenr)
4025 break;
4026 }
4027 if (j == nr) {
4028 WARN_ON(nr >= map->num_stripes);
4029 buf[nr++] = bytenr;
4030 }
4031 }
4032
4033 *logical = buf;
4034 *naddrs = nr;
4035 *stripe_len = map->stripe_len;
4036
4037 free_extent_map(em);
4038 return 0;
4039}
4040
4041static void *merge_stripe_index_into_bio_private(void *bi_private,
4042 unsigned int stripe_index)
4043{
4044 /*
4045 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4046 * at most 1.
4047 * The alternative solution (instead of stealing bits from the
4048 * pointer) would be to allocate an intermediate structure
4049 * that contains the old private pointer plus the stripe_index.
4050 */
4051 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4052 BUG_ON(stripe_index > 3);
4053 return (void *)(((uintptr_t)bi_private) | stripe_index);
4054}
4055
4056static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4057{
4058 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4059}
4060
4061static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4062{
4063 return (unsigned int)((uintptr_t)bi_private) & 3;
4064}
4065
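/*
 * Completion handler for the per-stripe bios built in btrfs_map_bio.
 * It bumps the per-device error statistics on I/O failure, and only
 * reports an error to the upper layers once more stripes have failed
 * than the RAID profile can tolerate (bbio->max_errors).
 */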
4066static void btrfs_end_bio(struct bio *bio, int err)
4067{
4068 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4069 int is_orig_bio = 0;
4070
4071 if (err) {
4072 atomic_inc(&bbio->error);
4073 if (err == -EIO || err == -EREMOTEIO) {
4074 unsigned int stripe_index =
4075 extract_stripe_index_from_bio_private(
4076 bio->bi_private);
4077 struct btrfs_device *dev;
4078
4079 BUG_ON(stripe_index >= bbio->num_stripes);
4080 dev = bbio->stripes[stripe_index].dev;
4081 if (dev->bdev) {
4082 if (bio->bi_rw & WRITE)
4083 btrfs_dev_stat_inc(dev,
4084 BTRFS_DEV_STAT_WRITE_ERRS);
4085 else
4086 btrfs_dev_stat_inc(dev,
4087 BTRFS_DEV_STAT_READ_ERRS);
4088 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4089 btrfs_dev_stat_inc(dev,
4090 BTRFS_DEV_STAT_FLUSH_ERRS);
4091 btrfs_dev_stat_print_on_error(dev);
4092 }
4093 }
4094 }
4095
4096 if (bio == bbio->orig_bio)
4097 is_orig_bio = 1;
4098
4099 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4100 if (!is_orig_bio) {
4101 bio_put(bio);
4102 bio = bbio->orig_bio;
4103 }
4104 bio->bi_private = bbio->private;
4105 bio->bi_end_io = bbio->end_io;
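		/*
		 * bi_bdev is dead at this point, so it is (ab)used to
		 * hand the mirror number back to the original end_io
		 * handler, which recovers it from bio->bi_bdev.
		 */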
4106 bio->bi_bdev = (struct block_device *)
4107 (unsigned long)bbio->mirror_num;
4108 /* only send an error to the higher layers if it is
4109 * beyond the tolerance of the multi-bio
4110 */
4111 if (atomic_read(&bbio->error) > bbio->max_errors) {
4112 err = -EIO;
4113 } else {
4114 /*
4115 * this bio is actually up to date, we didn't
4116 * go over the max number of errors
4117 */
4118 set_bit(BIO_UPTODATE, &bio->bi_flags);
4119 err = 0;
4120 }
4121 kfree(bbio);
4122
4123 bio_endio(bio, err);
4124 } else if (!is_orig_bio) {
4125 bio_put(bio);
4126 }
4127}
4128
4129struct async_sched {
4130 struct bio *bio;
4131 int rw;
4132 struct btrfs_fs_info *info;
4133 struct btrfs_work work;
4134};
4135
4136/*
4137 * see run_scheduled_bios for a description of why bios are collected for
4138 * async submit.
4139 *
4140 * This will add one bio to the pending list for a device and make sure
4141 * the work struct is scheduled.
4142 */
4143static noinline void schedule_bio(struct btrfs_root *root,
4144 struct btrfs_device *device,
4145 int rw, struct bio *bio)
4146{
4147 int should_queue = 1;
4148 struct btrfs_pending_bios *pending_bios;
4149
4150 /* don't bother with additional async steps for reads, right now */
4151 if (!(rw & REQ_WRITE)) {
4152 bio_get(bio);
4153 btrfsic_submit_bio(rw, bio);
4154 bio_put(bio);
4155 return;
4156 }
4157
4158 /*
4159 * nr_async_bios allows us to reliably return congestion to the
4160 * higher layers. Otherwise, the async bio makes it appear we have
4161 * made progress against dirty pages when we've really just put it
4162 * on a queue for later
4163 */
4164 atomic_inc(&root->fs_info->nr_async_bios);
4165 WARN_ON(bio->bi_next);
4166 bio->bi_next = NULL;
4167 bio->bi_rw |= rw;
4168
4169 spin_lock(&device->io_lock);
4170 if (bio->bi_rw & REQ_SYNC)
4171 pending_bios = &device->pending_sync_bios;
4172 else
4173 pending_bios = &device->pending_bios;
4174
4175 if (pending_bios->tail)
4176 pending_bios->tail->bi_next = bio;
4177
4178 pending_bios->tail = bio;
4179 if (!pending_bios->head)
4180 pending_bios->head = bio;
4181 if (device->running_pending)
4182 should_queue = 0;
4183
4184 spin_unlock(&device->io_lock);
4185
4186 if (should_queue)
4187 btrfs_queue_worker(&root->fs_info->submit_workers,
4188 &device->work);
4189}
4190
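/*
 * Main submission path: map the bio's logical range, clone it once per
 * stripe, tag each clone's bi_private with its stripe index and submit
 * the clones, either directly or through the async submit workers.
 * Clones aimed at missing devices (or writes to read-only ones) are
 * failed immediately with -EIO.
 */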
4191int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4192 int mirror_num, int async_submit)
4193{
4194 struct btrfs_mapping_tree *map_tree;
4195 struct btrfs_device *dev;
4196 struct bio *first_bio = bio;
4197 u64 logical = (u64)bio->bi_sector << 9;
4198 u64 length = 0;
4199 u64 map_length;
4200 int ret;
4201 int dev_nr = 0;
4202 int total_devs = 1;
4203 struct btrfs_bio *bbio = NULL;
4204
4205 length = bio->bi_size;
4206 map_tree = &root->fs_info->mapping_tree;
4207 map_length = length;
4208
4209 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4210 mirror_num);
4211 if (ret) /* -ENOMEM */
4212 return ret;
4213
4214 total_devs = bbio->num_stripes;
4215 if (map_length < length) {
4216		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
4217		       "map len %llu\n", (unsigned long long)logical,
4218 (unsigned long long)length,
4219 (unsigned long long)map_length);
4220 BUG();
4221 }
4222
4223 bbio->orig_bio = first_bio;
4224 bbio->private = first_bio->bi_private;
4225 bbio->end_io = first_bio->bi_end_io;
4226 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4227
4228 while (dev_nr < total_devs) {
4229 if (dev_nr < total_devs - 1) {
4230 bio = bio_clone(first_bio, GFP_NOFS);
4231 BUG_ON(!bio); /* -ENOMEM */
4232 } else {
4233 bio = first_bio;
4234 }
4235 bio->bi_private = bbio;
4236 bio->bi_private = merge_stripe_index_into_bio_private(
4237 bio->bi_private, (unsigned int)dev_nr);
4238 bio->bi_end_io = btrfs_end_bio;
4239 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4240 dev = bbio->stripes[dev_nr].dev;
4241 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4242#ifdef DEBUG
4243 struct rcu_string *name;
4244
4245 rcu_read_lock();
4246 name = rcu_dereference(dev->name);
4247			pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4248 "(%s id %llu), size=%u\n", rw,
4249 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4250 name->str, dev->devid, bio->bi_size);
4251 rcu_read_unlock();
4252#endif
4253 bio->bi_bdev = dev->bdev;
4254 if (async_submit)
4255 schedule_bio(root, dev, rw, bio);
4256 else
4257 btrfsic_submit_bio(rw, bio);
4258 } else {
4259 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4260 bio->bi_sector = logical >> 9;
4261 bio_endio(bio, -EIO);
4262 }
4263 dev_nr++;
4264 }
4265 return 0;
4266}
4267
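/*
 * Search the filesystem's device list, and any seed device lists
 * chained behind it, for a device matching @devid (and, when given,
 * @uuid and @fsid).
 */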
4268struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4269 u8 *uuid, u8 *fsid)
4270{
4271 struct btrfs_device *device;
4272 struct btrfs_fs_devices *cur_devices;
4273
4274 cur_devices = root->fs_info->fs_devices;
4275 while (cur_devices) {
4276 if (!fsid ||
4277 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4278 device = __find_device(&cur_devices->devices,
4279 devid, uuid);
4280 if (device)
4281 return device;
4282 }
4283 cur_devices = cur_devices->seed;
4284 }
4285 return NULL;
4286}
4287
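/*
 * Create a placeholder for a device that is referenced by the metadata
 * but not actually present, so that a degraded mount can carry on.
 * The stub has no bdev or name and is flagged ->missing.
 */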
4288static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4289 u64 devid, u8 *dev_uuid)
4290{
4291 struct btrfs_device *device;
4292 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4293
4294 device = kzalloc(sizeof(*device), GFP_NOFS);
4295 if (!device)
4296 return NULL;
4297 list_add(&device->dev_list,
4298 &fs_devices->devices);
4299 device->dev_root = root->fs_info->dev_root;
4300 device->devid = devid;
4301 device->work.func = pending_bios_fn;
4302 device->fs_devices = fs_devices;
4303 device->missing = 1;
4304 fs_devices->num_devices++;
4305 fs_devices->missing_devices++;
4306 spin_lock_init(&device->io_lock);
4307 INIT_LIST_HEAD(&device->dev_alloc_list);
4308 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4309 return device;
4310}
4311
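/*
 * Decode one chunk item into a map_lookup and publish it in the mapping
 * tree as an extent_map (the map_lookup is stashed in em->bdev, since a
 * chunk mapping has no real block device).  Stripe devices that cannot
 * be found are either stubbed out via add_missing_dev on degraded
 * mounts or treated as fatal.
 */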
4312static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4313 struct extent_buffer *leaf,
4314 struct btrfs_chunk *chunk)
4315{
4316 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4317 struct map_lookup *map;
4318 struct extent_map *em;
4319 u64 logical;
4320 u64 length;
4321 u64 devid;
4322 u8 uuid[BTRFS_UUID_SIZE];
4323 int num_stripes;
4324 int ret;
4325 int i;
4326
4327 logical = key->offset;
4328 length = btrfs_chunk_length(leaf, chunk);
4329
4330 read_lock(&map_tree->map_tree.lock);
4331 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4332 read_unlock(&map_tree->map_tree.lock);
4333
4334 /* already mapped? */
4335 if (em && em->start <= logical && em->start + em->len > logical) {
4336 free_extent_map(em);
4337 return 0;
4338 } else if (em) {
4339 free_extent_map(em);
4340 }
4341
4342 em = alloc_extent_map();
4343 if (!em)
4344 return -ENOMEM;
4345 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4346 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4347 if (!map) {
4348 free_extent_map(em);
4349 return -ENOMEM;
4350 }
4351
4352 em->bdev = (struct block_device *)map;
4353 em->start = logical;
4354 em->len = length;
4355 em->block_start = 0;
4356 em->block_len = em->len;
4357
4358 map->num_stripes = num_stripes;
4359 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4360 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4361 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4362 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4363 map->type = btrfs_chunk_type(leaf, chunk);
4364 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4365 for (i = 0; i < num_stripes; i++) {
4366 map->stripes[i].physical =
4367 btrfs_stripe_offset_nr(leaf, chunk, i);
4368 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4369 read_extent_buffer(leaf, uuid, (unsigned long)
4370 btrfs_stripe_dev_uuid_nr(chunk, i),
4371 BTRFS_UUID_SIZE);
4372 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4373 NULL);
4374 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4375 kfree(map);
4376 free_extent_map(em);
4377 return -EIO;
4378 }
4379 if (!map->stripes[i].dev) {
4380 map->stripes[i].dev =
4381 add_missing_dev(root, devid, uuid);
4382 if (!map->stripes[i].dev) {
4383 kfree(map);
4384 free_extent_map(em);
4385 return -EIO;
4386 }
4387 }
4388 map->stripes[i].dev->in_fs_metadata = 1;
4389 }
4390
4391 write_lock(&map_tree->map_tree.lock);
4392 ret = add_extent_mapping(&map_tree->map_tree, em);
4393 write_unlock(&map_tree->map_tree.lock);
4394 BUG_ON(ret); /* Tree corruption */
4395 free_extent_map(em);
4396
4397 return 0;
4398}
4399
4400static void fill_device_from_item(struct extent_buffer *leaf,
4401 struct btrfs_dev_item *dev_item,
4402 struct btrfs_device *device)
4403{
4404 unsigned long ptr;
4405
4406 device->devid = btrfs_device_id(leaf, dev_item);
4407 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4408 device->total_bytes = device->disk_total_bytes;
4409 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4410 device->type = btrfs_device_type(leaf, dev_item);
4411 device->io_align = btrfs_device_io_align(leaf, dev_item);
4412 device->io_width = btrfs_device_io_width(leaf, dev_item);
4413 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4414
4415 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4416 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4417}
4418
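/*
 * Called when a device item carries a foreign fsid: make sure the seed
 * filesystem it belongs to is open.  Seed devices are cloned, opened
 * read-only and chained onto fs_devices->seed.
 */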
4419static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4420{
4421 struct btrfs_fs_devices *fs_devices;
4422 int ret;
4423
4424 BUG_ON(!mutex_is_locked(&uuid_mutex));
4425
4426 fs_devices = root->fs_info->fs_devices->seed;
4427 while (fs_devices) {
4428 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4429 ret = 0;
4430 goto out;
4431 }
4432 fs_devices = fs_devices->seed;
4433 }
4434
4435 fs_devices = find_fsid(fsid);
4436 if (!fs_devices) {
4437 ret = -ENOENT;
4438 goto out;
4439 }
4440
4441 fs_devices = clone_fs_devices(fs_devices);
4442 if (IS_ERR(fs_devices)) {
4443 ret = PTR_ERR(fs_devices);
4444 goto out;
4445 }
4446
4447 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4448 root->fs_info->bdev_holder);
4449 if (ret) {
4450 free_fs_devices(fs_devices);
4451 goto out;
4452 }
4453
4454 if (!fs_devices->seeding) {
4455 __btrfs_close_devices(fs_devices);
4456 free_fs_devices(fs_devices);
4457 ret = -EINVAL;
4458 goto out;
4459 }
4460
4461 fs_devices->seed = root->fs_info->fs_devices->seed;
4462 root->fs_info->fs_devices->seed = fs_devices;
4463out:
4464 return ret;
4465}
4466
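/*
 * Load one device item from the chunk tree into its in-memory
 * btrfs_device, opening seed devices and stubbing out missing ones as
 * needed, and account its free space towards the filesystem totals.
 */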
4467static int read_one_dev(struct btrfs_root *root,
4468 struct extent_buffer *leaf,
4469 struct btrfs_dev_item *dev_item)
4470{
4471 struct btrfs_device *device;
4472 u64 devid;
4473 int ret;
4474 u8 fs_uuid[BTRFS_UUID_SIZE];
4475 u8 dev_uuid[BTRFS_UUID_SIZE];
4476
4477 devid = btrfs_device_id(leaf, dev_item);
4478 read_extent_buffer(leaf, dev_uuid,
4479 (unsigned long)btrfs_device_uuid(dev_item),
4480 BTRFS_UUID_SIZE);
4481 read_extent_buffer(leaf, fs_uuid,
4482 (unsigned long)btrfs_device_fsid(dev_item),
4483 BTRFS_UUID_SIZE);
4484
4485 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4486 ret = open_seed_devices(root, fs_uuid);
4487 if (ret && !btrfs_test_opt(root, DEGRADED))
4488 return ret;
4489 }
4490
4491 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4492 if (!device || !device->bdev) {
4493 if (!btrfs_test_opt(root, DEGRADED))
4494 return -EIO;
4495
4496 if (!device) {
4497			printk(KERN_WARNING "btrfs: devid %llu missing\n",
4498 (unsigned long long)devid);
4499 device = add_missing_dev(root, devid, dev_uuid);
4500 if (!device)
4501 return -ENOMEM;
4502 } else if (!device->missing) {
4503			/*
4504			 * this happens when a device that was properly set up
4505			 * in the device info lists suddenly goes bad.
4506			 * device->bdev is NULL, so the device has to be
4507			 * marked missing here.
4508			 */
4509 root->fs_info->fs_devices->missing_devices++;
4510 device->missing = 1;
4511 }
4512 }
4513
4514 if (device->fs_devices != root->fs_info->fs_devices) {
4515 BUG_ON(device->writeable);
4516 if (device->generation !=
4517 btrfs_device_generation(leaf, dev_item))
4518 return -EINVAL;
4519 }
4520
4521 fill_device_from_item(leaf, dev_item, device);
4522 device->dev_root = root->fs_info->dev_root;
4523 device->in_fs_metadata = 1;
4524 if (device->writeable) {
4525 device->fs_devices->total_rw_bytes += device->total_bytes;
4526 spin_lock(&root->fs_info->free_chunk_lock);
4527 root->fs_info->free_chunk_space += device->total_bytes -
4528 device->bytes_used;
4529 spin_unlock(&root->fs_info->free_chunk_lock);
4530 }
4531 ret = 0;
4532 return ret;
4533}
4534
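/*
 * Read the bootstrap chunks out of the superblock's sys_chunk_array.
 * The system chunks described there must be mapped before the chunk
 * tree itself can be read, so they are replayed through read_one_chunk
 * from a temporary extent buffer holding a copy of the superblock.
 */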
4535int btrfs_read_sys_array(struct btrfs_root *root)
4536{
4537 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4538 struct extent_buffer *sb;
4539 struct btrfs_disk_key *disk_key;
4540 struct btrfs_chunk *chunk;
4541 u8 *ptr;
4542 unsigned long sb_ptr;
4543 int ret = 0;
4544 u32 num_stripes;
4545 u32 array_size;
4546 u32 len = 0;
4547 u32 cur;
4548 struct btrfs_key key;
4549
4550 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4551 BTRFS_SUPER_INFO_SIZE);
4552 if (!sb)
4553 return -ENOMEM;
4554 btrfs_set_buffer_uptodate(sb);
4555 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4556	/*
4557	 * The sb extent buffer is artificial and just used to read the
4558	 * system array.  btrfs_set_buffer_uptodate() does not properly
4559	 * mark all its pages up-to-date when the page is larger: the
4560	 * extent does not cover the whole page, so check_page_uptodate
4561	 * does not find all the page's extents up-to-date (the hole
4562	 * beyond sb), and write_extent_buffer then triggers a WARN_ON.
4563	 *
4564	 * Regular short extents go through a mark_extent_buffer_dirty/
4565	 * writeback cycle, but sb spans only this function.  Add an explicit
4566	 * SetPageUptodate call to silence the warning, e.g. on PowerPC 64.
4567	 */
4568 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4569 SetPageUptodate(sb->pages[0]);
4570
4571 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4572 array_size = btrfs_super_sys_array_size(super_copy);
4573
4574 ptr = super_copy->sys_chunk_array;
4575 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4576 cur = 0;
4577
4578 while (cur < array_size) {
4579 disk_key = (struct btrfs_disk_key *)ptr;
4580 btrfs_disk_key_to_cpu(&key, disk_key);
4581
4582 len = sizeof(*disk_key); ptr += len;
4583 sb_ptr += len;
4584 cur += len;
4585
4586 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4587 chunk = (struct btrfs_chunk *)sb_ptr;
4588 ret = read_one_chunk(root, &key, sb, chunk);
4589 if (ret)
4590 break;
4591 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4592 len = btrfs_chunk_item_size(num_stripes);
4593 } else {
4594 ret = -EIO;
4595 break;
4596 }
4597 ptr += len;
4598 sb_ptr += len;
4599 cur += len;
4600 }
4601 free_extent_buffer(sb);
4602 return ret;
4603}
4604
4605int btrfs_read_chunk_tree(struct btrfs_root *root)
4606{
4607 struct btrfs_path *path;
4608 struct extent_buffer *leaf;
4609 struct btrfs_key key;
4610 struct btrfs_key found_key;
4611 int ret;
4612 int slot;
4613
4614 root = root->fs_info->chunk_root;
4615
4616 path = btrfs_alloc_path();
4617 if (!path)
4618 return -ENOMEM;
4619
4620 mutex_lock(&uuid_mutex);
4621 lock_chunks(root);
4622
4623	/* first we search for all of the device items, and then we
4624	 * read in all of the chunk items. This way we can create chunk
4625	 * mappings that reference all of the devices that are found
4626	 */
4627 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4628 key.offset = 0;
4629 key.type = 0;
4630again:
4631 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4632 if (ret < 0)
4633 goto error;
4634 while (1) {
4635 leaf = path->nodes[0];
4636 slot = path->slots[0];
4637 if (slot >= btrfs_header_nritems(leaf)) {
4638 ret = btrfs_next_leaf(root, path);
4639 if (ret == 0)
4640 continue;
4641 if (ret < 0)
4642 goto error;
4643 break;
4644 }
4645 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4646 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4647 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4648 break;
4649 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4650 struct btrfs_dev_item *dev_item;
4651 dev_item = btrfs_item_ptr(leaf, slot,
4652 struct btrfs_dev_item);
4653 ret = read_one_dev(root, leaf, dev_item);
4654 if (ret)
4655 goto error;
4656 }
4657 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4658 struct btrfs_chunk *chunk;
4659 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4660 ret = read_one_chunk(root, &found_key, leaf, chunk);
4661 if (ret)
4662 goto error;
4663 }
4664 path->slots[0]++;
4665 }
4666 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4667 key.objectid = 0;
4668 btrfs_release_path(path);
4669 goto again;
4670 }
4671 ret = 0;
4672error:
4673 unlock_chunks(root);
4674 mutex_unlock(&uuid_mutex);
4675
4676 btrfs_free_path(path);
4677 return ret;
4678}
4679
4680static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4681{
4682 int i;
4683
4684 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4685 btrfs_dev_stat_reset(dev, i);
4686}
4687
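/*
 * Load the persisted error counters for every device from the device
 * tree.  A device without a dev_stats item (e.g. on the first mount
 * after mkfs) simply starts with zeroed counters.
 */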
4688int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
4689{
4690 struct btrfs_key key;
4691 struct btrfs_key found_key;
4692 struct btrfs_root *dev_root = fs_info->dev_root;
4693 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4694 struct extent_buffer *eb;
4695 int slot;
4696 int ret = 0;
4697 struct btrfs_device *device;
4698 struct btrfs_path *path = NULL;
4699 int i;
4700
4701 path = btrfs_alloc_path();
4702 if (!path) {
4703 ret = -ENOMEM;
4704 goto out;
4705 }
4706
4707 mutex_lock(&fs_devices->device_list_mutex);
4708 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4709 int item_size;
4710 struct btrfs_dev_stats_item *ptr;
4711
4712 key.objectid = 0;
4713 key.type = BTRFS_DEV_STATS_KEY;
4714 key.offset = device->devid;
4715 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
4716 if (ret) {
4717 printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
4718 rcu_str_deref(device->name),
4719 (unsigned long long)device->devid);
4720 __btrfs_reset_dev_stats(device);
4721 device->dev_stats_valid = 1;
4722 btrfs_release_path(path);
4723 continue;
4724 }
4725 slot = path->slots[0];
4726 eb = path->nodes[0];
4727 btrfs_item_key_to_cpu(eb, &found_key, slot);
4728 item_size = btrfs_item_size_nr(eb, slot);
4729
4730 ptr = btrfs_item_ptr(eb, slot,
4731 struct btrfs_dev_stats_item);
4732
4733 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4734 if (item_size >= (1 + i) * sizeof(__le64))
4735 btrfs_dev_stat_set(device, i,
4736 btrfs_dev_stats_value(eb, ptr, i));
4737 else
4738 btrfs_dev_stat_reset(device, i);
4739 }
4740
4741 device->dev_stats_valid = 1;
4742 btrfs_dev_stat_print_on_load(device);
4743 btrfs_release_path(path);
4744 }
4745 mutex_unlock(&fs_devices->device_list_mutex);
4746
4747out:
4748 btrfs_free_path(path);
4749 return ret < 0 ? ret : 0;
4750}
4751
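/*
 * Write one device's in-memory error counters back to its dev_stats
 * item, deleting and re-creating the item if it is missing or smaller
 * than the current btrfs_dev_stats_item.
 */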
4752static int update_dev_stat_item(struct btrfs_trans_handle *trans,
4753 struct btrfs_root *dev_root,
4754 struct btrfs_device *device)
4755{
4756 struct btrfs_path *path;
4757 struct btrfs_key key;
4758 struct extent_buffer *eb;
4759 struct btrfs_dev_stats_item *ptr;
4760 int ret;
4761 int i;
4762
4763 key.objectid = 0;
4764 key.type = BTRFS_DEV_STATS_KEY;
4765 key.offset = device->devid;
4766
4767 path = btrfs_alloc_path();
4768	if (!path)
		return -ENOMEM;
4769 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
4770 if (ret < 0) {
4771 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
4772 ret, rcu_str_deref(device->name));
4773 goto out;
4774 }
4775
4776 if (ret == 0 &&
4777 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
4778 /* need to delete old one and insert a new one */
4779 ret = btrfs_del_item(trans, dev_root, path);
4780 if (ret != 0) {
4781 printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
4782 rcu_str_deref(device->name), ret);
4783 goto out;
4784 }
4785 ret = 1;
4786 }
4787
4788 if (ret == 1) {
4789 /* need to insert a new item */
4790 btrfs_release_path(path);
4791 ret = btrfs_insert_empty_item(trans, dev_root, path,
4792 &key, sizeof(*ptr));
4793 if (ret < 0) {
4794 printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
4795 rcu_str_deref(device->name), ret);
4796 goto out;
4797 }
4798 }
4799
4800 eb = path->nodes[0];
4801 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
4802 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4803 btrfs_set_dev_stats_value(eb, ptr, i,
4804 btrfs_dev_stat_read(device, i));
4805 btrfs_mark_buffer_dirty(eb);
4806
4807out:
4808 btrfs_free_path(path);
4809 return ret;
4810}
4811
4812/*
4813 * Called from commit_transaction.  Writes all changed device stats to disk.
4814 */
4815int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
4816 struct btrfs_fs_info *fs_info)
4817{
4818 struct btrfs_root *dev_root = fs_info->dev_root;
4819 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4820 struct btrfs_device *device;
4821 int ret = 0;
4822
4823 mutex_lock(&fs_devices->device_list_mutex);
4824 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4825 if (!device->dev_stats_valid || !device->dev_stats_dirty)
4826 continue;
4827
4828 ret = update_dev_stat_item(trans, dev_root, device);
4829 if (!ret)
4830 device->dev_stats_dirty = 0;
4831 }
4832 mutex_unlock(&fs_devices->device_list_mutex);
4833
4834 return ret;
4835}
4836
4837void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
4838{
4839 btrfs_dev_stat_inc(dev, index);
4840 btrfs_dev_stat_print_on_error(dev);
4841}
4842
4843void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
4844{
4845 if (!dev->dev_stats_valid)
4846 return;
4847 printk_ratelimited_in_rcu(KERN_ERR
4848 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4849 rcu_str_deref(dev->name),
4850 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4851 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4852 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4853 btrfs_dev_stat_read(dev,
4854 BTRFS_DEV_STAT_CORRUPTION_ERRS),
4855 btrfs_dev_stat_read(dev,
4856 BTRFS_DEV_STAT_GENERATION_ERRS));
4857}
4858
4859static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
4860{
4861 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4862 rcu_str_deref(dev->name),
4863 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4864 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4865 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4866 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
4867 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
4868}
4869
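/*
 * Back end of the dev-stats ioctl: copy a device's error counters into
 * @stats, optionally resetting them after the read.
 */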
4870int btrfs_get_dev_stats(struct btrfs_root *root,
4871 struct btrfs_ioctl_get_dev_stats *stats,
4872 int reset_after_read)
4873{
4874 struct btrfs_device *dev;
4875 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4876 int i;
4877
4878 mutex_lock(&fs_devices->device_list_mutex);
4879 dev = btrfs_find_device(root, stats->devid, NULL, NULL);
4880 mutex_unlock(&fs_devices->device_list_mutex);
4881
4882 if (!dev) {
4883 printk(KERN_WARNING
4884 "btrfs: get dev_stats failed, device not found\n");
4885 return -ENODEV;
4886 } else if (!dev->dev_stats_valid) {
4887 printk(KERN_WARNING
4888 "btrfs: get dev_stats failed, not yet valid\n");
4889 return -ENODEV;
4890 } else if (reset_after_read) {
4891 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4892 if (stats->nr_items > i)
4893 stats->values[i] =
4894 btrfs_dev_stat_read_and_reset(dev, i);
4895 else
4896 btrfs_dev_stat_reset(dev, i);
4897 }
4898 } else {
4899 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4900 if (stats->nr_items > i)
4901 stats->values[i] = btrfs_dev_stat_read(dev, i);
4902 }
4903 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
4904 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
4905 return 0;
4906}
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/bio.h>
20#include <linux/slab.h>
21#include <linux/buffer_head.h>
22#include <linux/blkdev.h>
23#include <linux/random.h>
24#include <linux/iocontext.h>
25#include <linux/capability.h>
26#include <asm/div64.h>
27#include "compat.h"
28#include "ctree.h"
29#include "extent_map.h"
30#include "disk-io.h"
31#include "transaction.h"
32#include "print-tree.h"
33#include "volumes.h"
34#include "async-thread.h"
35
36static int init_first_rw_device(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct btrfs_device *device);
39static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
40
41static DEFINE_MUTEX(uuid_mutex);
42static LIST_HEAD(fs_uuids);
43
44static void lock_chunks(struct btrfs_root *root)
45{
46 mutex_lock(&root->fs_info->chunk_mutex);
47}
48
49static void unlock_chunks(struct btrfs_root *root)
50{
51 mutex_unlock(&root->fs_info->chunk_mutex);
52}
53
54static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
55{
56 struct btrfs_device *device;
57 WARN_ON(fs_devices->opened);
58 while (!list_empty(&fs_devices->devices)) {
59 device = list_entry(fs_devices->devices.next,
60 struct btrfs_device, dev_list);
61 list_del(&device->dev_list);
62 kfree(device->name);
63 kfree(device);
64 }
65 kfree(fs_devices);
66}
67
68int btrfs_cleanup_fs_uuids(void)
69{
70 struct btrfs_fs_devices *fs_devices;
71
72 while (!list_empty(&fs_uuids)) {
73 fs_devices = list_entry(fs_uuids.next,
74 struct btrfs_fs_devices, list);
75 list_del(&fs_devices->list);
76 free_fs_devices(fs_devices);
77 }
78 return 0;
79}
80
81static noinline struct btrfs_device *__find_device(struct list_head *head,
82 u64 devid, u8 *uuid)
83{
84 struct btrfs_device *dev;
85
86 list_for_each_entry(dev, head, dev_list) {
87 if (dev->devid == devid &&
88 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
89 return dev;
90 }
91 }
92 return NULL;
93}
94
95static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
96{
97 struct btrfs_fs_devices *fs_devices;
98
99 list_for_each_entry(fs_devices, &fs_uuids, list) {
100 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
101 return fs_devices;
102 }
103 return NULL;
104}
105
106static void requeue_list(struct btrfs_pending_bios *pending_bios,
107 struct bio *head, struct bio *tail)
108{
109
110 struct bio *old_head;
111
112 old_head = pending_bios->head;
113 pending_bios->head = head;
114 if (pending_bios->tail)
115 tail->bi_next = old_head;
116 else
117 pending_bios->tail = tail;
118}
119
120/*
121 * we try to collect pending bios for a device so we don't get a large
122 * number of procs sending bios down to the same device. This greatly
123 * improves the schedulers ability to collect and merge the bios.
124 *
125 * But, it also turns into a long list of bios to process and that is sure
126 * to eventually make the worker thread block. The solution here is to
127 * make some progress and then put this work struct back at the end of
128 * the list if the block device is congested. This way, multiple devices
129 * can make progress from a single worker thread.
130 */
131static noinline int run_scheduled_bios(struct btrfs_device *device)
132{
133 struct bio *pending;
134 struct backing_dev_info *bdi;
135 struct btrfs_fs_info *fs_info;
136 struct btrfs_pending_bios *pending_bios;
137 struct bio *tail;
138 struct bio *cur;
139 int again = 0;
140 unsigned long num_run;
141 unsigned long batch_run = 0;
142 unsigned long limit;
143 unsigned long last_waited = 0;
144 int force_reg = 0;
145 int sync_pending = 0;
146 struct blk_plug plug;
147
148 /*
149 * this function runs all the bios we've collected for
150 * a particular device. We don't want to wander off to
151 * another device without first sending all of these down.
152 * So, setup a plug here and finish it off before we return
153 */
154 blk_start_plug(&plug);
155
156 bdi = blk_get_backing_dev_info(device->bdev);
157 fs_info = device->dev_root->fs_info;
158 limit = btrfs_async_submit_limit(fs_info);
159 limit = limit * 2 / 3;
160
161loop:
162 spin_lock(&device->io_lock);
163
164loop_lock:
165 num_run = 0;
166
167 /* take all the bios off the list at once and process them
168 * later on (without the lock held). But, remember the
169 * tail and other pointers so the bios can be properly reinserted
170 * into the list if we hit congestion
171 */
172 if (!force_reg && device->pending_sync_bios.head) {
173 pending_bios = &device->pending_sync_bios;
174 force_reg = 1;
175 } else {
176 pending_bios = &device->pending_bios;
177 force_reg = 0;
178 }
179
180 pending = pending_bios->head;
181 tail = pending_bios->tail;
182 WARN_ON(pending && !tail);
183
184 /*
185 * if pending was null this time around, no bios need processing
186 * at all and we can stop. Otherwise it'll loop back up again
187 * and do an additional check so no bios are missed.
188 *
189 * device->running_pending is used to synchronize with the
190 * schedule_bio code.
191 */
192 if (device->pending_sync_bios.head == NULL &&
193 device->pending_bios.head == NULL) {
194 again = 0;
195 device->running_pending = 0;
196 } else {
197 again = 1;
198 device->running_pending = 1;
199 }
200
201 pending_bios->head = NULL;
202 pending_bios->tail = NULL;
203
204 spin_unlock(&device->io_lock);
205
206 while (pending) {
207
208 rmb();
209 /* we want to work on both lists, but do more bios on the
210 * sync list than the regular list
211 */
212 if ((num_run > 32 &&
213 pending_bios != &device->pending_sync_bios &&
214 device->pending_sync_bios.head) ||
215 (num_run > 64 && pending_bios == &device->pending_sync_bios &&
216 device->pending_bios.head)) {
217 spin_lock(&device->io_lock);
218 requeue_list(pending_bios, pending, tail);
219 goto loop_lock;
220 }
221
222 cur = pending;
223 pending = pending->bi_next;
224 cur->bi_next = NULL;
225 atomic_dec(&fs_info->nr_async_bios);
226
227 if (atomic_read(&fs_info->nr_async_bios) < limit &&
228 waitqueue_active(&fs_info->async_submit_wait))
229 wake_up(&fs_info->async_submit_wait);
230
231 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
232
233 /*
234 * if we're doing the sync list, record that our
235 * plug has some sync requests on it
236 *
237 * If we're doing the regular list and there are
238 * sync requests sitting around, unplug before
239 * we add more
240 */
241 if (pending_bios == &device->pending_sync_bios) {
242 sync_pending = 1;
243 } else if (sync_pending) {
244 blk_finish_plug(&plug);
245 blk_start_plug(&plug);
246 sync_pending = 0;
247 }
248
249 submit_bio(cur->bi_rw, cur);
250 num_run++;
251 batch_run++;
252 if (need_resched())
253 cond_resched();
254
255 /*
256 * we made progress, there is more work to do and the bdi
257 * is now congested. Back off and let other work structs
258 * run instead
259 */
260 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
261 fs_info->fs_devices->open_devices > 1) {
262 struct io_context *ioc;
263
264 ioc = current->io_context;
265
266 /*
267 * the main goal here is that we don't want to
268 * block if we're going to be able to submit
269 * more requests without blocking.
270 *
271 * This code does two great things, it pokes into
272 * the elevator code from a filesystem _and_
273 * it makes assumptions about how batching works.
274 */
275 if (ioc && ioc->nr_batch_requests > 0 &&
276 time_before(jiffies, ioc->last_waited + HZ/50UL) &&
277 (last_waited == 0 ||
278 ioc->last_waited == last_waited)) {
279 /*
280 * we want to go through our batch of
281 * requests and stop. So, we copy out
282 * the ioc->last_waited time and test
283 * against it before looping
284 */
285 last_waited = ioc->last_waited;
286 if (need_resched())
287 cond_resched();
288 continue;
289 }
290 spin_lock(&device->io_lock);
291 requeue_list(pending_bios, pending, tail);
292 device->running_pending = 1;
293
294 spin_unlock(&device->io_lock);
295 btrfs_requeue_work(&device->work);
296 goto done;
297 }
298 }
299
300 cond_resched();
301 if (again)
302 goto loop;
303
304 spin_lock(&device->io_lock);
305 if (device->pending_bios.head || device->pending_sync_bios.head)
306 goto loop_lock;
307 spin_unlock(&device->io_lock);
308
309done:
310 blk_finish_plug(&plug);
311 return 0;
312}
313
314static void pending_bios_fn(struct btrfs_work *work)
315{
316 struct btrfs_device *device;
317
318 device = container_of(work, struct btrfs_device, work);
319 run_scheduled_bios(device);
320}
321
322static noinline int device_list_add(const char *path,
323 struct btrfs_super_block *disk_super,
324 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
325{
326 struct btrfs_device *device;
327 struct btrfs_fs_devices *fs_devices;
328 u64 found_transid = btrfs_super_generation(disk_super);
329 char *name;
330
331 fs_devices = find_fsid(disk_super->fsid);
332 if (!fs_devices) {
333 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
334 if (!fs_devices)
335 return -ENOMEM;
336 INIT_LIST_HEAD(&fs_devices->devices);
337 INIT_LIST_HEAD(&fs_devices->alloc_list);
338 list_add(&fs_devices->list, &fs_uuids);
339 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
340 fs_devices->latest_devid = devid;
341 fs_devices->latest_trans = found_transid;
342 mutex_init(&fs_devices->device_list_mutex);
343 device = NULL;
344 } else {
345 device = __find_device(&fs_devices->devices, devid,
346 disk_super->dev_item.uuid);
347 }
348 if (!device) {
349 if (fs_devices->opened)
350 return -EBUSY;
351
352 device = kzalloc(sizeof(*device), GFP_NOFS);
353 if (!device) {
354 /* we can safely leave the fs_devices entry around */
355 return -ENOMEM;
356 }
357 device->devid = devid;
358 device->work.func = pending_bios_fn;
359 memcpy(device->uuid, disk_super->dev_item.uuid,
360 BTRFS_UUID_SIZE);
361 spin_lock_init(&device->io_lock);
362 device->name = kstrdup(path, GFP_NOFS);
363 if (!device->name) {
364 kfree(device);
365 return -ENOMEM;
366 }
367 INIT_LIST_HEAD(&device->dev_alloc_list);
368
369 mutex_lock(&fs_devices->device_list_mutex);
370 list_add_rcu(&device->dev_list, &fs_devices->devices);
371 mutex_unlock(&fs_devices->device_list_mutex);
372
373 device->fs_devices = fs_devices;
374 fs_devices->num_devices++;
375 } else if (!device->name || strcmp(device->name, path)) {
376 name = kstrdup(path, GFP_NOFS);
377 if (!name)
378 return -ENOMEM;
379 kfree(device->name);
380 device->name = name;
381 if (device->missing) {
382 fs_devices->missing_devices--;
383 device->missing = 0;
384 }
385 }
386
387 if (found_transid > fs_devices->latest_trans) {
388 fs_devices->latest_devid = devid;
389 fs_devices->latest_trans = found_transid;
390 }
391 *fs_devices_ret = fs_devices;
392 return 0;
393}
394
395static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
396{
397 struct btrfs_fs_devices *fs_devices;
398 struct btrfs_device *device;
399 struct btrfs_device *orig_dev;
400
401 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
402 if (!fs_devices)
403 return ERR_PTR(-ENOMEM);
404
405 INIT_LIST_HEAD(&fs_devices->devices);
406 INIT_LIST_HEAD(&fs_devices->alloc_list);
407 INIT_LIST_HEAD(&fs_devices->list);
408 mutex_init(&fs_devices->device_list_mutex);
409 fs_devices->latest_devid = orig->latest_devid;
410 fs_devices->latest_trans = orig->latest_trans;
411 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
412
413 /* We have held the volume lock, it is safe to get the devices. */
414 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
415 device = kzalloc(sizeof(*device), GFP_NOFS);
416 if (!device)
417 goto error;
418
419 device->name = kstrdup(orig_dev->name, GFP_NOFS);
420 if (!device->name) {
421 kfree(device);
422 goto error;
423 }
424
425 device->devid = orig_dev->devid;
426 device->work.func = pending_bios_fn;
427 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
428 spin_lock_init(&device->io_lock);
429 INIT_LIST_HEAD(&device->dev_list);
430 INIT_LIST_HEAD(&device->dev_alloc_list);
431
432 list_add(&device->dev_list, &fs_devices->devices);
433 device->fs_devices = fs_devices;
434 fs_devices->num_devices++;
435 }
436 return fs_devices;
437error:
438 free_fs_devices(fs_devices);
439 return ERR_PTR(-ENOMEM);
440}
441
442int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
443{
444 struct btrfs_device *device, *next;
445
446 mutex_lock(&uuid_mutex);
447again:
448 /* This is the initialized path, it is safe to release the devices. */
449 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
450 if (device->in_fs_metadata)
451 continue;
452
453 if (device->bdev) {
454 blkdev_put(device->bdev, device->mode);
455 device->bdev = NULL;
456 fs_devices->open_devices--;
457 }
458 if (device->writeable) {
459 list_del_init(&device->dev_alloc_list);
460 device->writeable = 0;
461 fs_devices->rw_devices--;
462 }
463 list_del_init(&device->dev_list);
464 fs_devices->num_devices--;
465 kfree(device->name);
466 kfree(device);
467 }
468
469 if (fs_devices->seed) {
470 fs_devices = fs_devices->seed;
471 goto again;
472 }
473
474 mutex_unlock(&uuid_mutex);
475 return 0;
476}
477
478static void __free_device(struct work_struct *work)
479{
480 struct btrfs_device *device;
481
482 device = container_of(work, struct btrfs_device, rcu_work);
483
484 if (device->bdev)
485 blkdev_put(device->bdev, device->mode);
486
487 kfree(device->name);
488 kfree(device);
489}
490
491static void free_device(struct rcu_head *head)
492{
493 struct btrfs_device *device;
494
495 device = container_of(head, struct btrfs_device, rcu);
496
497 INIT_WORK(&device->rcu_work, __free_device);
498 schedule_work(&device->rcu_work);
499}
500
501static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
502{
503 struct btrfs_device *device;
504
505 if (--fs_devices->opened > 0)
506 return 0;
507
508 mutex_lock(&fs_devices->device_list_mutex);
509 list_for_each_entry(device, &fs_devices->devices, dev_list) {
510 struct btrfs_device *new_device;
511
512 if (device->bdev)
513 fs_devices->open_devices--;
514
515 if (device->writeable) {
516 list_del_init(&device->dev_alloc_list);
517 fs_devices->rw_devices--;
518 }
519
520 if (device->can_discard)
521 fs_devices->num_can_discard--;
522
523 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
524 BUG_ON(!new_device);
525 memcpy(new_device, device, sizeof(*new_device));
526 new_device->name = kstrdup(device->name, GFP_NOFS);
527 BUG_ON(device->name && !new_device->name);
528 new_device->bdev = NULL;
529 new_device->writeable = 0;
530 new_device->in_fs_metadata = 0;
531 new_device->can_discard = 0;
532 list_replace_rcu(&device->dev_list, &new_device->dev_list);
533
534 call_rcu(&device->rcu, free_device);
535 }
536 mutex_unlock(&fs_devices->device_list_mutex);
537
538 WARN_ON(fs_devices->open_devices);
539 WARN_ON(fs_devices->rw_devices);
540 fs_devices->opened = 0;
541 fs_devices->seeding = 0;
542
543 return 0;
544}
545
546int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
547{
548 struct btrfs_fs_devices *seed_devices = NULL;
549 int ret;
550
551 mutex_lock(&uuid_mutex);
552 ret = __btrfs_close_devices(fs_devices);
553 if (!fs_devices->opened) {
554 seed_devices = fs_devices->seed;
555 fs_devices->seed = NULL;
556 }
557 mutex_unlock(&uuid_mutex);
558
559 while (seed_devices) {
560 fs_devices = seed_devices;
561 seed_devices = fs_devices->seed;
562 __btrfs_close_devices(fs_devices);
563 free_fs_devices(fs_devices);
564 }
565 return ret;
566}
567
568static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
569 fmode_t flags, void *holder)
570{
571 struct request_queue *q;
572 struct block_device *bdev;
573 struct list_head *head = &fs_devices->devices;
574 struct btrfs_device *device;
575 struct block_device *latest_bdev = NULL;
576 struct buffer_head *bh;
577 struct btrfs_super_block *disk_super;
578 u64 latest_devid = 0;
579 u64 latest_transid = 0;
580 u64 devid;
581 int seeding = 1;
582 int ret = 0;
583
584 flags |= FMODE_EXCL;
585
586 list_for_each_entry(device, head, dev_list) {
587 if (device->bdev)
588 continue;
589 if (!device->name)
590 continue;
591
592 bdev = blkdev_get_by_path(device->name, flags, holder);
593 if (IS_ERR(bdev)) {
594 printk(KERN_INFO "open %s failed\n", device->name);
595 goto error;
596 }
597 set_blocksize(bdev, 4096);
598
599 bh = btrfs_read_dev_super(bdev);
600 if (!bh) {
601 ret = -EINVAL;
602 goto error_close;
603 }
604
605 disk_super = (struct btrfs_super_block *)bh->b_data;
606 devid = btrfs_stack_device_id(&disk_super->dev_item);
607 if (devid != device->devid)
608 goto error_brelse;
609
610 if (memcmp(device->uuid, disk_super->dev_item.uuid,
611 BTRFS_UUID_SIZE))
612 goto error_brelse;
613
614 device->generation = btrfs_super_generation(disk_super);
615 if (!latest_transid || device->generation > latest_transid) {
616 latest_devid = devid;
617 latest_transid = device->generation;
618 latest_bdev = bdev;
619 }
620
621 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
622 device->writeable = 0;
623 } else {
624 device->writeable = !bdev_read_only(bdev);
625 seeding = 0;
626 }
627
628 q = bdev_get_queue(bdev);
629 if (blk_queue_discard(q)) {
630 device->can_discard = 1;
631 fs_devices->num_can_discard++;
632 }
633
634 device->bdev = bdev;
635 device->in_fs_metadata = 0;
636 device->mode = flags;
637
638 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
639 fs_devices->rotating = 1;
640
641 fs_devices->open_devices++;
642 if (device->writeable) {
643 fs_devices->rw_devices++;
644 list_add(&device->dev_alloc_list,
645 &fs_devices->alloc_list);
646 }
647 brelse(bh);
648 continue;
649
650error_brelse:
651 brelse(bh);
652error_close:
653 blkdev_put(bdev, flags);
654error:
655 continue;
656 }
657 if (fs_devices->open_devices == 0) {
658 ret = -EIO;
659 goto out;
660 }
661 fs_devices->seeding = seeding;
662 fs_devices->opened = 1;
663 fs_devices->latest_bdev = latest_bdev;
664 fs_devices->latest_devid = latest_devid;
665 fs_devices->latest_trans = latest_transid;
666 fs_devices->total_rw_bytes = 0;
667out:
668 return ret;
669}
670
671int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
672 fmode_t flags, void *holder)
673{
674 int ret;
675
676 mutex_lock(&uuid_mutex);
677 if (fs_devices->opened) {
678 fs_devices->opened++;
679 ret = 0;
680 } else {
681 ret = __btrfs_open_devices(fs_devices, flags, holder);
682 }
683 mutex_unlock(&uuid_mutex);
684 return ret;
685}
686
687int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
688 struct btrfs_fs_devices **fs_devices_ret)
689{
690 struct btrfs_super_block *disk_super;
691 struct block_device *bdev;
692 struct buffer_head *bh;
693 int ret;
694 u64 devid;
695 u64 transid;
696
697 mutex_lock(&uuid_mutex);
698
699 flags |= FMODE_EXCL;
700 bdev = blkdev_get_by_path(path, flags, holder);
701
702 if (IS_ERR(bdev)) {
703 ret = PTR_ERR(bdev);
704 goto error;
705 }
706
707 ret = set_blocksize(bdev, 4096);
708 if (ret)
709 goto error_close;
710 bh = btrfs_read_dev_super(bdev);
711 if (!bh) {
712 ret = -EINVAL;
713 goto error_close;
714 }
715 disk_super = (struct btrfs_super_block *)bh->b_data;
716 devid = btrfs_stack_device_id(&disk_super->dev_item);
717 transid = btrfs_super_generation(disk_super);
718 if (disk_super->label[0])
719 printk(KERN_INFO "device label %s ", disk_super->label);
720 else
721 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
722 printk(KERN_CONT "devid %llu transid %llu %s\n",
723 (unsigned long long)devid, (unsigned long long)transid, path);
724 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
725
726 brelse(bh);
727error_close:
728 blkdev_put(bdev, flags);
729error:
730 mutex_unlock(&uuid_mutex);
731 return ret;
732}
733
734/* helper to account the used device space in the range */
735int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
736 u64 end, u64 *length)
737{
738 struct btrfs_key key;
739 struct btrfs_root *root = device->dev_root;
740 struct btrfs_dev_extent *dev_extent;
741 struct btrfs_path *path;
742 u64 extent_end;
743 int ret;
744 int slot;
745 struct extent_buffer *l;
746
747 *length = 0;
748
749 if (start >= device->total_bytes)
750 return 0;
751
752 path = btrfs_alloc_path();
753 if (!path)
754 return -ENOMEM;
755 path->reada = 2;
756
757 key.objectid = device->devid;
758 key.offset = start;
759 key.type = BTRFS_DEV_EXTENT_KEY;
760
761 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
762 if (ret < 0)
763 goto out;
764 if (ret > 0) {
765 ret = btrfs_previous_item(root, path, key.objectid, key.type);
766 if (ret < 0)
767 goto out;
768 }
769
770 while (1) {
771 l = path->nodes[0];
772 slot = path->slots[0];
773 if (slot >= btrfs_header_nritems(l)) {
774 ret = btrfs_next_leaf(root, path);
775 if (ret == 0)
776 continue;
777 if (ret < 0)
778 goto out;
779
780 break;
781 }
782 btrfs_item_key_to_cpu(l, &key, slot);
783
784 if (key.objectid < device->devid)
785 goto next;
786
787 if (key.objectid > device->devid)
788 break;
789
790 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
791 goto next;
792
793 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
794 extent_end = key.offset + btrfs_dev_extent_length(l,
795 dev_extent);
796 if (key.offset <= start && extent_end > end) {
797 *length = end - start + 1;
798 break;
799 } else if (key.offset <= start && extent_end > start)
800 *length += extent_end - start;
801 else if (key.offset > start && extent_end <= end)
802 *length += extent_end - key.offset;
803 else if (key.offset > start && key.offset <= end) {
804 *length += end - key.offset + 1;
805 break;
806 } else if (key.offset > end)
807 break;
808
809next:
810 path->slots[0]++;
811 }
812 ret = 0;
813out:
814 btrfs_free_path(path);
815 return ret;
816}
817
818/*
819 * find_free_dev_extent - find free space in the specified device
820 * @trans: transaction handler
821 * @device: the device which we search the free space in
822 * @num_bytes: the size of the free space that we need
823 * @start: store the start of the free space.
824 * @len: the size of the free space. that we find, or the size of the max
825 * free space if we don't find suitable free space
826 *
827 * this uses a pretty simple search, the expectation is that it is
828 * called very infrequently and that a given device has a small number
829 * of extents
830 *
831 * @start is used to store the start of the free space if we find. But if we
832 * don't find suitable free space, it will be used to store the start position
833 * of the max free space.
834 *
835 * @len is used to store the size of the free space that we find.
836 * But if we don't find suitable free space, it is used to store the size of
837 * the max free space.
838 */
839int find_free_dev_extent(struct btrfs_trans_handle *trans,
840 struct btrfs_device *device, u64 num_bytes,
841 u64 *start, u64 *len)
842{
843 struct btrfs_key key;
844 struct btrfs_root *root = device->dev_root;
845 struct btrfs_dev_extent *dev_extent;
846 struct btrfs_path *path;
847 u64 hole_size;
848 u64 max_hole_start;
849 u64 max_hole_size;
850 u64 extent_end;
851 u64 search_start;
852 u64 search_end = device->total_bytes;
853 int ret;
854 int slot;
855 struct extent_buffer *l;
856
857 /* FIXME use last free of some kind */
858
859 /* we don't want to overwrite the superblock on the drive,
860 * so we make sure to start at an offset of at least 1MB
861 */
862 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
863
864 max_hole_start = search_start;
865 max_hole_size = 0;
866 hole_size = 0;
867
868 if (search_start >= search_end) {
869 ret = -ENOSPC;
870 goto error;
871 }
872
873 path = btrfs_alloc_path();
874 if (!path) {
875 ret = -ENOMEM;
876 goto error;
877 }
878 path->reada = 2;
879
880 key.objectid = device->devid;
881 key.offset = search_start;
882 key.type = BTRFS_DEV_EXTENT_KEY;
883
884 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
885 if (ret < 0)
886 goto out;
887 if (ret > 0) {
888 ret = btrfs_previous_item(root, path, key.objectid, key.type);
889 if (ret < 0)
890 goto out;
891 }
892
893 while (1) {
894 l = path->nodes[0];
895 slot = path->slots[0];
896 if (slot >= btrfs_header_nritems(l)) {
897 ret = btrfs_next_leaf(root, path);
898 if (ret == 0)
899 continue;
900 if (ret < 0)
901 goto out;
902
903 break;
904 }
905 btrfs_item_key_to_cpu(l, &key, slot);
906
907 if (key.objectid < device->devid)
908 goto next;
909
910 if (key.objectid > device->devid)
911 break;
912
913 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
914 goto next;
915
916 if (key.offset > search_start) {
917 hole_size = key.offset - search_start;
918
919 if (hole_size > max_hole_size) {
920 max_hole_start = search_start;
921 max_hole_size = hole_size;
922 }
923
924 /*
925 * If this free space is greater than which we need,
926 * it must be the max free space that we have found
927 * until now, so max_hole_start must point to the start
928 * of this free space and the length of this free space
929 * is stored in max_hole_size. Thus, we return
930 * max_hole_start and max_hole_size and go back to the
931 * caller.
932 */
933 if (hole_size >= num_bytes) {
934 ret = 0;
935 goto out;
936 }
937 }
938
939 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
940 extent_end = key.offset + btrfs_dev_extent_length(l,
941 dev_extent);
942 if (extent_end > search_start)
943 search_start = extent_end;
944next:
945 path->slots[0]++;
946 cond_resched();
947 }
948
949 /*
950 * At this point, search_start should be the end of
951 * allocated dev extents, and when shrinking the device,
952 * search_end may be smaller than search_start.
953 */
954 if (search_end > search_start)
955 hole_size = search_end - search_start;
956
957 if (hole_size > max_hole_size) {
958 max_hole_start = search_start;
959 max_hole_size = hole_size;
960 }
961
962 /* See above. */
963 if (hole_size < num_bytes)
964 ret = -ENOSPC;
965 else
966 ret = 0;
967
968out:
969 btrfs_free_path(path);
970error:
971 *start = max_hole_start;
972 if (len)
973 *len = max_hole_size;
974 return ret;
975}
976
977static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
978 struct btrfs_device *device,
979 u64 start)
980{
981 int ret;
982 struct btrfs_path *path;
983 struct btrfs_root *root = device->dev_root;
984 struct btrfs_key key;
985 struct btrfs_key found_key;
986 struct extent_buffer *leaf = NULL;
987 struct btrfs_dev_extent *extent = NULL;
988
989 path = btrfs_alloc_path();
990 if (!path)
991 return -ENOMEM;
992
993 key.objectid = device->devid;
994 key.offset = start;
995 key.type = BTRFS_DEV_EXTENT_KEY;
996
997 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
998 if (ret > 0) {
999 ret = btrfs_previous_item(root, path, key.objectid,
1000 BTRFS_DEV_EXTENT_KEY);
1001 if (ret)
1002 goto out;
1003 leaf = path->nodes[0];
1004 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1005 extent = btrfs_item_ptr(leaf, path->slots[0],
1006 struct btrfs_dev_extent);
1007 BUG_ON(found_key.offset > start || found_key.offset +
1008 btrfs_dev_extent_length(leaf, extent) < start);
1009 } else if (ret == 0) {
1010 leaf = path->nodes[0];
1011 extent = btrfs_item_ptr(leaf, path->slots[0],
1012 struct btrfs_dev_extent);
1013 }
1014 BUG_ON(ret);
1015
1016 if (device->bytes_used > 0)
1017 device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
1018 ret = btrfs_del_item(trans, root, path);
1019
1020out:
1021 btrfs_free_path(path);
1022 return ret;
1023}
1024
1025int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1026 struct btrfs_device *device,
1027 u64 chunk_tree, u64 chunk_objectid,
1028 u64 chunk_offset, u64 start, u64 num_bytes)
1029{
1030 int ret;
1031 struct btrfs_path *path;
1032 struct btrfs_root *root = device->dev_root;
1033 struct btrfs_dev_extent *extent;
1034 struct extent_buffer *leaf;
1035 struct btrfs_key key;
1036
1037 WARN_ON(!device->in_fs_metadata);
1038 path = btrfs_alloc_path();
1039 if (!path)
1040 return -ENOMEM;
1041
1042 key.objectid = device->devid;
1043 key.offset = start;
1044 key.type = BTRFS_DEV_EXTENT_KEY;
1045 ret = btrfs_insert_empty_item(trans, root, path, &key,
1046 sizeof(*extent));
1047 BUG_ON(ret);
1048
1049 leaf = path->nodes[0];
1050 extent = btrfs_item_ptr(leaf, path->slots[0],
1051 struct btrfs_dev_extent);
1052 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1053 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1054 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1055
1056 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1057 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1058 BTRFS_UUID_SIZE);
1059
1060 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1061 btrfs_mark_buffer_dirty(leaf);
1062 btrfs_free_path(path);
1063 return ret;
1064}
1065
1066static noinline int find_next_chunk(struct btrfs_root *root,
1067 u64 objectid, u64 *offset)
1068{
1069 struct btrfs_path *path;
1070 int ret;
1071 struct btrfs_key key;
1072 struct btrfs_chunk *chunk;
1073 struct btrfs_key found_key;
1074
1075 path = btrfs_alloc_path();
1076 if (!path)
1077 return -ENOMEM;
1078
1079 key.objectid = objectid;
1080 key.offset = (u64)-1;
1081 key.type = BTRFS_CHUNK_ITEM_KEY;
1082
1083 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1084 if (ret < 0)
1085 goto error;
1086
1087 BUG_ON(ret == 0);
1088
1089 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1090 if (ret) {
1091 *offset = 0;
1092 } else {
1093 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1094 path->slots[0]);
1095 if (found_key.objectid != objectid)
1096 *offset = 0;
1097 else {
1098 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1099 struct btrfs_chunk);
1100 *offset = found_key.offset +
1101 btrfs_chunk_length(path->nodes[0], chunk);
1102 }
1103 }
1104 ret = 0;
1105error:
1106 btrfs_free_path(path);
1107 return ret;
1108}
1109
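/*
 * Pick the next unused devid: one past the key offset of the last dev
 * item in the chunk tree, or 1 when no dev items exist yet.
 */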
1110static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1111{
1112 int ret;
1113 struct btrfs_key key;
1114 struct btrfs_key found_key;
1115 struct btrfs_path *path;
1116
1117 root = root->fs_info->chunk_root;
1118
1119 path = btrfs_alloc_path();
1120 if (!path)
1121 return -ENOMEM;
1122
1123 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1124 key.type = BTRFS_DEV_ITEM_KEY;
1125 key.offset = (u64)-1;
1126
1127 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1128 if (ret < 0)
1129 goto error;
1130
1131 BUG_ON(ret == 0);
1132
1133 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1134 BTRFS_DEV_ITEM_KEY);
1135 if (ret) {
1136 *objectid = 1;
1137 } else {
1138 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1139 path->slots[0]);
1140 *objectid = found_key.offset + 1;
1141 }
1142 ret = 0;
1143error:
1144 btrfs_free_path(path);
1145 return ret;
1146}
1147
1148/*
1149 * The device information is stored in the chunk root.
1150 * The btrfs_device struct should be fully filled in.
1151 */
1152int btrfs_add_device(struct btrfs_trans_handle *trans,
1153 struct btrfs_root *root,
1154 struct btrfs_device *device)
1155{
1156 int ret;
1157 struct btrfs_path *path;
1158 struct btrfs_dev_item *dev_item;
1159 struct extent_buffer *leaf;
1160 struct btrfs_key key;
1161 unsigned long ptr;
1162
1163 root = root->fs_info->chunk_root;
1164
1165 path = btrfs_alloc_path();
1166 if (!path)
1167 return -ENOMEM;
1168
1169 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1170 key.type = BTRFS_DEV_ITEM_KEY;
1171 key.offset = device->devid;
1172
1173 ret = btrfs_insert_empty_item(trans, root, path, &key,
1174 sizeof(*dev_item));
1175 if (ret)
1176 goto out;
1177
1178 leaf = path->nodes[0];
1179 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1180
1181 btrfs_set_device_id(leaf, dev_item, device->devid);
1182 btrfs_set_device_generation(leaf, dev_item, 0);
1183 btrfs_set_device_type(leaf, dev_item, device->type);
1184 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1185 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1186 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1187 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1188 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1189 btrfs_set_device_group(leaf, dev_item, 0);
1190 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1191 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1192 btrfs_set_device_start_offset(leaf, dev_item, 0);
1193
1194 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1195 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1196 ptr = (unsigned long)btrfs_device_fsid(dev_item);
1197 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1198 btrfs_mark_buffer_dirty(leaf);
1199
1200 ret = 0;
1201out:
1202 btrfs_free_path(path);
1203 return ret;
1204}
1205
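/*
 * Delete the dev item for this device from the chunk tree, in its own
 * transaction and under the chunk mutex.
 */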
1206static int btrfs_rm_dev_item(struct btrfs_root *root,
1207 struct btrfs_device *device)
1208{
1209 int ret;
1210 struct btrfs_path *path;
1211 struct btrfs_key key;
1212 struct btrfs_trans_handle *trans;
1213
1214 root = root->fs_info->chunk_root;
1215
1216 path = btrfs_alloc_path();
1217 if (!path)
1218 return -ENOMEM;
1219
1220 trans = btrfs_start_transaction(root, 0);
1221 if (IS_ERR(trans)) {
1222 btrfs_free_path(path);
1223 return PTR_ERR(trans);
1224 }
1225 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1226 key.type = BTRFS_DEV_ITEM_KEY;
1227 key.offset = device->devid;
1228 lock_chunks(root);
1229
1230 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1231 if (ret < 0)
1232 goto out;
1233
1234 if (ret > 0) {
1235 ret = -ENOENT;
1236 goto out;
1237 }
1238
1239 ret = btrfs_del_item(trans, root, path);
1240 if (ret)
1241 goto out;
1242out:
1243 btrfs_free_path(path);
1244 unlock_chunks(root);
1245 btrfs_commit_transaction(trans, root);
1246 return ret;
1247}
1248
1249int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1250{
1251 struct btrfs_device *device;
1252 struct btrfs_device *next_device;
1253 struct block_device *bdev;
1254 struct buffer_head *bh = NULL;
1255 struct btrfs_super_block *disk_super;
1256 struct btrfs_fs_devices *cur_devices;
1257 u64 all_avail;
1258 u64 devid;
1259 u64 num_devices;
1260 u8 *dev_uuid;
1261 int ret = 0;
1262 bool clear_super = false;
1263
1264 mutex_lock(&uuid_mutex);
1265 mutex_lock(&root->fs_info->volume_mutex);
1266
1267 all_avail = root->fs_info->avail_data_alloc_bits |
1268 root->fs_info->avail_system_alloc_bits |
1269 root->fs_info->avail_metadata_alloc_bits;
1270
1271 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1272 root->fs_info->fs_devices->num_devices <= 4) {
1273 printk(KERN_ERR "btrfs: unable to go below four devices "
1274 "on raid10\n");
1275 ret = -EINVAL;
1276 goto out;
1277 }
1278
1279 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1280 root->fs_info->fs_devices->num_devices <= 2) {
1281 printk(KERN_ERR "btrfs: unable to go below two "
1282 "devices on raid1\n");
1283 ret = -EINVAL;
1284 goto out;
1285 }
1286
1287 if (strcmp(device_path, "missing") == 0) {
1288 struct list_head *devices;
1289 struct btrfs_device *tmp;
1290
1291 device = NULL;
1292 devices = &root->fs_info->fs_devices->devices;
1293 /*
1294 * It is safe to read the devices since the volume_mutex
1295 * is held.
1296 */
1297 list_for_each_entry(tmp, devices, dev_list) {
1298 if (tmp->in_fs_metadata && !tmp->bdev) {
1299 device = tmp;
1300 break;
1301 }
1302 }
1303 bdev = NULL;
1304 bh = NULL;
1305 disk_super = NULL;
1306 if (!device) {
1307 printk(KERN_ERR "btrfs: no missing devices found to "
1308 "remove\n");
1309 goto out;
1310 }
1311 } else {
1312 bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
1313 root->fs_info->bdev_holder);
1314 if (IS_ERR(bdev)) {
1315 ret = PTR_ERR(bdev);
1316 goto out;
1317 }
1318
1319 set_blocksize(bdev, 4096);
1320 bh = btrfs_read_dev_super(bdev);
1321 if (!bh) {
1322 ret = -EINVAL;
1323 goto error_close;
1324 }
1325 disk_super = (struct btrfs_super_block *)bh->b_data;
1326 devid = btrfs_stack_device_id(&disk_super->dev_item);
1327 dev_uuid = disk_super->dev_item.uuid;
1328 device = btrfs_find_device(root, devid, dev_uuid,
1329 disk_super->fsid);
1330 if (!device) {
1331 ret = -ENOENT;
1332 goto error_brelse;
1333 }
1334 }
1335
1336 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1337 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1338 "device\n");
1339 ret = -EINVAL;
1340 goto error_brelse;
1341 }
1342
1343 if (device->writeable) {
1344 lock_chunks(root);
1345 list_del_init(&device->dev_alloc_list);
1346 unlock_chunks(root);
1347 root->fs_info->fs_devices->rw_devices--;
1348 clear_super = true;
1349 }
1350
1351 ret = btrfs_shrink_device(device, 0);
1352 if (ret)
1353 goto error_undo;
1354
1355 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1356 if (ret)
1357 goto error_undo;
1358
1359 device->in_fs_metadata = 0;
1360 btrfs_scrub_cancel_dev(root, device);
1361
1362 /*
1363 * the device list mutex makes sure that we don't change
1364 * the device list while someone else is writing out all
1365 * the device supers.
1366 */
1367
1368 cur_devices = device->fs_devices;
1369 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1370 list_del_rcu(&device->dev_list);
1371
1372 device->fs_devices->num_devices--;
1373
1374 if (device->missing)
1375 root->fs_info->fs_devices->missing_devices--;
1376
1377 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1378 struct btrfs_device, dev_list);
1379 if (device->bdev == root->fs_info->sb->s_bdev)
1380 root->fs_info->sb->s_bdev = next_device->bdev;
1381 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1382 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1383
1384 if (device->bdev)
1385 device->fs_devices->open_devices--;
1386
1387 call_rcu(&device->rcu, free_device);
1388 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1389
1390 num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1391 btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
1392
1393 if (cur_devices->open_devices == 0) {
1394 struct btrfs_fs_devices *fs_devices;
1395 fs_devices = root->fs_info->fs_devices;
1396 while (fs_devices) {
1397 if (fs_devices->seed == cur_devices)
1398 break;
1399 fs_devices = fs_devices->seed;
1400 }
1401 fs_devices->seed = cur_devices->seed;
1402 cur_devices->seed = NULL;
1403 lock_chunks(root);
1404 __btrfs_close_devices(cur_devices);
1405 unlock_chunks(root);
1406 free_fs_devices(cur_devices);
1407 }
1408
1409 /*
1410 * at this point, the device is zero sized. We want to
1411 * remove it from the devices list and zero out the old super
1412 */
1413 if (clear_super) {
1414 /* make sure this device isn't detected as part of
1415 * the FS anymore
1416 */
1417 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1418 set_buffer_dirty(bh);
1419 sync_dirty_buffer(bh);
1420 }
1421
1422 ret = 0;
1423
1424error_brelse:
1425 brelse(bh);
1426error_close:
1427 if (bdev)
1428 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1429out:
1430 mutex_unlock(&root->fs_info->volume_mutex);
1431 mutex_unlock(&uuid_mutex);
1432 return ret;
1433error_undo:
1434 if (device->writeable) {
1435 lock_chunks(root);
1436 list_add(&device->dev_alloc_list,
1437 &root->fs_info->fs_devices->alloc_list);
1438 unlock_chunks(root);
1439 root->fs_info->fs_devices->rw_devices++;
1440 }
1441 goto error_brelse;
1442}
1443
1444/*
1445 * does all the dirty work required for changing the file system's UUID.
1446 */
1447static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
1448 struct btrfs_root *root)
1449{
1450 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1451 struct btrfs_fs_devices *old_devices;
1452 struct btrfs_fs_devices *seed_devices;
1453 struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
1454 struct btrfs_device *device;
1455 u64 super_flags;
1456
1457 BUG_ON(!mutex_is_locked(&uuid_mutex));
1458 if (!fs_devices->seeding)
1459 return -EINVAL;
1460
1461 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1462 if (!seed_devices)
1463 return -ENOMEM;
1464
1465 old_devices = clone_fs_devices(fs_devices);
1466 if (IS_ERR(old_devices)) {
1467 kfree(seed_devices);
1468 return PTR_ERR(old_devices);
1469 }
1470
1471 list_add(&old_devices->list, &fs_uuids);
1472
1473 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1474 seed_devices->opened = 1;
1475 INIT_LIST_HEAD(&seed_devices->devices);
1476 INIT_LIST_HEAD(&seed_devices->alloc_list);
1477 mutex_init(&seed_devices->device_list_mutex);
1478
1479 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1480 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1481 synchronize_rcu);
1482 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1483
1484 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1485 list_for_each_entry(device, &seed_devices->devices, dev_list) {
1486 device->fs_devices = seed_devices;
1487 }
1488
1489 fs_devices->seeding = 0;
1490 fs_devices->num_devices = 0;
1491 fs_devices->open_devices = 0;
1492 fs_devices->seed = seed_devices;
1493
1494 generate_random_uuid(fs_devices->fsid);
1495 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1496 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1497 super_flags = btrfs_super_flags(disk_super) &
1498 ~BTRFS_SUPER_FLAG_SEEDING;
1499 btrfs_set_super_flags(disk_super, super_flags);
1500
1501 return 0;
1502}
1503
1504/*
1505 * store the expected generation for seed devices in device items.
1506 */
1507static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1508 struct btrfs_root *root)
1509{
1510 struct btrfs_path *path;
1511 struct extent_buffer *leaf;
1512 struct btrfs_dev_item *dev_item;
1513 struct btrfs_device *device;
1514 struct btrfs_key key;
1515 u8 fs_uuid[BTRFS_UUID_SIZE];
1516 u8 dev_uuid[BTRFS_UUID_SIZE];
1517 u64 devid;
1518 int ret;
1519
1520 path = btrfs_alloc_path();
1521 if (!path)
1522 return -ENOMEM;
1523
1524 root = root->fs_info->chunk_root;
1525 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1526 key.offset = 0;
1527 key.type = BTRFS_DEV_ITEM_KEY;
1528
1529 while (1) {
1530 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1531 if (ret < 0)
1532 goto error;
1533
1534 leaf = path->nodes[0];
1535next_slot:
1536 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1537 ret = btrfs_next_leaf(root, path);
1538 if (ret > 0)
1539 break;
1540 if (ret < 0)
1541 goto error;
1542 leaf = path->nodes[0];
1543 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1544 btrfs_release_path(path);
1545 continue;
1546 }
1547
1548 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1549 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1550 key.type != BTRFS_DEV_ITEM_KEY)
1551 break;
1552
1553 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1554 struct btrfs_dev_item);
1555 devid = btrfs_device_id(leaf, dev_item);
1556 read_extent_buffer(leaf, dev_uuid,
1557 (unsigned long)btrfs_device_uuid(dev_item),
1558 BTRFS_UUID_SIZE);
1559 read_extent_buffer(leaf, fs_uuid,
1560 (unsigned long)btrfs_device_fsid(dev_item),
1561 BTRFS_UUID_SIZE);
1562 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1563 BUG_ON(!device);
1564
1565 if (device->fs_devices->seeding) {
1566 btrfs_set_device_generation(leaf, dev_item,
1567 device->generation);
1568 btrfs_mark_buffer_dirty(leaf);
1569 }
1570
1571 path->slots[0]++;
1572 goto next_slot;
1573 }
1574 ret = 0;
1575error:
1576 btrfs_free_path(path);
1577 return ret;
1578}
1579
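/*
 * Add the block device at device_path to a mounted filesystem. If the
 * filesystem is a sprouting seed, this also gives it a fresh fsid and
 * moves the existing devices over to a separate seed fs_devices.
 */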
1580int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1581{
1582 struct request_queue *q;
1583 struct btrfs_trans_handle *trans;
1584 struct btrfs_device *device;
1585 struct block_device *bdev;
1586 struct list_head *devices;
1587 struct super_block *sb = root->fs_info->sb;
1588 u64 total_bytes;
1589 int seeding_dev = 0;
1590 int ret = 0;
1591
1592 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1593 return -EINVAL;
1594
1595 bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
1596 root->fs_info->bdev_holder);
1597 if (IS_ERR(bdev))
1598 return PTR_ERR(bdev);
1599
1600 if (root->fs_info->fs_devices->seeding) {
1601 seeding_dev = 1;
1602 down_write(&sb->s_umount);
1603 mutex_lock(&uuid_mutex);
1604 }
1605
1606 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1607 mutex_lock(&root->fs_info->volume_mutex);
1608
1609 devices = &root->fs_info->fs_devices->devices;
1610 /*
1611 * we have the volume lock, so we don't need the extra
1612 * device list mutex while reading the list here.
1613 */
1614 list_for_each_entry(device, devices, dev_list) {
1615 if (device->bdev == bdev) {
1616 ret = -EEXIST;
1617 goto error;
1618 }
1619 }
1620
1621 device = kzalloc(sizeof(*device), GFP_NOFS);
1622 if (!device) {
1623 /* we can safely leave the fs_devices entry around */
1624 ret = -ENOMEM;
1625 goto error;
1626 }
1627
1628 device->name = kstrdup(device_path, GFP_NOFS);
1629 if (!device->name) {
1630 kfree(device);
1631 ret = -ENOMEM;
1632 goto error;
1633 }
1634
1635 ret = find_next_devid(root, &device->devid);
1636 if (ret) {
1637 kfree(device->name);
1638 kfree(device);
1639 goto error;
1640 }
1641
1642 trans = btrfs_start_transaction(root, 0);
1643 if (IS_ERR(trans)) {
1644 kfree(device->name);
1645 kfree(device);
1646 ret = PTR_ERR(trans);
1647 goto error;
1648 }
1649
1650 lock_chunks(root);
1651
1652 q = bdev_get_queue(bdev);
1653 if (blk_queue_discard(q))
1654 device->can_discard = 1;
1655 device->writeable = 1;
1656 device->work.func = pending_bios_fn;
1657 generate_random_uuid(device->uuid);
1658 spin_lock_init(&device->io_lock);
1659 device->generation = trans->transid;
1660 device->io_width = root->sectorsize;
1661 device->io_align = root->sectorsize;
1662 device->sector_size = root->sectorsize;
1663 device->total_bytes = i_size_read(bdev->bd_inode);
1664 device->disk_total_bytes = device->total_bytes;
1665 device->dev_root = root->fs_info->dev_root;
1666 device->bdev = bdev;
1667 device->in_fs_metadata = 1;
1668 device->mode = FMODE_EXCL;
1669 set_blocksize(device->bdev, 4096);
1670
1671 if (seeding_dev) {
1672 sb->s_flags &= ~MS_RDONLY;
1673 ret = btrfs_prepare_sprout(trans, root);
1674 BUG_ON(ret);
1675 }
1676
1677 device->fs_devices = root->fs_info->fs_devices;
1678
1679 /*
1680 * we don't want write_supers to jump in here with our device
1681 * half set up
1682 */
1683 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1684 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1685 list_add(&device->dev_alloc_list,
1686 &root->fs_info->fs_devices->alloc_list);
1687 root->fs_info->fs_devices->num_devices++;
1688 root->fs_info->fs_devices->open_devices++;
1689 root->fs_info->fs_devices->rw_devices++;
1690 if (device->can_discard)
1691 root->fs_info->fs_devices->num_can_discard++;
1692 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1693
1694 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1695 root->fs_info->fs_devices->rotating = 1;
1696
1697 total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
1698 btrfs_set_super_total_bytes(&root->fs_info->super_copy,
1699 total_bytes + device->total_bytes);
1700
1701 total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
1702 btrfs_set_super_num_devices(&root->fs_info->super_copy,
1703 total_bytes + 1);
1704 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1705
1706 if (seeding_dev) {
1707 ret = init_first_rw_device(trans, root, device);
1708 BUG_ON(ret);
1709 ret = btrfs_finish_sprout(trans, root);
1710 BUG_ON(ret);
1711 } else {
1712 ret = btrfs_add_device(trans, root, device);
1713 }
1714
1715 /*
1716 * we've got more storage, clear any full flags on the space
1717 * infos
1718 */
1719 btrfs_clear_space_info_full(root->fs_info);
1720
1721 unlock_chunks(root);
1722 btrfs_commit_transaction(trans, root);
1723
1724 if (seeding_dev) {
1725 mutex_unlock(&uuid_mutex);
1726 up_write(&sb->s_umount);
1727
1728 ret = btrfs_relocate_sys_chunks(root);
1729 BUG_ON(ret);
1730 }
1731out:
1732 mutex_unlock(&root->fs_info->volume_mutex);
1733 return ret;
1734error:
1735 blkdev_put(bdev, FMODE_EXCL);
1736 if (seeding_dev) {
1737 mutex_unlock(&uuid_mutex);
1738 up_write(&sb->s_umount);
1739 }
1740 goto out;
1741}
1742
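/*
 * Write the in-memory device state (sizes and io geometry) back into
 * the device item in the chunk tree.
 */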
1743static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1744 struct btrfs_device *device)
1745{
1746 int ret;
1747 struct btrfs_path *path;
1748 struct btrfs_root *root;
1749 struct btrfs_dev_item *dev_item;
1750 struct extent_buffer *leaf;
1751 struct btrfs_key key;
1752
1753 root = device->dev_root->fs_info->chunk_root;
1754
1755 path = btrfs_alloc_path();
1756 if (!path)
1757 return -ENOMEM;
1758
1759 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1760 key.type = BTRFS_DEV_ITEM_KEY;
1761 key.offset = device->devid;
1762
1763 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1764 if (ret < 0)
1765 goto out;
1766
1767 if (ret > 0) {
1768 ret = -ENOENT;
1769 goto out;
1770 }
1771
1772 leaf = path->nodes[0];
1773 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1774
1775 btrfs_set_device_id(leaf, dev_item, device->devid);
1776 btrfs_set_device_type(leaf, dev_item, device->type);
1777 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1778 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1779 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1780 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1781 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1782 btrfs_mark_buffer_dirty(leaf);
1783
1784out:
1785 btrfs_free_path(path);
1786 return ret;
1787}
1788
1789static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1790 struct btrfs_device *device, u64 new_size)
1791{
1792 struct btrfs_super_block *super_copy =
1793 &device->dev_root->fs_info->super_copy;
1794 u64 old_total = btrfs_super_total_bytes(super_copy);
1795 u64 diff = new_size - device->total_bytes;
1796
1797 if (!device->writeable)
1798 return -EACCES;
1799 if (new_size <= device->total_bytes)
1800 return -EINVAL;
1801
1802 btrfs_set_super_total_bytes(super_copy, old_total + diff);
1803 device->fs_devices->total_rw_bytes += diff;
1804
1805 device->total_bytes = new_size;
1806 device->disk_total_bytes = new_size;
1807 btrfs_clear_space_info_full(device->dev_root->fs_info);
1808
1809 return btrfs_update_device(trans, device);
1810}
1811
1812int btrfs_grow_device(struct btrfs_trans_handle *trans,
1813 struct btrfs_device *device, u64 new_size)
1814{
1815 int ret;
1816 lock_chunks(device->dev_root);
1817 ret = __btrfs_grow_device(trans, device, new_size);
1818 unlock_chunks(device->dev_root);
1819 return ret;
1820}
1821
1822static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1823 struct btrfs_root *root,
1824 u64 chunk_tree, u64 chunk_objectid,
1825 u64 chunk_offset)
1826{
1827 int ret;
1828 struct btrfs_path *path;
1829 struct btrfs_key key;
1830
1831 root = root->fs_info->chunk_root;
1832 path = btrfs_alloc_path();
1833 if (!path)
1834 return -ENOMEM;
1835
1836 key.objectid = chunk_objectid;
1837 key.offset = chunk_offset;
1838 key.type = BTRFS_CHUNK_ITEM_KEY;
1839
1840 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1841 BUG_ON(ret);
1842
1843 ret = btrfs_del_item(trans, root, path);
1844
1845 btrfs_free_path(path);
1846 return ret;
1847}
1848
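/*
 * The superblock's sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) pairs, each pair sized
 * by the chunk's stripe count. Delete the pair that matches
 * chunk_objectid/chunk_offset by memmove()ing the tail over it.
 */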
1849static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1850 chunk_offset)
1851{
1852 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1853 struct btrfs_disk_key *disk_key;
1854 struct btrfs_chunk *chunk;
1855 u8 *ptr;
1856 int ret = 0;
1857 u32 num_stripes;
1858 u32 array_size;
1859 u32 len = 0;
1860 u32 cur;
1861 struct btrfs_key key;
1862
1863 array_size = btrfs_super_sys_array_size(super_copy);
1864
1865 ptr = super_copy->sys_chunk_array;
1866 cur = 0;
1867
1868 while (cur < array_size) {
1869 disk_key = (struct btrfs_disk_key *)ptr;
1870 btrfs_disk_key_to_cpu(&key, disk_key);
1871
1872 len = sizeof(*disk_key);
1873
1874 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1875 chunk = (struct btrfs_chunk *)(ptr + len);
1876 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1877 len += btrfs_chunk_item_size(num_stripes);
1878 } else {
1879 ret = -EIO;
1880 break;
1881 }
1882 if (key.objectid == chunk_objectid &&
1883 key.offset == chunk_offset) {
1884 memmove(ptr, ptr + len, array_size - (cur + len));
1885 array_size -= len;
1886 btrfs_set_super_sys_array_size(super_copy, array_size);
1887 } else {
1888 ptr += len;
1889 cur += len;
1890 }
1891 }
1892 return ret;
1893}
1894
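/*
 * Empty the chunk at chunk_offset and tear it down: relocate the
 * extents it holds, free the device extents backing each stripe,
 * delete the chunk item (and its sys_chunk_array copy for SYSTEM
 * chunks), remove the block group and drop the extent mapping.
 */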
1895static int btrfs_relocate_chunk(struct btrfs_root *root,
1896 u64 chunk_tree, u64 chunk_objectid,
1897 u64 chunk_offset)
1898{
1899 struct extent_map_tree *em_tree;
1900 struct btrfs_root *extent_root;
1901 struct btrfs_trans_handle *trans;
1902 struct extent_map *em;
1903 struct map_lookup *map;
1904 int ret;
1905 int i;
1906
1907 root = root->fs_info->chunk_root;
1908 extent_root = root->fs_info->extent_root;
1909 em_tree = &root->fs_info->mapping_tree.map_tree;
1910
1911 ret = btrfs_can_relocate(extent_root, chunk_offset);
1912 if (ret)
1913 return -ENOSPC;
1914
1915 /* step one, relocate all the extents inside this chunk */
1916 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1917 if (ret)
1918 return ret;
1919
1920 trans = btrfs_start_transaction(root, 0);
1921 BUG_ON(IS_ERR(trans));
1922
1923 lock_chunks(root);
1924
1925 /*
1926 * step two, delete the device extents and the
1927 * chunk tree entries
1928 */
1929 read_lock(&em_tree->lock);
1930 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1931 read_unlock(&em_tree->lock);
1932
1933 BUG_ON(em->start > chunk_offset ||
1934 em->start + em->len < chunk_offset);
1935 map = (struct map_lookup *)em->bdev;
1936
1937 for (i = 0; i < map->num_stripes; i++) {
1938 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1939 map->stripes[i].physical);
1940 BUG_ON(ret);
1941
1942 if (map->stripes[i].dev) {
1943 ret = btrfs_update_device(trans, map->stripes[i].dev);
1944 BUG_ON(ret);
1945 }
1946 }
1947 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
1948 chunk_offset);
1949
1950 BUG_ON(ret);
1951
1952 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
1953
1954 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1955 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1956 BUG_ON(ret);
1957 }
1958
1959 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
1960 BUG_ON(ret);
1961
1962 write_lock(&em_tree->lock);
1963 remove_extent_mapping(em_tree, em);
1964 write_unlock(&em_tree->lock);
1965
1966 kfree(map);
1967 em->bdev = NULL;
1968
1969 /* once for the tree */
1970 free_extent_map(em);
1971 /* once for us */
1972 free_extent_map(em);
1973
1974 unlock_chunks(root);
1975 btrfs_end_transaction(trans, root);
1976 return 0;
1977}
1978
1979static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
1980{
1981 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
1982 struct btrfs_path *path;
1983 struct extent_buffer *leaf;
1984 struct btrfs_chunk *chunk;
1985 struct btrfs_key key;
1986 struct btrfs_key found_key;
1987 u64 chunk_tree = chunk_root->root_key.objectid;
1988 u64 chunk_type;
1989 bool retried = false;
1990 int failed = 0;
1991 int ret;
1992
1993 path = btrfs_alloc_path();
1994 if (!path)
1995 return -ENOMEM;
1996
1997again:
1998 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1999 key.offset = (u64)-1;
2000 key.type = BTRFS_CHUNK_ITEM_KEY;
2001
2002 while (1) {
2003 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2004 if (ret < 0)
2005 goto error;
2006 BUG_ON(ret == 0);
2007
2008 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2009 key.type);
2010 if (ret < 0)
2011 goto error;
2012 if (ret > 0)
2013 break;
2014
2015 leaf = path->nodes[0];
2016 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2017
2018 chunk = btrfs_item_ptr(leaf, path->slots[0],
2019 struct btrfs_chunk);
2020 chunk_type = btrfs_chunk_type(leaf, chunk);
2021 btrfs_release_path(path);
2022
2023 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2024 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2025 found_key.objectid,
2026 found_key.offset);
2027 if (ret == -ENOSPC)
2028 failed++;
2029 else if (ret)
2030 BUG();
2031 }
2032
2033 if (found_key.offset == 0)
2034 break;
2035 key.offset = found_key.offset - 1;
2036 }
2037 ret = 0;
2038 if (failed && !retried) {
2039 failed = 0;
2040 retried = true;
2041 goto again;
2042 } else if (failed && retried) {
2043 WARN_ON(1);
2044 ret = -ENOSPC;
2045 }
2046error:
2047 btrfs_free_path(path);
2048 return ret;
2049}
2050
2051static u64 div_factor(u64 num, int factor)
2052{
2053 if (factor == 10)
2054 return num;
2055 num *= factor;
2056 do_div(num, 10);
2057 return num;
2058}
2059
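/*
 * Balance works in two steps. Step one shrinks each nearly-full
 * writeable device by min(10% of its size, 1MB) and immediately grows
 * it back, leaving a hole for the relocation code to allocate from;
 * e.g. on a 100GB device, div_factor(old_size, 1) is 10GB, which the
 * min() below caps at 1MB. Step two walks the chunk tree from the
 * highest offset downwards and relocates every chunk it finds.
 */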
2060int btrfs_balance(struct btrfs_root *dev_root)
2061{
2062 int ret;
2063 struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
2064 struct btrfs_device *device;
2065 u64 old_size;
2066 u64 size_to_free;
2067 struct btrfs_path *path;
2068 struct btrfs_key key;
2069 struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
2070 struct btrfs_trans_handle *trans;
2071 struct btrfs_key found_key;
2072
2073 if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
2074 return -EROFS;
2075
2076 if (!capable(CAP_SYS_ADMIN))
2077 return -EPERM;
2078
2079 mutex_lock(&dev_root->fs_info->volume_mutex);
2080 dev_root = dev_root->fs_info->dev_root;
2081
2082 /* step one make some room on all the devices */
2083 list_for_each_entry(device, devices, dev_list) {
2084 old_size = device->total_bytes;
2085 size_to_free = div_factor(old_size, 1);
2086 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2087 if (!device->writeable ||
2088 device->total_bytes - device->bytes_used > size_to_free)
2089 continue;
2090
2091 ret = btrfs_shrink_device(device, old_size - size_to_free);
2092 if (ret == -ENOSPC)
2093 break;
2094 BUG_ON(ret);
2095
2096 trans = btrfs_start_transaction(dev_root, 0);
2097 BUG_ON(IS_ERR(trans));
2098
2099 ret = btrfs_grow_device(trans, device, old_size);
2100 BUG_ON(ret);
2101
2102 btrfs_end_transaction(trans, dev_root);
2103 }
2104
2105 /* step two, relocate all the chunks */
2106 path = btrfs_alloc_path();
2107 if (!path) {
2108 ret = -ENOMEM;
2109 goto error;
2110 }
2111 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2112 key.offset = (u64)-1;
2113 key.type = BTRFS_CHUNK_ITEM_KEY;
2114
2115 while (1) {
2116 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2117 if (ret < 0)
2118 goto error;
2119
2120 /*
2121 * this shouldn't happen, it means the last relocate
2122 * failed
2123 */
2124 if (ret == 0)
2125 break;
2126
2127 ret = btrfs_previous_item(chunk_root, path, 0,
2128 BTRFS_CHUNK_ITEM_KEY);
2129 if (ret)
2130 break;
2131
2132 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2133 path->slots[0]);
2134 if (found_key.objectid != key.objectid)
2135 break;
2136
2137 /* chunk zero is special */
2138 if (found_key.offset == 0)
2139 break;
2140
2141 btrfs_release_path(path);
2142 ret = btrfs_relocate_chunk(chunk_root,
2143 chunk_root->root_key.objectid,
2144 found_key.objectid,
2145 found_key.offset);
2146 if (ret && ret != -ENOSPC)
2147 goto error;
2148 key.offset = found_key.offset - 1;
2149 }
2150 ret = 0;
2151error:
2152 btrfs_free_path(path);
2153 mutex_unlock(&dev_root->fs_info->volume_mutex);
2154 return ret;
2155}
2156
2157/*
2158 * shrinking a device means finding all of the device extents past
2159 * the new size, and then following the back refs to the chunks.
2160 * The chunk relocation code actually frees the device extent
2161 */
2162int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2163{
2164 struct btrfs_trans_handle *trans;
2165 struct btrfs_root *root = device->dev_root;
2166 struct btrfs_dev_extent *dev_extent = NULL;
2167 struct btrfs_path *path;
2168 u64 length;
2169 u64 chunk_tree;
2170 u64 chunk_objectid;
2171 u64 chunk_offset;
2172 int ret;
2173 int slot;
2174 int failed = 0;
2175 bool retried = false;
2176 struct extent_buffer *l;
2177 struct btrfs_key key;
2178 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2179 u64 old_total = btrfs_super_total_bytes(super_copy);
2180 u64 old_size = device->total_bytes;
2181 u64 diff = device->total_bytes - new_size;
2182
2183 if (new_size >= device->total_bytes)
2184 return -EINVAL;
2185
2186 path = btrfs_alloc_path();
2187 if (!path)
2188 return -ENOMEM;
2189
2190 path->reada = 2;
2191
2192 lock_chunks(root);
2193
2194 device->total_bytes = new_size;
2195 if (device->writeable)
2196 device->fs_devices->total_rw_bytes -= diff;
2197 unlock_chunks(root);
2198
2199again:
2200 key.objectid = device->devid;
2201 key.offset = (u64)-1;
2202 key.type = BTRFS_DEV_EXTENT_KEY;
2203
2204 while (1) {
2205 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2206 if (ret < 0)
2207 goto done;
2208
2209 ret = btrfs_previous_item(root, path, 0, key.type);
2210 if (ret < 0)
2211 goto done;
2212 if (ret) {
2213 ret = 0;
2214 btrfs_release_path(path);
2215 break;
2216 }
2217
2218 l = path->nodes[0];
2219 slot = path->slots[0];
2220 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2221
2222 if (key.objectid != device->devid) {
2223 btrfs_release_path(path);
2224 break;
2225 }
2226
2227 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2228 length = btrfs_dev_extent_length(l, dev_extent);
2229
2230 if (key.offset + length <= new_size) {
2231 btrfs_release_path(path);
2232 break;
2233 }
2234
2235 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2236 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2237 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2238 btrfs_release_path(path);
2239
2240 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
2241 chunk_offset);
2242 if (ret && ret != -ENOSPC)
2243 goto done;
2244 if (ret == -ENOSPC)
2245 failed++;
2246 key.offset -= 1;
2247 }
2248
2249 if (failed && !retried) {
2250 failed = 0;
2251 retried = true;
2252 goto again;
2253 } else if (failed && retried) {
2254 ret = -ENOSPC;
2255 lock_chunks(root);
2256
2257 device->total_bytes = old_size;
2258 if (device->writeable)
2259 device->fs_devices->total_rw_bytes += diff;
2260 unlock_chunks(root);
2261 goto done;
2262 }
2263
2264 /* Shrinking succeeded, else we would be at "done". */
2265 trans = btrfs_start_transaction(root, 0);
2266 if (IS_ERR(trans)) {
2267 ret = PTR_ERR(trans);
2268 goto done;
2269 }
2270
2271 lock_chunks(root);
2272
2273 device->disk_total_bytes = new_size;
2274 /* Now btrfs_update_device() will change the on-disk size. */
2275 ret = btrfs_update_device(trans, device);
2276 if (ret) {
2277 unlock_chunks(root);
2278 btrfs_end_transaction(trans, root);
2279 goto done;
2280 }
2281 WARN_ON(diff > old_total);
2282 btrfs_set_super_total_bytes(super_copy, old_total - diff);
2283 unlock_chunks(root);
2284 btrfs_end_transaction(trans, root);
2285done:
2286 btrfs_free_path(path);
2287 return ret;
2288}
2289
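/*
 * Append a (disk_key, chunk item) pair to the superblock's
 * sys_chunk_array so that SYSTEM chunks can be mapped at mount time,
 * before the chunk tree itself is readable.
 */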
2290static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
2291 struct btrfs_root *root,
2292 struct btrfs_key *key,
2293 struct btrfs_chunk *chunk, int item_size)
2294{
2295 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2296 struct btrfs_disk_key disk_key;
2297 u32 array_size;
2298 u8 *ptr;
2299
2300 array_size = btrfs_super_sys_array_size(super_copy);
2301 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
2302 return -EFBIG;
2303
2304 ptr = super_copy->sys_chunk_array + array_size;
2305 btrfs_cpu_key_to_disk(&disk_key, key);
2306 memcpy(ptr, &disk_key, sizeof(disk_key));
2307 ptr += sizeof(disk_key);
2308 memcpy(ptr, chunk, item_size);
2309 item_size += sizeof(disk_key);
2310 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
2311 return 0;
2312}
2313
2314/*
2315 * sort the devices in descending order by max_avail, total_avail
2316 */
2317static int btrfs_cmp_device_info(const void *a, const void *b)
2318{
2319 const struct btrfs_device_info *di_a = a;
2320 const struct btrfs_device_info *di_b = b;
2321
2322 if (di_a->max_avail > di_b->max_avail)
2323 return -1;
2324 if (di_a->max_avail < di_b->max_avail)
2325 return 1;
2326 if (di_a->total_avail > di_b->total_avail)
2327 return -1;
2328 if (di_a->total_avail < di_b->total_avail)
2329 return 1;
2330 return 0;
2331}
2332
2333static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2334 struct btrfs_root *extent_root,
2335 struct map_lookup **map_ret,
2336 u64 *num_bytes_out, u64 *stripe_size_out,
2337 u64 start, u64 type)
2338{
2339 struct btrfs_fs_info *info = extent_root->fs_info;
2340 struct btrfs_fs_devices *fs_devices = info->fs_devices;
2341 struct list_head *cur;
2342 struct map_lookup *map = NULL;
2343 struct extent_map_tree *em_tree;
2344 struct extent_map *em;
2345 struct btrfs_device_info *devices_info = NULL;
2346 u64 total_avail;
2347 int num_stripes; /* total number of stripes to allocate */
2348 int sub_stripes; /* sub_stripes info for map */
2349 int dev_stripes; /* stripes per dev */
2350 int devs_max; /* max devs to use */
2351 int devs_min; /* min devs needed */
2352 int devs_increment; /* ndevs has to be a multiple of this */
2353 int ncopies; /* how many copies of the data there are */
2354 int ret;
2355 u64 max_stripe_size;
2356 u64 max_chunk_size;
2357 u64 stripe_size;
2358 u64 num_bytes;
2359 int ndevs;
2360 int i;
2361 int j;
2362
2363 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
2364 (type & BTRFS_BLOCK_GROUP_DUP)) {
2365 WARN_ON(1);
2366 type &= ~BTRFS_BLOCK_GROUP_DUP;
2367 }
2368
2369 if (list_empty(&fs_devices->alloc_list))
2370 return -ENOSPC;
2371
2372 sub_stripes = 1;
2373 dev_stripes = 1;
2374 devs_increment = 1;
2375 ncopies = 1;
2376 devs_max = 0; /* 0 == as many as possible */
2377 devs_min = 1;
2378
2379 /*
2380 * define the properties of each RAID type.
2381 * FIXME: move this to a global table and use it in all RAID
2382 * calculation code
2383 */
2384 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
2385 dev_stripes = 2;
2386 ncopies = 2;
2387 devs_max = 1;
2388 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
2389 devs_min = 2;
2390 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
2391 devs_increment = 2;
2392 ncopies = 2;
2393 devs_max = 2;
2394 devs_min = 2;
2395 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2396 sub_stripes = 2;
2397 devs_increment = 2;
2398 ncopies = 2;
2399 devs_min = 4;
2400 } else {
2401 devs_max = 1;
2402 }
2403
2404 if (type & BTRFS_BLOCK_GROUP_DATA) {
2405 max_stripe_size = 1024 * 1024 * 1024;
2406 max_chunk_size = 10 * max_stripe_size;
2407 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
2408 max_stripe_size = 256 * 1024 * 1024;
2409 max_chunk_size = max_stripe_size;
2410 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
2411 max_stripe_size = 8 * 1024 * 1024;
2412 max_chunk_size = 2 * max_stripe_size;
2413 } else {
2414 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
2415 type);
2416 BUG_ON(1);
2417 }
2418
2419 /* we don't want a chunk larger than 10% of writeable space */
2420 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
2421 max_chunk_size);
2422
2423 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
2424 GFP_NOFS);
2425 if (!devices_info)
2426 return -ENOMEM;
2427
2428 cur = fs_devices->alloc_list.next;
2429
2430 /*
2431 * in the first pass through the devices list, we gather information
2432 * about the available holes on each device.
2433 */
2434 ndevs = 0;
2435 while (cur != &fs_devices->alloc_list) {
2436 struct btrfs_device *device;
2437 u64 max_avail;
2438 u64 dev_offset;
2439
2440 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2441
2442 cur = cur->next;
2443
2444 if (!device->writeable) {
2445 printk(KERN_ERR
2446 "btrfs: read-only device in alloc_list\n");
2447 WARN_ON(1);
2448 continue;
2449 }
2450
2451 if (!device->in_fs_metadata)
2452 continue;
2453
2454 if (device->total_bytes > device->bytes_used)
2455 total_avail = device->total_bytes - device->bytes_used;
2456 else
2457 total_avail = 0;
2458
2459 /* If there is no space on this device, skip it. */
2460 if (total_avail == 0)
2461 continue;
2462
2463 ret = find_free_dev_extent(trans, device,
2464 max_stripe_size * dev_stripes,
2465 &dev_offset, &max_avail);
2466 if (ret && ret != -ENOSPC)
2467 goto error;
2468
2469 if (ret == 0)
2470 max_avail = max_stripe_size * dev_stripes;
2471
2472 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
2473 continue;
2474
2475 devices_info[ndevs].dev_offset = dev_offset;
2476 devices_info[ndevs].max_avail = max_avail;
2477 devices_info[ndevs].total_avail = total_avail;
2478 devices_info[ndevs].dev = device;
2479 ++ndevs;
2480 }
2481
2482 /*
2483 * now sort the devices by hole size / available space
2484 */
2485 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
2486 btrfs_cmp_device_info, NULL);
2487
2488 /* round down to number of usable stripes */
2489 ndevs -= ndevs % devs_increment;
2490
2491 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
2492 ret = -ENOSPC;
2493 goto error;
2494 }
2495
2496 if (devs_max && ndevs > devs_max)
2497 ndevs = devs_max;
2498 /*
2499 * the primary goal is to maximize the number of stripes, so use as many
2500 * devices as possible, even if the stripes are not maximum sized.
2501 */
2502 stripe_size = devices_info[ndevs-1].max_avail;
2503 num_stripes = ndevs * dev_stripes;
2504
2505 if (stripe_size * num_stripes > max_chunk_size * ncopies) {
2506 stripe_size = max_chunk_size * ncopies;
2507 do_div(stripe_size, num_stripes);
2508 }
2509
2510 do_div(stripe_size, dev_stripes);
2511 do_div(stripe_size, BTRFS_STRIPE_LEN);
2512 stripe_size *= BTRFS_STRIPE_LEN;
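/*
 * Worked example with hypothetical figures: 4 usable devices whose
 * smallest hole is 1GB, RAID10 (dev_stripes 1, ncopies 2), and a
 * max_chunk_size that is not the limiter: stripe_size starts at 1GB,
 * num_stripes is 4, and after the rounding above to a multiple of
 * BTRFS_STRIPE_LEN, stripe_size is still 1GB.
 */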
2513
2514 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2515 if (!map) {
2516 ret = -ENOMEM;
2517 goto error;
2518 }
2519 map->num_stripes = num_stripes;
2520
2521 for (i = 0; i < ndevs; ++i) {
2522 for (j = 0; j < dev_stripes; ++j) {
2523 int s = i * dev_stripes + j;
2524 map->stripes[s].dev = devices_info[i].dev;
2525 map->stripes[s].physical = devices_info[i].dev_offset +
2526 j * stripe_size;
2527 }
2528 }
2529 map->sector_size = extent_root->sectorsize;
2530 map->stripe_len = BTRFS_STRIPE_LEN;
2531 map->io_align = BTRFS_STRIPE_LEN;
2532 map->io_width = BTRFS_STRIPE_LEN;
2533 map->type = type;
2534 map->sub_stripes = sub_stripes;
2535
2536 *map_ret = map;
2537 num_bytes = stripe_size * (num_stripes / ncopies);
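/*
 * With the example figures above, the chunk exposes
 * 1GB * (4 / 2) == 2GB of logical space.
 */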
2538
2539 *stripe_size_out = stripe_size;
2540 *num_bytes_out = num_bytes;
2541
2542 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
2543
2544 em = alloc_extent_map();
2545 if (!em) {
2546 ret = -ENOMEM;
2547 goto error;
2548 }
2549 em->bdev = (struct block_device *)map;
2550 em->start = start;
2551 em->len = num_bytes;
2552 em->block_start = 0;
2553 em->block_len = em->len;
2554
2555 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2556 write_lock(&em_tree->lock);
2557 ret = add_extent_mapping(em_tree, em);
2558 write_unlock(&em_tree->lock);
2559 BUG_ON(ret);
2560 free_extent_map(em);
2561
2562 ret = btrfs_make_block_group(trans, extent_root, 0, type,
2563 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2564 start, num_bytes);
2565 BUG_ON(ret);
2566
2567 for (i = 0; i < map->num_stripes; ++i) {
2568 struct btrfs_device *device;
2569 u64 dev_offset;
2570
2571 device = map->stripes[i].dev;
2572 dev_offset = map->stripes[i].physical;
2573
2574 ret = btrfs_alloc_dev_extent(trans, device,
2575 info->chunk_root->root_key.objectid,
2576 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2577 start, dev_offset, stripe_size);
2578 BUG_ON(ret);
2579 }
2580
2581 kfree(devices_info);
2582 return 0;
2583
2584error:
2585 kfree(map);
2586 kfree(devices_info);
2587 return ret;
2588}
2589
2590static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2591 struct btrfs_root *extent_root,
2592 struct map_lookup *map, u64 chunk_offset,
2593 u64 chunk_size, u64 stripe_size)
2594{
2595 u64 dev_offset;
2596 struct btrfs_key key;
2597 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2598 struct btrfs_device *device;
2599 struct btrfs_chunk *chunk;
2600 struct btrfs_stripe *stripe;
2601 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
2602 int index = 0;
2603 int ret;
2604
2605 chunk = kzalloc(item_size, GFP_NOFS);
2606 if (!chunk)
2607 return -ENOMEM;
2608
2609 index = 0;
2610 while (index < map->num_stripes) {
2611 device = map->stripes[index].dev;
2612 device->bytes_used += stripe_size;
2613 ret = btrfs_update_device(trans, device);
2614 BUG_ON(ret);
2615 index++;
2616 }
2617
2618 index = 0;
2619 stripe = &chunk->stripe;
2620 while (index < map->num_stripes) {
2621 device = map->stripes[index].dev;
2622 dev_offset = map->stripes[index].physical;
2623
2624 btrfs_set_stack_stripe_devid(stripe, device->devid);
2625 btrfs_set_stack_stripe_offset(stripe, dev_offset);
2626 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
2627 stripe++;
2628 index++;
2629 }
2630
2631 btrfs_set_stack_chunk_length(chunk, chunk_size);
2632 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
2633 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
2634 btrfs_set_stack_chunk_type(chunk, map->type);
2635 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
2636 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
2637 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
2638 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
2639 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
2640
2641 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2642 key.type = BTRFS_CHUNK_ITEM_KEY;
2643 key.offset = chunk_offset;
2644
2645 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
2646 BUG_ON(ret);
2647
2648 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2649 ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
2650 item_size);
2651 BUG_ON(ret);
2652 }
2653
2654 kfree(chunk);
2655 return 0;
2656}
2657
2658/*
2659 * Chunk allocation falls into two parts. The first part does the work
2660 * that makes the newly allocated chunk usable, but does not do any
2661 * operation that modifies the chunk tree. The second part does the work
2662 * that requires modifying the chunk tree. This division is important
2663 * for the bootstrap process of adding storage to a seed btrfs.
2664 */
2665int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2666 struct btrfs_root *extent_root, u64 type)
2667{
2668 u64 chunk_offset;
2669 u64 chunk_size;
2670 u64 stripe_size;
2671 struct map_lookup *map;
2672 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2673 int ret;
2674
2675 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2676 &chunk_offset);
2677 if (ret)
2678 return ret;
2679
2680 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2681 &stripe_size, chunk_offset, type);
2682 if (ret)
2683 return ret;
2684
2685 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2686 chunk_size, stripe_size);
2687 BUG_ON(ret);
2688 return 0;
2689}
2690
2691static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2692 struct btrfs_root *root,
2693 struct btrfs_device *device)
2694{
2695 u64 chunk_offset;
2696 u64 sys_chunk_offset;
2697 u64 chunk_size;
2698 u64 sys_chunk_size;
2699 u64 stripe_size;
2700 u64 sys_stripe_size;
2701 u64 alloc_profile;
2702 struct map_lookup *map;
2703 struct map_lookup *sys_map;
2704 struct btrfs_fs_info *fs_info = root->fs_info;
2705 struct btrfs_root *extent_root = fs_info->extent_root;
2706 int ret;
2707
2708 ret = find_next_chunk(fs_info->chunk_root,
2709 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
2710 if (ret)
2711 return ret;
2712
2713 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
2714 (fs_info->metadata_alloc_profile &
2715 fs_info->avail_metadata_alloc_bits);
2716 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2717
2718 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2719 &stripe_size, chunk_offset, alloc_profile);
2720 BUG_ON(ret);
2721
2722 sys_chunk_offset = chunk_offset + chunk_size;
2723
2724 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
2725 (fs_info->system_alloc_profile &
2726 fs_info->avail_system_alloc_bits);
2727 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2728
2729 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
2730 &sys_chunk_size, &sys_stripe_size,
2731 sys_chunk_offset, alloc_profile);
2732 BUG_ON(ret);
2733
2734 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
2735 BUG_ON(ret);
2736
2737 /*
2738 * Modifying the chunk tree requires allocating new blocks from
2739 * both the system block group and the metadata block group, so
2740 * we can only do operations that modify the chunk tree after
2741 * both block groups have been created.
2742 */
2743 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2744 chunk_size, stripe_size);
2745 BUG_ON(ret);
2746
2747 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
2748 sys_chunk_offset, sys_chunk_size,
2749 sys_stripe_size);
2750 BUG_ON(ret);
2751 return 0;
2752}
2753
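/*
 * A chunk is treated as read-only if any of its stripes sits on a
 * device that cannot be written (or if its mapping cannot be found);
 * a DEGRADED mount treats every chunk as writeable.
 */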
2754int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2755{
2756 struct extent_map *em;
2757 struct map_lookup *map;
2758 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2759 int readonly = 0;
2760 int i;
2761
2762 read_lock(&map_tree->map_tree.lock);
2763 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2764 read_unlock(&map_tree->map_tree.lock);
2765 if (!em)
2766 return 1;
2767
2768 if (btrfs_test_opt(root, DEGRADED)) {
2769 free_extent_map(em);
2770 return 0;
2771 }
2772
2773 map = (struct map_lookup *)em->bdev;
2774 for (i = 0; i < map->num_stripes; i++) {
2775 if (!map->stripes[i].dev->writeable) {
2776 readonly = 1;
2777 break;
2778 }
2779 }
2780 free_extent_map(em);
2781 return readonly;
2782}
2783
2784void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2785{
2786 extent_map_tree_init(&tree->map_tree);
2787}
2788
2789void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2790{
2791 struct extent_map *em;
2792
2793 while (1) {
2794 write_lock(&tree->map_tree.lock);
2795 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2796 if (em)
2797 remove_extent_mapping(&tree->map_tree, em);
2798 write_unlock(&tree->map_tree.lock);
2799 if (!em)
2800 break;
2801 kfree(em->bdev);
2802 /* once for us */
2803 free_extent_map(em);
2804 /* once for the tree */
2805 free_extent_map(em);
2806 }
2807}
2808
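/*
 * Return how many copies of the data at [logical, logical + len)
 * exist: num_stripes for DUP/RAID1, sub_stripes for RAID10, 1
 * otherwise.
 */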
2809int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2810{
2811 struct extent_map *em;
2812 struct map_lookup *map;
2813 struct extent_map_tree *em_tree = &map_tree->map_tree;
2814 int ret;
2815
2816 read_lock(&em_tree->lock);
2817 em = lookup_extent_mapping(em_tree, logical, len);
2818 read_unlock(&em_tree->lock);
2819 BUG_ON(!em);
2820
2821 BUG_ON(em->start > logical || em->start + em->len < logical);
2822 map = (struct map_lookup *)em->bdev;
2823 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
2824 ret = map->num_stripes;
2825 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2826 ret = map->sub_stripes;
2827 else
2828 ret = 1;
2829 free_extent_map(em);
2830 return ret;
2831}
2832
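/*
 * Pick a mirror to read from: keep the caller's optimal choice if its
 * device is present, otherwise take the first mirror in
 * [first, first + num) that still has a block device.
 */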
2833static int find_live_mirror(struct map_lookup *map, int first, int num,
2834 int optimal)
2835{
2836 int i;
2837 if (map->stripes[optimal].dev->bdev)
2838 return optimal;
2839 for (i = first; i < first + num; i++) {
2840 if (map->stripes[i].dev->bdev)
2841 return i;
2842 }
2843 /* we couldn't find one that doesn't fail. Just return something
2844 * and the io error handling code will clean up eventually
2845 */
2846 return optimal;
2847}
2848
2849static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2850 u64 logical, u64 *length,
2851 struct btrfs_multi_bio **multi_ret,
2852 int mirror_num)
2853{
2854 struct extent_map *em;
2855 struct map_lookup *map;
2856 struct extent_map_tree *em_tree = &map_tree->map_tree;
2857 u64 offset;
2858 u64 stripe_offset;
2859 u64 stripe_end_offset;
2860 u64 stripe_nr;
2861 u64 stripe_nr_orig;
2862 u64 stripe_nr_end;
2863 int stripes_allocated = 8;
2864 int stripes_required = 1;
2865 int stripe_index;
2866 int i;
2867 int num_stripes;
2868 int max_errors = 0;
2869 struct btrfs_multi_bio *multi = NULL;
2870
2871 if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
2872 stripes_allocated = 1;
2873again:
2874 if (multi_ret) {
2875 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
2876 GFP_NOFS);
2877 if (!multi)
2878 return -ENOMEM;
2879
2880 atomic_set(&multi->error, 0);
2881 }
2882
2883 read_lock(&em_tree->lock);
2884 em = lookup_extent_mapping(em_tree, logical, *length);
2885 read_unlock(&em_tree->lock);
2886
2887 if (!em) {
2888 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2889 (unsigned long long)logical,
2890 (unsigned long long)*length);
2891 BUG();
2892 }
2893
2894 BUG_ON(em->start > logical || em->start + em->len < logical);
2895 map = (struct map_lookup *)em->bdev;
2896 offset = logical - em->start;
2897
2898 if (mirror_num > map->num_stripes)
2899 mirror_num = 0;
2900
2901 /* if our multi bio struct is too small, back off and try again */
2902 if (rw & REQ_WRITE) {
2903 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2904 BTRFS_BLOCK_GROUP_DUP)) {
2905 stripes_required = map->num_stripes;
2906 max_errors = 1;
2907 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2908 stripes_required = map->sub_stripes;
2909 max_errors = 1;
2910 }
2911 }
2912 if (rw & REQ_DISCARD) {
2913 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2914 BTRFS_BLOCK_GROUP_RAID1 |
2915 BTRFS_BLOCK_GROUP_DUP |
2916 BTRFS_BLOCK_GROUP_RAID10)) {
2917 stripes_required = map->num_stripes;
2918 }
2919 }
2920 if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
2921 stripes_allocated < stripes_required) {
2922 stripes_allocated = map->num_stripes;
2923 free_extent_map(em);
2924 kfree(multi);
2925 goto again;
2926 }
2927 stripe_nr = offset;
2928 /*
2929 * stripe_nr counts the total number of stripes we have to stride
2930 * to get to this block
2931 */
2932 do_div(stripe_nr, map->stripe_len);
2933
2934 stripe_offset = stripe_nr * map->stripe_len;
2935 BUG_ON(offset < stripe_offset);
2936
2937 /* stripe_offset is the offset of this block in its stripe */
2938 stripe_offset = offset - stripe_offset;
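/*
 * Example with hypothetical figures: with a 64KB stripe_len and
 * offset == 150KB, stripe_nr is 2 and stripe_offset is 22KB into the
 * third stripe.
 */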
2939
2940 if (rw & REQ_DISCARD)
2941 *length = min_t(u64, em->len - offset, *length);
2942 else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2943 BTRFS_BLOCK_GROUP_RAID1 |
2944 BTRFS_BLOCK_GROUP_RAID10 |
2945 BTRFS_BLOCK_GROUP_DUP)) {
2946 /* we limit the length of each bio to what fits in a stripe */
2947 *length = min_t(u64, em->len - offset,
2948 map->stripe_len - stripe_offset);
2949 } else {
2950 *length = em->len - offset;
2951 }
2952
2953 if (!multi_ret)
2954 goto out;
2955
2956 num_stripes = 1;
2957 stripe_index = 0;
2958 stripe_nr_orig = stripe_nr;
2959 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
2960 (~(map->stripe_len - 1));
2961 do_div(stripe_nr_end, map->stripe_len);
2962 stripe_end_offset = stripe_nr_end * map->stripe_len -
2963 (offset + *length);
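/*
 * stripe_nr_end is the first stripe past the end of the request
 * ((offset + *length) rounded up to a stripe boundary, in units of
 * stripe_len); stripe_end_offset is how far that rounded-up end
 * overshoots the request.
 */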
2964 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2965 if (rw & REQ_DISCARD)
2966 num_stripes = min_t(u64, map->num_stripes,
2967 stripe_nr_end - stripe_nr_orig);
2968 stripe_index = do_div(stripe_nr, map->num_stripes);
2969 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2970 if (rw & (REQ_WRITE | REQ_DISCARD))
2971 num_stripes = map->num_stripes;
2972 else if (mirror_num)
2973 stripe_index = mirror_num - 1;
2974 else {
2975 stripe_index = find_live_mirror(map, 0,
2976 map->num_stripes,
2977 current->pid % map->num_stripes);
2978 }
2979
2980 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2981 if (rw & (REQ_WRITE | REQ_DISCARD))
2982 num_stripes = map->num_stripes;
2983 else if (mirror_num)
2984 stripe_index = mirror_num - 1;
2985
2986 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2987 int factor = map->num_stripes / map->sub_stripes;
2988
2989 stripe_index = do_div(stripe_nr, factor);
2990 stripe_index *= map->sub_stripes;
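/*
 * Example with hypothetical figures: 4 stripes with sub_stripes == 2
 * give factor == 2; stripe_nr 5 lands in mirror pair 1
 * (stripe_index 2), and stripe_nr becomes 2, the stripe number within
 * that pair.
 */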
2991
2992 if (rw & REQ_WRITE)
2993 num_stripes = map->sub_stripes;
2994 else if (rw & REQ_DISCARD)
2995 num_stripes = min_t(u64, map->sub_stripes *
2996 (stripe_nr_end - stripe_nr_orig),
2997 map->num_stripes);
2998 else if (mirror_num)
2999 stripe_index += mirror_num - 1;
3000 else {
3001 stripe_index = find_live_mirror(map, stripe_index,
3002 map->sub_stripes, stripe_index +
3003 current->pid % map->sub_stripes);
3004 }
3005 } else {
3006 /*
3007 * after this do_div call, stripe_nr is the number of stripes
3008 * on this device we have to walk to find the data, and
3009 * stripe_index is the number of our device in the stripe array
3010 */
3011 stripe_index = do_div(stripe_nr, map->num_stripes);
3012 }
3013 BUG_ON(stripe_index >= map->num_stripes);
3014
	if (rw & REQ_DISCARD) {
		for (i = 0; i < num_stripes; i++) {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
				u64 stripes;
				u32 last_stripe = 0;
				int j;

				div_u64_rem(stripe_nr_end - 1,
						map->num_stripes,
						&last_stripe);

				for (j = 0; j < map->num_stripes; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
							map->num_stripes, &test);
					if (test == stripe_index)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, map->num_stripes);
				multi->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i == 0) {
					multi->stripes[i].length -=
						stripe_offset;
					stripe_offset = 0;
				}
				if (stripe_index == last_stripe)
					multi->stripes[i].length -=
						stripe_end_offset;
			} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
				u64 stripes;
				int j;
				int factor = map->num_stripes /
					     map->sub_stripes;
				u32 last_stripe = 0;

				div_u64_rem(stripe_nr_end - 1,
						factor, &last_stripe);
				last_stripe *= map->sub_stripes;

				for (j = 0; j < factor; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
							factor, &test);

					if (test ==
					    stripe_index / map->sub_stripes)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, factor);
				multi->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i < map->sub_stripes) {
					multi->stripes[i].length -=
						stripe_offset;
					if (i == map->sub_stripes - 1)
						stripe_offset = 0;
				}
				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     map->sub_stripes - 1)) {
					multi->stripes[i].length -=
						stripe_end_offset;
				}
			} else
				multi->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			multi->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}

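/*
 * Thin public wrapper around __btrfs_map_block().  On return *length is
 * clamped to what can go into one bio, and when multi_ret is non-NULL,
 * *multi_ret describes the physical stripes backing the logical range.
 * An illustrative read-side caller (a sketch, not code from this file;
 * error handling elided):
 *
 *	struct btrfs_multi_bio *multi = NULL;
 *	u64 len = length_wanted;
 *
 *	ret = btrfs_map_block(map_tree, READ, logical, &len, &multi, 0);
 *	for (i = 0; i < multi->num_stripes; i++)
 *		... read len bytes from multi->stripes[i].dev at
 *		    multi->stripes[i].physical ...
 *	kfree(multi);
 */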
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num);
}

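/*
 * The reverse of btrfs_map_block(): given a physical byte offset on one
 * of the devices of a chunk, collect every logical address that maps to
 * it.  Mirrored profiles can yield several stripes for one logical
 * address, so duplicates are filtered out.  The result is a kzalloc'd
 * array the caller must kfree; used, for example, by
 * exclude_super_stripes() to find the logical addresses covering the
 * superblock copies.
 */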
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}

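/*
 * Completion callback shared by all the bios of one btrfs_multi_bio.
 * Each failing stripe only bumps the error count; the last stripe to
 * complete compares that count against multi->max_errors and ends the
 * original bio with success or -EIO accordingly.
 */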
static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else if (err) {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}

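/*
 * Entry point for sending a logical bio down to the disks: map the
 * logical range to physical stripes, clone the bio once per extra
 * stripe (the original bio is used for the last one), point each at
 * its device and submit it, either directly or via schedule_bio() when
 * async_submit is set.  end_bio_multi_stripe() fans the completions
 * back into the original bio.
 */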
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}

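/*
 * Find a device by devid, walking the mounted fs_devices and then any
 * seed filesystems chained behind it.  A NULL uuid or fsid acts as a
 * wildcard for the corresponding comparison.
 */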
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

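/*
 * Create a placeholder btrfs_device for a devid that is referenced by
 * metadata but not present (only reachable on degraded mounts).  The
 * placeholder has no bdev, so any I/O mapped to it is failed with -EIO
 * by btrfs_map_bio().
 */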
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	device->missing = 1;
	fs_devices->num_devices++;
	fs_devices->missing_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}

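/*
 * Turn one on-disk chunk item into an extent_map in the mapping tree;
 * the em->bdev field is (ab)used to carry the struct map_lookup that
 * describes the stripes.  Every stripe must resolve to a known device,
 * except on degraded mounts, where a placeholder from add_missing_dev()
 * is accepted instead.
 */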
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}

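/*
 * Copy an on-disk dev item into the in-memory device; the
 * btrfs_device_* accessors handle the le->cpu conversion.
 */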
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

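/*
 * A dev item whose fsid differs from the filesystem being mounted
 * belongs to a seed filesystem.  Find its btrfs_fs_devices (cloning it
 * so the original scan state is untouched), open it read-only and
 * chain it onto fs_devices->seed so device lookups can fall through
 * to it.
 */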
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}

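/*
 * Process one dev item from the chunk tree: resolve the matching
 * btrfs_device (fabricating a missing one on degraded mounts, opening
 * seed devices when the fsid is foreign) and refresh its in-memory
 * fields from the on-disk item.
 */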
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "btrfs: devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		} else if (!device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	ret = 0;
	return ret;
}

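/*
 * The superblock carries a bootstrap copy of the system chunks in
 * sys_chunk_array so the chunk tree itself can be located.  The array
 * is a packed sequence with no item headers:
 *
 *	+----------+--------------------------------+----------+----
 *	| disk_key | chunk (num_stripes inline)     | disk_key | ...
 *	+----------+--------------------------------+----------+----
 *
 * Each chunk is btrfs_chunk_item_size(num_stripes) bytes, so the array
 * can only be walked sequentially, which is what the loop below does.
 */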
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}