// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_write_buffer.h"
#include "buckets_waiting_for_journal.h"
#include "chardev.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fsck.h"
#include "inode.h"
#include "io_read.h"
#include "io_write.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "migrate.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
#include "sb-counters.h"
#include "sb-errors.h"
#include "sb-members.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "sysfs.h"
#include "trace.h"

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
#include <crypto/hash.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_DESCRIPTION("bcachefs filesystem");
MODULE_SOFTDEP("pre: crc32c");
MODULE_SOFTDEP("pre: crc64");
MODULE_SOFTDEP("pre: sha256");
MODULE_SOFTDEP("pre: chacha20");
MODULE_SOFTDEP("pre: poly1305");
MODULE_SOFTDEP("pre: xxhash");

const char * const bch2_fs_flag_strs[] = {
#define x(n) #n,
	BCH_FS_FLAGS()
#undef x
	NULL
};

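/*
 * Filesystem log messages normally go to the kernel log; when a stdio
 * redirect is set up (e.g. for commands run on behalf of a userspace
 * process), they are instead appended to the redirect's output buffer
 * and anyone waiting on it is woken:
 */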
void __bch2_print(struct bch_fs *c, const char *fmt, ...)
{
	struct stdio_redirect *stdio = bch2_fs_stdio_redirect(c);

	va_list args;
	va_start(args, fmt);
	if (likely(!stdio)) {
		vprintk(fmt, args);
	} else {
		unsigned long flags;

		if (fmt[0] == KERN_SOH[0])
			fmt += 2;

		spin_lock_irqsave(&stdio->output_lock, flags);
		prt_vprintf(&stdio->output_buf, fmt, args);
		spin_unlock_irqrestore(&stdio->output_lock, flags);

		wake_up(&stdio->output_wait);
	}
	va_end(args);
}

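/*
 * KTYPE() generates the sysfs boilerplate for one kobject type: an attribute
 * group wrapping type ## _files, the group list, and a kobj_type wiring up
 * the matching _release and _sysfs_ops:
 */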
#define KTYPE(type) \
static const struct attribute_group type ## _group = { \
	.attrs = type ## _files \
}; \
 \
static const struct attribute_group *type ## _groups[] = { \
	&type ## _group, \
	NULL \
}; \
 \
static const struct kobj_type type ## _ktype = { \
	.release = type ## _release, \
	.sysfs_ops = &type ## _sysfs_ops, \
	.default_groups = type ## _groups \
}

static void bch2_fs_release(struct kobject *);
static void bch2_dev_release(struct kobject *);
static void bch2_fs_counters_release(struct kobject *k)
{
}

static void bch2_fs_internal_release(struct kobject *k)
{
}

static void bch2_fs_opts_dir_release(struct kobject *k)
{
}

static void bch2_fs_time_stats_release(struct kobject *k)
{
}

KTYPE(bch2_fs);
KTYPE(bch2_fs_counters);
KTYPE(bch2_fs_internal);
KTYPE(bch2_fs_opts_dir);
KTYPE(bch2_fs_time_stats);
KTYPE(bch2_dev);

static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);

DECLARE_WAIT_QUEUE_HEAD(bch2_read_only_wait);

static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);

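/*
 * Look up the filesystem containing the block device @dev; if found, a ref on
 * the filesystem's closure is taken, which the caller must drop:
 */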
struct bch_fs *bch2_dev_to_fs(dev_t dev)
{
	struct bch_fs *c;

	mutex_lock(&bch_fs_list_lock);
	rcu_read_lock();

	list_for_each_entry(c, &bch_fs_list, list)
		for_each_member_device_rcu(c, ca, NULL)
			if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
				closure_get(&c->cl);
				goto found;
			}
	c = NULL;
found:
	rcu_read_unlock();
	mutex_unlock(&bch_fs_list_lock);

	return c;
}

static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
{
	struct bch_fs *c;

	lockdep_assert_held(&bch_fs_list_lock);

	list_for_each_entry(c, &bch_fs_list, list)
		if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
			return c;

	return NULL;
}

struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
{
	struct bch_fs *c;

	mutex_lock(&bch_fs_list_lock);
	c = __bch2_uuid_to_fs(uuid);
	if (c)
		closure_get(&c->cl);
	mutex_unlock(&bch_fs_list_lock);

	return c;
}

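/*
 * Each journal entry needs room for one dev_usage entry per member device;
 * recompute that reservation whenever the number of devices changes:
 */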
static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
{
	unsigned nr = 0, u64s =
		(sizeof(struct jset_entry_dev_usage) +
		 sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR) /
		sizeof(u64);

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, NULL)
		nr++;
	rcu_read_unlock();

	bch2_journal_entry_res_resize(&c->journal,
			&c->dev_usage_journal_res, u64s * nr);
}

/* Filesystem RO/RW: */

/*
 * For startup/shutdown of RW stuff, the dependencies are:
 *
 * - foreground writes depend on copygc and rebalance (to free up space)
 *
 * - copygc and rebalance depend on mark and sweep gc (they actually probably
 *   don't because they either reserve ahead of time or don't block if
 *   allocations fail, but allocations can require mark and sweep gc to run
 *   because of generation number wraparound)
 *
 * - all of the above depends on the allocator threads
 *
 * - allocator depends on the journal (when it rewrites prios and gens)
 */

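/*
 * Flushing is iterative here because interior btree updates, journal pins and
 * btree writes can each generate more work for the others - so we keep going
 * until we get two passes in a row where nothing was flushed and the journal
 * sequence number didn't move:
 */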
static void __bch2_fs_read_only(struct bch_fs *c)
{
	unsigned clean_passes = 0;
	u64 seq = 0;

	bch2_fs_ec_stop(c);
	bch2_open_buckets_stop(c, NULL, true);
	bch2_rebalance_stop(c);
	bch2_copygc_stop(c);
	bch2_gc_thread_stop(c);
	bch2_fs_ec_flush(c);

	bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
		    journal_cur_seq(&c->journal));

	do {
		clean_passes++;

		if (bch2_btree_interior_updates_flush(c) ||
		    bch2_journal_flush_all_pins(&c->journal) ||
		    bch2_btree_flush_all_writes(c) ||
		    seq != atomic64_read(&c->journal.seq)) {
			seq = atomic64_read(&c->journal.seq);
			clean_passes = 0;
		}
	} while (clean_passes < 2);

	bch_verbose(c, "flushing journal and stopping allocators complete, journal seq %llu",
		    journal_cur_seq(&c->journal));

	if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
	    !test_bit(BCH_FS_emergency_ro, &c->flags))
		set_bit(BCH_FS_clean_shutdown, &c->flags);
	bch2_fs_journal_stop(&c->journal);

	/*
	 * After stopping journal:
	 */
	for_each_member_device(c, ca)
		bch2_dev_allocator_remove(c, ca);
}

#ifndef BCH_WRITE_REF_DEBUG
static void bch2_writes_disabled(struct percpu_ref *writes)
{
	struct bch_fs *c = container_of(writes, struct bch_fs, writes);

	set_bit(BCH_FS_write_disable_complete, &c->flags);
	wake_up(&bch2_read_only_wait);
}
#endif

void bch2_fs_read_only(struct bch_fs *c)
{
	if (!test_bit(BCH_FS_rw, &c->flags)) {
		bch2_journal_reclaim_stop(&c->journal);
		return;
	}

	BUG_ON(test_bit(BCH_FS_write_disable_complete, &c->flags));

	bch_verbose(c, "going read-only");

	/*
	 * Block new foreground-end write operations from starting - any new
	 * writes will return -EROFS:
	 */
	set_bit(BCH_FS_going_ro, &c->flags);
#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_kill(&c->writes);
#else
	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
		bch2_write_ref_put(c, i);
#endif

	/*
	 * If we're not doing an emergency shutdown, we want to wait on
	 * outstanding writes to complete so they don't see spurious errors due
	 * to shutting down the allocator.
	 *
	 * If we are doing an emergency shutdown, outstanding writes may hang
	 * until we shut down the allocator, so we don't want to wait on them
	 * before shutting everything down - but we do need to wait on them
	 * before returning and signalling that going RO is complete:
	 */
	wait_event(bch2_read_only_wait,
		   test_bit(BCH_FS_write_disable_complete, &c->flags) ||
		   test_bit(BCH_FS_emergency_ro, &c->flags));

	bool writes_disabled = test_bit(BCH_FS_write_disable_complete, &c->flags);
	if (writes_disabled)
		bch_verbose(c, "finished waiting for writes to stop");

	__bch2_fs_read_only(c);

	wait_event(bch2_read_only_wait,
		   test_bit(BCH_FS_write_disable_complete, &c->flags));

	if (!writes_disabled)
		bch_verbose(c, "finished waiting for writes to stop");

	clear_bit(BCH_FS_write_disable_complete, &c->flags);
	clear_bit(BCH_FS_going_ro, &c->flags);
	clear_bit(BCH_FS_rw, &c->flags);

	if (!bch2_journal_error(&c->journal) &&
	    !test_bit(BCH_FS_error, &c->flags) &&
	    !test_bit(BCH_FS_emergency_ro, &c->flags) &&
	    test_bit(BCH_FS_started, &c->flags) &&
	    test_bit(BCH_FS_clean_shutdown, &c->flags) &&
	    !c->opts.norecovery) {
		BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
		BUG_ON(atomic_read(&c->btree_cache.dirty));
		BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
		BUG_ON(c->btree_write_buffer.inc.keys.nr);
		BUG_ON(c->btree_write_buffer.flushing.keys.nr);

		bch_verbose(c, "marking filesystem clean");
		bch2_fs_mark_clean(c);
	} else {
		bch_verbose(c, "done going read-only, filesystem not clean");
	}
}

static void bch2_fs_read_only_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, read_only_work);

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);
}

static void bch2_fs_read_only_async(struct bch_fs *c)
{
	queue_work(system_long_wq, &c->read_only_work);
}

bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
	bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags);

	bch2_journal_halt(&c->journal);
	bch2_fs_read_only_async(c);

	wake_up(&bch2_read_only_wait);
	return ret;
}

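/*
 * Second phase of going read-write, for the background tasks that can't start
 * until recovery is far enough along; called from __bch2_fs_read_write() or,
 * if the filesystem went rw early in recovery, from bch2_fs_start():
 */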
static int bch2_fs_read_write_late(struct bch_fs *c)
{
	int ret;

	/*
	 * Data move operations can't run until after check_snapshots has
	 * completed, and bch2_snapshot_is_ancestor() is available.
	 *
	 * Ideally we'd start copygc/rebalance earlier instead of waiting for
	 * all of recovery/fsck to complete:
	 */
	ret = bch2_copygc_start(c);
	if (ret) {
		bch_err(c, "error starting copygc thread");
		return ret;
	}

	ret = bch2_rebalance_start(c);
	if (ret) {
		bch_err(c, "error starting rebalance thread");
		return ret;
	}

	return 0;
}

static int __bch2_fs_read_write(struct bch_fs *c, bool early)
{
	int ret;

	if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
		bch_err(c, "cannot go rw, unfixed btree errors");
		return -BCH_ERR_erofs_unfixed_errors;
	}

	if (test_bit(BCH_FS_rw, &c->flags))
		return 0;

	bch_info(c, "going read-write");

	ret = bch2_sb_members_v2_init(c);
	if (ret)
		goto err;

	ret = bch2_fs_mark_dirty(c);
	if (ret)
		goto err;

	clear_bit(BCH_FS_clean_shutdown, &c->flags);

	/*
	 * First journal write must be a flush write: after a clean shutdown we
	 * don't read the journal, so the first journal write may end up
	 * overwriting whatever was there previously, and there must always be
	 * at least one non-flush write in the journal or recovery will fail:
	 */
	set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);

	for_each_rw_member(c, ca)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	set_bit(BCH_FS_rw, &c->flags);
	set_bit(BCH_FS_was_rw, &c->flags);

#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_reinit(&c->writes);
#else
	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
		BUG_ON(atomic_long_read(&c->writes[i]));
		atomic_long_inc(&c->writes[i]);
	}
#endif

	ret = bch2_gc_thread_start(c);
	if (ret) {
		bch_err(c, "error starting gc thread");
		return ret;
	}

	ret = bch2_journal_reclaim_start(&c->journal);
	if (ret)
		goto err;

	if (!early) {
		ret = bch2_fs_read_write_late(c);
		if (ret)
			goto err;
	}

	bch2_do_discards(c);
	bch2_do_invalidates(c);
	bch2_do_stripe_deletes(c);
	bch2_do_pending_node_rewrites(c);
	return 0;
err:
	if (test_bit(BCH_FS_rw, &c->flags))
		bch2_fs_read_only(c);
	else
		__bch2_fs_read_only(c);
	return ret;
}

int bch2_fs_read_write(struct bch_fs *c)
{
	if (c->opts.norecovery)
		return -BCH_ERR_erofs_norecovery;

	if (c->opts.nochanges)
		return -BCH_ERR_erofs_nochanges;

	return __bch2_fs_read_write(c, false);
}

int bch2_fs_read_write_early(struct bch_fs *c)
{
	lockdep_assert_held(&c->state_lock);

	return __bch2_fs_read_write(c, true);
}

/* Filesystem startup/shutdown: */

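/*
 * Final teardown: frees everything bch2_fs_alloc() set up, in roughly the
 * reverse order of initialization. Runs from the fs kobject's release method,
 * once the last reference is dropped:
 */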
static void __bch2_fs_free(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < BCH_TIME_STAT_NR; i++)
		bch2_time_stats_exit(&c->times[i]);

	bch2_free_pending_node_rewrites(c);
	bch2_fs_sb_errors_exit(c);
	bch2_fs_counters_exit(c);
	bch2_fs_snapshots_exit(c);
	bch2_fs_quota_exit(c);
	bch2_fs_fs_io_direct_exit(c);
	bch2_fs_fs_io_buffered_exit(c);
	bch2_fs_fsio_exit(c);
	bch2_fs_ec_exit(c);
	bch2_fs_encryption_exit(c);
	bch2_fs_nocow_locking_exit(c);
	bch2_fs_io_write_exit(c);
	bch2_fs_io_read_exit(c);
	bch2_fs_buckets_waiting_for_journal_exit(c);
	bch2_fs_btree_interior_update_exit(c);
	bch2_fs_btree_iter_exit(c);
	bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
	bch2_fs_btree_cache_exit(c);
	bch2_fs_replicas_exit(c);
	bch2_fs_journal_exit(&c->journal);
	bch2_io_clock_exit(&c->io_clock[WRITE]);
	bch2_io_clock_exit(&c->io_clock[READ]);
	bch2_fs_compress_exit(c);
	bch2_journal_keys_put_initial(c);
	BUG_ON(atomic_read(&c->journal_keys.ref));
	bch2_fs_btree_write_buffer_exit(c);
	percpu_free_rwsem(&c->mark_lock);
	free_percpu(c->online_reserved);

	darray_exit(&c->btree_roots_extra);
	free_percpu(c->pcpu);
	mempool_exit(&c->large_bkey_pool);
	mempool_exit(&c->btree_bounce_pool);
	bioset_exit(&c->btree_bio);
	mempool_exit(&c->fill_iter);
#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_exit(&c->writes);
#endif
	kfree(rcu_dereference_protected(c->disk_groups, 1));
	kfree(c->journal_seq_blacklist_table);
	kfree(c->unused_inode_hints);

	if (c->write_ref_wq)
		destroy_workqueue(c->write_ref_wq);
	if (c->io_complete_wq)
		destroy_workqueue(c->io_complete_wq);
	if (c->copygc_wq)
		destroy_workqueue(c->copygc_wq);
	if (c->btree_io_complete_wq)
		destroy_workqueue(c->btree_io_complete_wq);
	if (c->btree_update_wq)
		destroy_workqueue(c->btree_update_wq);

	bch2_free_super(&c->disk_sb);
	kvpfree(c, sizeof(*c));
	module_put(THIS_MODULE);
}

static void bch2_fs_release(struct kobject *kobj)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	__bch2_fs_free(c);
}

void __bch2_fs_stop(struct bch_fs *c)
{
	bch_verbose(c, "shutting down");

	set_bit(BCH_FS_stopping, &c->flags);

	cancel_work_sync(&c->journal_seq_blacklist_gc_work);

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);

	for_each_member_device(c, ca)
		if (ca->kobj.state_in_sysfs &&
		    ca->disk_sb.bdev)
			sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

	if (c->kobj.state_in_sysfs)
		kobject_del(&c->kobj);

	bch2_fs_debug_exit(c);
	bch2_fs_chardev_exit(c);

	bch2_ro_ref_put(c);
	wait_event(c->ro_ref_wait, !refcount_read(&c->ro_ref));

	kobject_put(&c->counters_kobj);
	kobject_put(&c->time_stats);
	kobject_put(&c->opts_dir);
	kobject_put(&c->internal);

	/* btree prefetch might have kicked off reads in the background: */
	bch2_btree_flush_all_reads(c);

	for_each_member_device(c, ca)
		cancel_work_sync(&ca->io_error_work);

	cancel_work_sync(&c->read_only_work);
}

void bch2_fs_free(struct bch_fs *c)
{
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	list_del(&c->list);
	mutex_unlock(&bch_fs_list_lock);

	closure_sync(&c->cl);
	closure_debug_destroy(&c->cl);

	for (i = 0; i < c->sb.nr_devices; i++) {
		struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);

		if (ca) {
			bch2_free_super(&ca->disk_sb);
			bch2_dev_free(ca);
		}
	}

	bch_verbose(c, "shutdown complete");

	kobject_put(&c->kobj);
}

void bch2_fs_stop(struct bch_fs *c)
{
	__bch2_fs_stop(c);
	bch2_fs_free(c);
}

static int bch2_fs_online(struct bch_fs *c)
{
	int ret = 0;

	lockdep_assert_held(&bch_fs_list_lock);

	if (__bch2_uuid_to_fs(c->sb.uuid)) {
		bch_err(c, "filesystem UUID already open");
		return -EINVAL;
	}

	ret = bch2_fs_chardev_init(c);
	if (ret) {
		bch_err(c, "error creating character device");
		return ret;
	}

	bch2_fs_debug_init(c);

	ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
	    kobject_add(&c->internal, &c->kobj, "internal") ?:
	    kobject_add(&c->opts_dir, &c->kobj, "options") ?:
#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
	    kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
#endif
	    kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
	    bch2_opts_create_sysfs_files(&c->opts_dir);
	if (ret) {
		bch_err(c, "error creating sysfs objects");
		return ret;
	}

	down_write(&c->state_lock);

	for_each_member_device(c, ca) {
		ret = bch2_dev_sysfs_online(c, ca);
		if (ret) {
			bch_err(c, "error creating sysfs objects");
			percpu_ref_put(&ca->ref);
			goto err;
		}
	}

	BUG_ON(!list_empty(&c->list));
	list_add(&c->list, &bch_fs_list);
err:
	up_write(&c->state_lock);
	return ret;
}

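/*
 * Allocate a new in-memory filesystem from an on-disk superblock: on success
 * the filesystem has been added to bch_fs_list and registered in sysfs, but
 * has not yet been started - that's bch2_fs_start()'s job:
 */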
static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
	struct bch_fs *c;
	struct printbuf name = PRINTBUF;
	unsigned i, iter_size;
	int ret = 0;

	c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
	if (!c) {
		c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
		goto out;
	}

	c->stdio = (void *)(unsigned long) opts.stdio;

	__module_get(THIS_MODULE);

	closure_init(&c->cl, NULL);

	c->kobj.kset = bcachefs_kset;
	kobject_init(&c->kobj, &bch2_fs_ktype);
	kobject_init(&c->internal, &bch2_fs_internal_ktype);
	kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
	kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
	kobject_init(&c->counters_kobj, &bch2_fs_counters_ktype);

	c->minor = -1;
	c->disk_sb.fs_sb = true;

	init_rwsem(&c->state_lock);
	mutex_init(&c->sb_lock);
	mutex_init(&c->replicas_gc_lock);
	mutex_init(&c->btree_root_lock);
	INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);

	refcount_set(&c->ro_ref, 1);
	init_waitqueue_head(&c->ro_ref_wait);
	sema_init(&c->online_fsck_mutex, 1);

	init_rwsem(&c->gc_lock);
	mutex_init(&c->gc_gens_lock);
	atomic_set(&c->journal_keys.ref, 1);
	c->journal_keys.initial_ref_held = true;

	for (i = 0; i < BCH_TIME_STAT_NR; i++)
		bch2_time_stats_init(&c->times[i]);

	bch2_fs_copygc_init(c);
	bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
	bch2_fs_btree_iter_init_early(c);
	bch2_fs_btree_interior_update_init_early(c);
	bch2_fs_allocator_background_init(c);
	bch2_fs_allocator_foreground_init(c);
	bch2_fs_rebalance_init(c);
	bch2_fs_quota_init(c);
	bch2_fs_ec_init_early(c);
	bch2_fs_move_init(c);
	bch2_fs_sb_errors_init_early(c);

	INIT_LIST_HEAD(&c->list);

	mutex_init(&c->usage_scratch_lock);

	mutex_init(&c->bio_bounce_pages_lock);
	mutex_init(&c->snapshot_table_lock);
	init_rwsem(&c->snapshot_create_lock);

	spin_lock_init(&c->btree_write_error_lock);

	INIT_WORK(&c->journal_seq_blacklist_gc_work,
		  bch2_blacklist_entries_gc);

	INIT_LIST_HEAD(&c->journal_iters);

	INIT_LIST_HEAD(&c->fsck_error_msgs);
	mutex_init(&c->fsck_error_msgs_lock);

	seqcount_init(&c->gc_pos_lock);

	seqcount_init(&c->usage_lock);

	sema_init(&c->io_in_flight, 128);

	INIT_LIST_HEAD(&c->vfs_inodes_list);
	mutex_init(&c->vfs_inodes_lock);

	c->copy_gc_enabled = 1;
	c->rebalance.enabled = 1;
	c->promote_whole_extents = true;

	c->journal.flush_write_time = &c->times[BCH_TIME_journal_flush_write];
	c->journal.noflush_write_time = &c->times[BCH_TIME_journal_noflush_write];
	c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq];

	bch2_fs_btree_cache_init_early(&c->btree_cache);

	mutex_init(&c->sectors_available_lock);

	ret = percpu_init_rwsem(&c->mark_lock);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	ret = bch2_sb_to_fs(c, sb);
	mutex_unlock(&c->sb_lock);

	if (ret)
		goto err;

	pr_uuid(&name, c->sb.user_uuid.b);
	strscpy(c->name, name.buf, sizeof(c->name));
	printbuf_exit(&name);

	ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
	if (ret)
		goto err;

	/* Compat: */
	if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
	    !BCH_SB_JOURNAL_FLUSH_DELAY(sb))
		SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);

	if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
	    !BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
		SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);

	c->opts = bch2_opts_default;
	ret = bch2_opts_from_sb(&c->opts, sb);
	if (ret)
		goto err;

	bch2_opts_apply(&c->opts, opts);

	c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
	if (c->opts.inodes_use_key_cache)
		c->btree_key_cache_btrees |= 1U << BTREE_ID_inodes;
	c->btree_key_cache_btrees |= 1U << BTREE_ID_logged_ops;

	c->block_bits = ilog2(block_sectors(c));
	c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);

	if (bch2_fs_init_fault("fs_alloc")) {
		bch_err(c, "fs_alloc fault injected");
		ret = -EFAULT;
		goto err;
	}

	iter_size = sizeof(struct sort_iter) +
		(btree_blocks(c) + 1) * 2 *
		sizeof(struct sort_iter_set);

	c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));

	if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
				WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512)) ||
	    !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
				WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
	    !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
	    !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
				WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 512)) ||
	    !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
				WQ_FREEZABLE, 0)) ||
#ifndef BCH_WRITE_REF_DEBUG
	    percpu_ref_init(&c->writes, bch2_writes_disabled,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
#endif
	    mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
	    bioset_init(&c->btree_bio, 1,
			max(offsetof(struct btree_read_bio, bio),
			    offsetof(struct btree_write_bio, wbio.bio)),
			BIOSET_NEED_BVECS) ||
	    !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
	    !(c->online_reserved = alloc_percpu(u64)) ||
	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
					c->opts.btree_node_size) ||
	    mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
	    !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
					      sizeof(u64), GFP_KERNEL))) {
		ret = -BCH_ERR_ENOMEM_fs_other_alloc;
		goto err;
	}

	ret = bch2_fs_counters_init(c) ?:
	    bch2_fs_sb_errors_init(c) ?:
	    bch2_io_clock_init(&c->io_clock[READ]) ?:
	    bch2_io_clock_init(&c->io_clock[WRITE]) ?:
	    bch2_fs_journal_init(&c->journal) ?:
	    bch2_fs_replicas_init(c) ?:
	    bch2_fs_btree_cache_init(c) ?:
	    bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
	    bch2_fs_btree_iter_init(c) ?:
	    bch2_fs_btree_interior_update_init(c) ?:
	    bch2_fs_buckets_waiting_for_journal_init(c) ?:
	    bch2_fs_btree_write_buffer_init(c) ?:
	    bch2_fs_subvolumes_init(c) ?:
	    bch2_fs_io_read_init(c) ?:
	    bch2_fs_io_write_init(c) ?:
	    bch2_fs_nocow_locking_init(c) ?:
	    bch2_fs_encryption_init(c) ?:
	    bch2_fs_compress_init(c) ?:
	    bch2_fs_ec_init(c) ?:
	    bch2_fs_fsio_init(c) ?:
	    bch2_fs_fs_io_buffered_init(c) ?:
	    bch2_fs_fs_io_direct_init(c);
	if (ret)
		goto err;

	for (i = 0; i < c->sb.nr_devices; i++)
		if (bch2_dev_exists(c->disk_sb.sb, i) &&
		    bch2_dev_alloc(c, i)) {
			ret = -EEXIST;
			goto err;
		}

	bch2_journal_entry_res_resize(&c->journal,
			&c->btree_root_journal_res,
			BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
	bch2_dev_usage_journal_reserve(c);
	bch2_journal_entry_res_resize(&c->journal,
			&c->clock_journal_res,
			(sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);

	mutex_lock(&bch_fs_list_lock);
	ret = bch2_fs_online(c);
	mutex_unlock(&bch_fs_list_lock);

	if (ret)
		goto err;
out:
	return c;
err:
	bch2_fs_free(c);
	c = ERR_PTR(ret);
	goto out;
}

noinline_for_stack
static void print_mount_opts(struct bch_fs *c)
{
	enum bch_opt_id i;
	struct printbuf p = PRINTBUF;
	bool first = true;

	prt_str(&p, "mounting version ");
	bch2_version_to_text(&p, c->sb.version);

	if (c->opts.read_only) {
		prt_str(&p, " opts=");
		first = false;
		prt_printf(&p, "ro");
	}

	for (i = 0; i < bch2_opts_nr; i++) {
		const struct bch_option *opt = &bch2_opt_table[i];
		u64 v = bch2_opt_get_by_id(&c->opts, i);

		if (!(opt->flags & OPT_MOUNT))
			continue;

		if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
			continue;

		prt_str(&p, first ? " opts=" : ",");
		first = false;
		bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
	}

	bch_info(c, "%s", p.buf);
	printbuf_exit(&p);
}

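/*
 * Start a filesystem previously set up by bch2_fs_alloc(): runs recovery (or
 * initializes a fresh filesystem, if the superblock was never marked
 * initialized), then transitions to read-write unless opts.read_only is set:
 */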
int bch2_fs_start(struct bch_fs *c)
{
	time64_t now = ktime_get_real_seconds();
	int ret;

	print_mount_opts(c);

	down_write(&c->state_lock);

	BUG_ON(test_bit(BCH_FS_started, &c->flags));

	mutex_lock(&c->sb_lock);

	ret = bch2_sb_members_v2_init(c);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		goto err;
	}

	for_each_online_member(c, ca)
		bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);

	mutex_unlock(&c->sb_lock);

	for_each_rw_member(c, ca)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
		? bch2_fs_recovery(c)
		: bch2_fs_initialize(c);
	if (ret)
		goto err;

	ret = bch2_opts_check_may_set(c);
	if (ret)
		goto err;

	if (bch2_fs_init_fault("fs_start")) {
		bch_err(c, "fs_start fault injected");
		ret = -EINVAL;
		goto err;
	}

	set_bit(BCH_FS_started, &c->flags);

	if (c->opts.read_only) {
		bch2_fs_read_only(c);
	} else {
		ret = !test_bit(BCH_FS_rw, &c->flags)
			? bch2_fs_read_write(c)
			: bch2_fs_read_write_late(c);
		if (ret)
			goto err;
	}

	ret = 0;
err:
	if (ret)
		bch_err_msg(c, ret, "starting filesystem");
	else
		bch_verbose(c, "done starting filesystem");
	up_write(&c->state_lock);
	return ret;
}

static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
{
	struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);

	if (le16_to_cpu(sb->block_size) != block_sectors(c))
		return -BCH_ERR_mismatched_block_size;

	if (le16_to_cpu(m.bucket_size) <
	    BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
		return -BCH_ERR_bucket_size_too_small;

	return 0;
}

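/*
 * Check that the superblock in @sb is a member of the filesystem described by
 * @fs, and that the two are consistent - in particular, detect split brain,
 * where a member was last written independently of the rest of the
 * filesystem:
 */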
static int bch2_dev_in_fs(struct bch_sb_handle *fs,
			  struct bch_sb_handle *sb)
{
	if (fs == sb)
		return 0;

	if (!uuid_equal(&fs->sb->uuid, &sb->sb->uuid))
		return -BCH_ERR_device_not_a_member_of_filesystem;

	if (!bch2_dev_exists(fs->sb, sb->sb->dev_idx))
		return -BCH_ERR_device_has_been_removed;

	if (fs->sb->block_size != sb->sb->block_size)
		return -BCH_ERR_mismatched_block_size;

	if (le16_to_cpu(fs->sb->version) < bcachefs_metadata_version_member_seq ||
	    le16_to_cpu(sb->sb->version) < bcachefs_metadata_version_member_seq)
		return 0;

	if (fs->sb->seq == sb->sb->seq &&
	    fs->sb->write_time != sb->sb->write_time) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "Split brain detected between ");
		prt_bdevname(&buf, sb->bdev);
		prt_str(&buf, " and ");
		prt_bdevname(&buf, fs->bdev);
		prt_char(&buf, ':');
		prt_newline(&buf);
		prt_printf(&buf, "seq=%llu but write_time different, got", le64_to_cpu(sb->sb->seq));
		prt_newline(&buf);

		prt_bdevname(&buf, fs->bdev);
		prt_char(&buf, ' ');
		bch2_prt_datetime(&buf, le64_to_cpu(fs->sb->write_time));
		prt_newline(&buf);

		prt_bdevname(&buf, sb->bdev);
		prt_char(&buf, ' ');
		bch2_prt_datetime(&buf, le64_to_cpu(sb->sb->write_time));
		prt_newline(&buf);

		prt_printf(&buf, "Not using older sb");

		pr_err("%s", buf.buf);
		printbuf_exit(&buf);
		return -BCH_ERR_device_splitbrain;
	}

	struct bch_member m = bch2_sb_member_get(fs->sb, sb->sb->dev_idx);
	u64 seq_from_fs = le64_to_cpu(m.seq);
	u64 seq_from_member = le64_to_cpu(sb->sb->seq);

	if (seq_from_fs && seq_from_fs < seq_from_member) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "Split brain detected between ");
		prt_bdevname(&buf, sb->bdev);
		prt_str(&buf, " and ");
		prt_bdevname(&buf, fs->bdev);
		prt_char(&buf, ':');
		prt_newline(&buf);

		prt_bdevname(&buf, fs->bdev);
		prt_str(&buf, " believes seq of ");
		prt_bdevname(&buf, sb->bdev);
		prt_printf(&buf, " to be %llu, but ", seq_from_fs);
		prt_bdevname(&buf, sb->bdev);
		prt_printf(&buf, " has %llu\n", seq_from_member);
		prt_str(&buf, "Not using ");
		prt_bdevname(&buf, sb->bdev);

		pr_err("%s", buf.buf);
		printbuf_exit(&buf);
		return -BCH_ERR_device_splitbrain;
	}

	return 0;
}

/* Device startup/shutdown: */

static void bch2_dev_release(struct kobject *kobj)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);

	kfree(ca);
}

static void bch2_dev_free(struct bch_dev *ca)
{
	cancel_work_sync(&ca->io_error_work);

	if (ca->kobj.state_in_sysfs &&
	    ca->disk_sb.bdev)
		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

	if (ca->kobj.state_in_sysfs)
		kobject_del(&ca->kobj);

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);

	free_percpu(ca->io_done);
	bioset_exit(&ca->replica_set);
	bch2_dev_buckets_free(ca);
	free_page((unsigned long) ca->sb_read_scratch);

	bch2_time_stats_exit(&ca->io_latency[WRITE]);
	bch2_time_stats_exit(&ca->io_latency[READ]);

	percpu_ref_exit(&ca->io_ref);
	percpu_ref_exit(&ca->ref);
	kobject_put(&ca->kobj);
}

static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	if (percpu_ref_is_zero(&ca->io_ref))
		return;

	__bch2_dev_read_only(c, ca);

	reinit_completion(&ca->io_ref_completion);
	percpu_ref_kill(&ca->io_ref);
	wait_for_completion(&ca->io_ref_completion);

	if (ca->kobj.state_in_sysfs) {
		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
		sysfs_remove_link(&ca->kobj, "block");
	}

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);
}

static void bch2_dev_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, ref);

	complete(&ca->ref_completion);
}

static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);

	complete(&ca->io_ref_completion);
}

static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
{
	int ret;

	if (!c->kobj.state_in_sysfs)
		return 0;

	if (!ca->kobj.state_in_sysfs) {
		ret = kobject_add(&ca->kobj, &c->kobj,
				  "dev-%u", ca->dev_idx);
		if (ret)
			return ret;
	}

	if (ca->disk_sb.bdev) {
		struct kobject *block = bdev_kobj(ca->disk_sb.bdev);

		ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
		if (ret)
			return ret;

		ret = sysfs_create_link(&ca->kobj, block, "block");
		if (ret)
			return ret;
	}

	return 0;
}

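/*
 * Allocate a struct bch_dev and initialize the parts that don't require the
 * device to be online; the superblock and block device are attached
 * separately, via __bch2_dev_attach_bdev():
 */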
static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
					struct bch_member *member)
{
	struct bch_dev *ca;
	unsigned i;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		return NULL;

	kobject_init(&ca->kobj, &bch2_dev_ktype);
	init_completion(&ca->ref_completion);
	init_completion(&ca->io_ref_completion);

	init_rwsem(&ca->bucket_lock);

	INIT_WORK(&ca->io_error_work, bch2_io_error_work);

	bch2_time_stats_init(&ca->io_latency[READ]);
	bch2_time_stats_init(&ca->io_latency[WRITE]);

	ca->mi = bch2_mi_to_cpu(member);

	for (i = 0; i < ARRAY_SIZE(member->errors); i++)
		atomic64_set(&ca->errors[i], le64_to_cpu(member->errors[i]));

	ca->uuid = member->uuid;

	ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     ca->mi.bucket_size / btree_sectors(c));

	if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
			    0, GFP_KERNEL) ||
	    percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
	    !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
	    bch2_dev_buckets_alloc(c, ca) ||
	    bioset_init(&ca->replica_set, 4,
			offsetof(struct bch_write_bio, bio), 0) ||
	    !(ca->io_done = alloc_percpu(*ca->io_done)))
		goto err;

	return ca;
err:
	bch2_dev_free(ca);
	return NULL;
}

static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
			    unsigned dev_idx)
{
	ca->dev_idx = dev_idx;
	__set_bit(ca->dev_idx, ca->self.d);
	scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);

	ca->fs = c;
	rcu_assign_pointer(c->devs[ca->dev_idx], ca);

	if (bch2_dev_sysfs_online(c, ca))
		pr_warn("error creating sysfs objects");
}

static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
{
	struct bch_member member = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
	struct bch_dev *ca = NULL;
	int ret = 0;

	if (bch2_fs_init_fault("dev_alloc"))
		goto err;

	ca = __bch2_dev_alloc(c, &member);
	if (!ca)
		goto err;

	ca->fs = c;

	bch2_dev_attach(c, ca, dev_idx);
	return ret;
err:
	if (ca)
		bch2_dev_free(ca);
	return -BCH_ERR_ENOMEM_dev_alloc;
}

static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
{
	unsigned ret;

	if (bch2_dev_is_online(ca)) {
		bch_err(ca, "already have device online in slot %u",
			sb->sb->dev_idx);
		return -BCH_ERR_device_already_online;
	}

	if (get_capacity(sb->bdev->bd_disk) <
	    ca->mi.bucket_size * ca->mi.nbuckets) {
		bch_err(ca, "cannot online: device too small");
		return -BCH_ERR_device_size_too_small;
	}

	BUG_ON(!percpu_ref_is_zero(&ca->io_ref));

	ret = bch2_dev_journal_init(ca, sb->sb);
	if (ret)
		return ret;

	/* Commit: */
	ca->disk_sb = *sb;
	memset(sb, 0, sizeof(*sb));

	ca->dev = ca->disk_sb.bdev->bd_dev;

	percpu_ref_reinit(&ca->io_ref);

	return 0;
}

static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
{
	struct bch_dev *ca;
	int ret;

	lockdep_assert_held(&c->state_lock);

	if (le64_to_cpu(sb->sb->seq) >
	    le64_to_cpu(c->disk_sb.sb->seq))
		bch2_sb_to_fs(c, sb->sb);

	BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
	       !c->devs[sb->sb->dev_idx]);

	ca = bch_dev_locked(c, sb->sb->dev_idx);

	ret = __bch2_dev_attach_bdev(ca, sb);
	if (ret)
		return ret;

	bch2_dev_sysfs_online(c, ca);

	struct printbuf name = PRINTBUF;
	prt_bdevname(&name, ca->disk_sb.bdev);

	if (c->sb.nr_devices == 1)
		strscpy(c->name, name.buf, sizeof(c->name));
	strscpy(ca->name, name.buf, sizeof(ca->name));

	printbuf_exit(&name);

	rebalance_wakeup(c);
	return 0;
}

/* Device management: */

/*
 * Note: this function is also used by the error paths - when a particular
 * device sees an error, we call it to determine whether we can just set the
 * device RO, or - if this function returns false - we'll set the whole
 * filesystem RO:
 *
 * XXX: maybe we should be more explicit about whether we're changing state
 * because we got an error or what have you?
 */
bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
			    enum bch_member_state new_state, int flags)
{
	struct bch_devs_mask new_online_devs;
	int nr_rw = 0, required;

	lockdep_assert_held(&c->state_lock);

	switch (new_state) {
	case BCH_MEMBER_STATE_rw:
		return true;
	case BCH_MEMBER_STATE_ro:
		if (ca->mi.state != BCH_MEMBER_STATE_rw)
			return true;

		/* do we have enough devices to write to? */
		for_each_member_device(c, ca2)
			if (ca2 != ca)
				nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;

		required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
			       ? c->opts.metadata_replicas
			       : metadata_replicas_required(c),
			       !(flags & BCH_FORCE_IF_DATA_DEGRADED)
			       ? c->opts.data_replicas
			       : data_replicas_required(c));

		return nr_rw >= required;
	case BCH_MEMBER_STATE_failed:
	case BCH_MEMBER_STATE_spare:
		if (ca->mi.state != BCH_MEMBER_STATE_rw &&
		    ca->mi.state != BCH_MEMBER_STATE_ro)
			return true;

		/* do we have enough devices to read from? */
		new_online_devs = bch2_online_devs(c);
		__clear_bit(ca->dev_idx, new_online_devs.d);

		return bch2_have_enough_devs(c, new_online_devs, flags, false);
	default:
		BUG();
	}
}

static bool bch2_fs_may_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, flags = 0;

	if (c->opts.very_degraded)
		flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;

	if (c->opts.degraded)
		flags |= BCH_FORCE_IF_DEGRADED;

	if (!c->opts.degraded &&
	    !c->opts.very_degraded) {
		mutex_lock(&c->sb_lock);

		for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
			if (!bch2_dev_exists(c->disk_sb.sb, i))
				continue;

			ca = bch_dev_locked(c, i);

			if (!bch2_dev_is_online(ca) &&
			    (ca->mi.state == BCH_MEMBER_STATE_rw ||
			     ca->mi.state == BCH_MEMBER_STATE_ro)) {
				mutex_unlock(&c->sb_lock);
				return false;
			}
		}
		mutex_unlock(&c->sb_lock);
	}

	return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
}

static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
	/*
	 * The allocator thread itself allocates btree nodes, so stop it first:
	 */
	bch2_dev_allocator_remove(c, ca);
	bch2_dev_journal_stop(&c->journal, ca);
}

static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);

	bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);
}

int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
			 enum bch_member_state new_state, int flags)
{
	struct bch_member *m;
	int ret = 0;

	if (ca->mi.state == new_state)
		return 0;

	if (!bch2_dev_state_allowed(c, ca, new_state, flags))
		return -BCH_ERR_device_state_not_allowed;

	if (new_state != BCH_MEMBER_STATE_rw)
		__bch2_dev_read_only(c, ca);

	bch_notice(ca, "%s", bch2_member_states[new_state]);

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_STATE(m, new_state);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (new_state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	rebalance_wakeup(c);

	return ret;
}

int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
		       enum bch_member_state new_state, int flags)
{
	int ret;

	down_write(&c->state_lock);
	ret = __bch2_dev_set_state(c, ca, new_state, flags);
	up_write(&c->state_lock);

	return ret;
}

/* Device add/removal: */

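/*
 * Delete a device's allocation metadata - its ranges in the alloc, freespace,
 * need_discard, backpointers, lru and bucket_gens btrees - prior to removal:
 */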
static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	struct bpos start = POS(ca->dev_idx, 0);
	struct bpos end = POS(ca->dev_idx, U64_MAX);
	int ret;

	/*
	 * We clear the LRU and need_discard btrees first so that we don't race
	 * with bch2_do_invalidates() and bch2_do_discards()
	 */
	ret = bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
				      BTREE_TRIGGER_NORUN, NULL);
	bch_err_msg(c, ret, "removing dev alloc info");
	return ret;
}

int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	struct bch_member *m;
	unsigned dev_idx = ca->dev_idx, data;
	int ret;

	down_write(&c->state_lock);

	/*
	 * We consume a reference to ca->ref, regardless of whether we succeed
	 * or fail:
	 */
	percpu_ref_put(&ca->ref);

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
		bch_err(ca, "Cannot remove without losing data");
		ret = -BCH_ERR_device_state_not_allowed;
		goto err;
	}

	__bch2_dev_read_only(c, ca);

	ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
	bch_err_msg(ca, ret, "dropping data");
	if (ret)
		goto err;

	ret = bch2_dev_remove_alloc(c, ca);
	bch_err_msg(ca, ret, "deleting alloc info");
	if (ret)
		goto err;

	ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
	bch_err_msg(ca, ret, "flushing journal");
	if (ret)
		goto err;

	ret = bch2_journal_flush(&c->journal);
	bch_err_msg(ca, ret, "journal error");
	if (ret)
		goto err;

	ret = bch2_replicas_gc2(c);
	bch_err_msg(ca, ret, "in replicas_gc2()");
	if (ret)
		goto err;

	data = bch2_dev_has_data(c, ca);
	if (data) {
		struct printbuf data_has = PRINTBUF;

		prt_bitflags(&data_has, __bch2_data_types, data);
		bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
		printbuf_exit(&data_has);
		ret = -EBUSY;
		goto err;
	}

	__bch2_dev_offline(c, ca);

	mutex_lock(&c->sb_lock);
	rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
	mutex_unlock(&c->sb_lock);

	percpu_ref_kill(&ca->ref);
	wait_for_completion(&ca->ref_completion);

	bch2_dev_free(ca);

	/*
	 * At this point the device object has been removed in-core, but the
	 * on-disk journal might still refer to the device index via sb device
	 * usage entries. Recovery fails if it sees usage information for an
	 * invalid device. Flush journal pins to push the back of the journal
	 * past now invalid device index references before we update the
	 * superblock, but after the device object has been removed so any
	 * further journal writes elide usage info for the device.
	 */
	bch2_journal_flush_all_pins(&c->journal);

	/*
	 * Free this device's slot in the bch_member array - all pointers to
	 * this device must be gone:
	 */
	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
	memset(&m->uuid, 0, sizeof(m->uuid));

	bch2_write_super(c);

	mutex_unlock(&c->sb_lock);
	up_write(&c->state_lock);

	bch2_dev_usage_journal_reserve(c);
	return 0;
err:
	if (ca->mi.state == BCH_MEMBER_STATE_rw &&
	    !percpu_ref_is_zero(&ca->io_ref))
		__bch2_dev_read_write(c, ca);
	up_write(&c->state_lock);
	return ret;
}

/* Add new device to running filesystem: */
int bch2_dev_add(struct bch_fs *c, const char *path)
{
	struct bch_opts opts = bch2_opts_empty();
	struct bch_sb_handle sb;
	struct bch_dev *ca = NULL;
	struct bch_sb_field_members_v2 *mi;
	struct bch_member dev_mi;
	unsigned dev_idx, nr_devices, u64s;
	struct printbuf errbuf = PRINTBUF;
	struct printbuf label = PRINTBUF;
	int ret;

	ret = bch2_read_super(path, &opts, &sb);
	bch_err_msg(c, ret, "reading super");
	if (ret)
		goto err;

	dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);

	if (BCH_MEMBER_GROUP(&dev_mi)) {
		bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
		if (label.allocation_failure) {
			ret = -ENOMEM;
			goto err;
		}
	}

	ret = bch2_dev_may_add(sb.sb, c);
	if (ret)
		goto err;

	ca = __bch2_dev_alloc(c, &dev_mi);
	if (!ca) {
		ret = -ENOMEM;
		goto err;
	}

	bch2_dev_usage_init(ca);

	ret = __bch2_dev_attach_bdev(ca, &sb);
	if (ret)
		goto err;

	ret = bch2_dev_journal_alloc(ca);
	bch_err_msg(c, ret, "allocating journal");
	if (ret)
		goto err;

	down_write(&c->state_lock);
	mutex_lock(&c->sb_lock);

	ret = bch2_sb_from_fs(c, ca);
	bch_err_msg(c, ret, "setting up new superblock");
	if (ret)
		goto err_unlock;

	if (dynamic_fault("bcachefs:add:no_slot"))
		goto no_slot;

	for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
		if (!bch2_dev_exists(c->disk_sb.sb, dev_idx))
			goto have_slot;
no_slot:
	ret = -BCH_ERR_ENOSPC_sb_members;
	bch_err_msg(c, ret, "setting up new superblock");
	goto err_unlock;

have_slot:
	nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);

	mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
	u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
			    le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));

	mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
	if (!mi) {
		ret = -BCH_ERR_ENOSPC_sb_members;
		bch_err_msg(c, ret, "setting up new superblock");
		goto err_unlock;
	}
	struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);

	/* success: */

	*m = dev_mi;
	m->last_mount = cpu_to_le64(ktime_get_real_seconds());
	c->disk_sb.sb->nr_devices = nr_devices;

	ca->disk_sb.sb->dev_idx = dev_idx;
	bch2_dev_attach(c, ca, dev_idx);

	if (BCH_MEMBER_GROUP(&dev_mi)) {
		ret = __bch2_dev_group_set(c, ca, label.buf);
		bch_err_msg(c, ret, "creating new label");
		if (ret)
			goto err_unlock;
	}

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	bch2_dev_usage_journal_reserve(c);

	ret = bch2_trans_mark_dev_sb(c, ca);
	bch_err_msg(ca, ret, "marking new superblock");
	if (ret)
		goto err_late;

	ret = bch2_fs_freespace_init(c);
	bch_err_msg(ca, ret, "initializing free space");
	if (ret)
		goto err_late;

	ca->new_fs_bucket_idx = 0;

	if (ca->mi.state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	up_write(&c->state_lock);
	return 0;

err_unlock:
	mutex_unlock(&c->sb_lock);
	up_write(&c->state_lock);
err:
	if (ca)
		bch2_dev_free(ca);
	bch2_free_super(&sb);
	printbuf_exit(&label);
	printbuf_exit(&errbuf);
	bch_err_fn(c, ret);
	return ret;
err_late:
	up_write(&c->state_lock);
	ca = NULL;
	goto err;
}

/* Hot add existing device to running filesystem: */
int bch2_dev_online(struct bch_fs *c, const char *path)
{
	struct bch_opts opts = bch2_opts_empty();
	struct bch_sb_handle sb = { NULL };
	struct bch_dev *ca;
	unsigned dev_idx;
	int ret;

	down_write(&c->state_lock);

	ret = bch2_read_super(path, &opts, &sb);
	if (ret) {
		up_write(&c->state_lock);
		return ret;
	}

	dev_idx = sb.sb->dev_idx;

	ret = bch2_dev_in_fs(&c->disk_sb, &sb);
	bch_err_msg(c, ret, "bringing %s online", path);
	if (ret)
		goto err;

	ret = bch2_dev_attach_bdev(c, &sb);
	if (ret)
		goto err;

	ca = bch_dev_locked(c, dev_idx);

	ret = bch2_trans_mark_dev_sb(c, ca);
	bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
	if (ret)
		goto err;

	if (ca->mi.state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	if (!ca->mi.freespace_initialized) {
		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		bch_err_msg(ca, ret, "initializing free space");
		if (ret)
			goto err;
	}

	if (!ca->journal.nr) {
		ret = bch2_dev_journal_alloc(ca);
		bch_err_msg(ca, ret, "allocating journal");
		if (ret)
			goto err;
	}

	mutex_lock(&c->sb_lock);
	bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
		cpu_to_le64(ktime_get_real_seconds());
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	up_write(&c->state_lock);
	return 0;
err:
	up_write(&c->state_lock);
	bch2_free_super(&sb);
	return ret;
}

int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	down_write(&c->state_lock);

	if (!bch2_dev_is_online(ca)) {
		bch_err(ca, "Already offline");
		up_write(&c->state_lock);
		return 0;
	}

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
		bch_err(ca, "Cannot offline required disk");
		up_write(&c->state_lock);
		return -BCH_ERR_device_state_not_allowed;
	}

	__bch2_dev_offline(c, ca);

	up_write(&c->state_lock);
	return 0;
}

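/*
 * Grow a member device to @nbuckets buckets (shrinking isn't supported yet):
 * resizes the in-memory bucket arrays, updates the superblock, and extends
 * the freespace btree if it was already initialized:
 */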
int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bch_member *m;
	u64 old_nbuckets;
	int ret = 0;

	down_write(&c->state_lock);
	old_nbuckets = ca->mi.nbuckets;

	if (nbuckets < ca->mi.nbuckets) {
		bch_err(ca, "Cannot shrink yet");
		ret = -EINVAL;
		goto err;
	}

	if (bch2_dev_is_online(ca) &&
	    get_capacity(ca->disk_sb.bdev->bd_disk) <
	    ca->mi.bucket_size * nbuckets) {
		bch_err(ca, "New size larger than device");
		ret = -BCH_ERR_device_size_too_small;
		goto err;
	}

	ret = bch2_dev_buckets_resize(c, ca, nbuckets);
	bch_err_msg(ca, ret, "resizing buckets");
	if (ret)
		goto err;

	ret = bch2_trans_mark_dev_sb(c, ca);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	m->nbuckets = cpu_to_le64(nbuckets);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (ca->mi.freespace_initialized) {
		ret = bch2_dev_freespace_init(c, ca, old_nbuckets, nbuckets);
		if (ret)
			goto err;

		/*
		 * XXX: this is all wrong transactionally - we'll be able to do
		 * this correctly after the disk space accounting rewrite
		 */
		ca->usage_base->d[BCH_DATA_free].buckets += nbuckets - old_nbuckets;
	}

	bch2_recalc_capacity(c);
err:
	up_write(&c->state_lock);
	return ret;
}

/* return with ref on ca->ref: */
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
{
	rcu_read_lock();
	for_each_member_device_rcu(c, ca, NULL)
		if (!strcmp(name, ca->name)) {
			rcu_read_unlock();
			return ca;
		}
	rcu_read_unlock();
	return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
}

/* Filesystem open: */

static inline int sb_cmp(struct bch_sb *l, struct bch_sb *r)
{
	return cmp_int(le64_to_cpu(l->seq), le64_to_cpu(r->seq)) ?:
		cmp_int(le64_to_cpu(l->write_time), le64_to_cpu(r->write_time));
}

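/*
 * Open a filesystem from a set of member device paths: the superblock with
 * the highest seq (ties broken by write_time, per sb_cmp() above) is used to
 * construct the filesystem, and members that have been removed or are split
 * brained are dropped:
 */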
struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
			    struct bch_opts opts)
{
	DARRAY(struct bch_sb_handle) sbs = { 0 };
	struct bch_fs *c = NULL;
	struct bch_sb_handle *best = NULL;
	struct printbuf errbuf = PRINTBUF;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-ENODEV);

	if (!nr_devices) {
		ret = -EINVAL;
		goto err;
	}

	ret = darray_make_room(&sbs, nr_devices);
	if (ret)
		goto err;

	for (unsigned i = 0; i < nr_devices; i++) {
		struct bch_sb_handle sb = { NULL };

		ret = bch2_read_super(devices[i], &opts, &sb);
		if (ret)
			goto err;

		BUG_ON(darray_push(&sbs, sb));
	}

	if (opts.nochanges && !opts.read_only) {
		ret = -BCH_ERR_erofs_nochanges;
		goto err_print;
	}

	darray_for_each(sbs, sb)
		if (!best || sb_cmp(sb->sb, best->sb) > 0)
			best = sb;

	darray_for_each_reverse(sbs, sb) {
		ret = bch2_dev_in_fs(best, sb);

		if (ret == -BCH_ERR_device_has_been_removed ||
		    ret == -BCH_ERR_device_splitbrain) {
			bch2_free_super(sb);
			darray_remove_item(&sbs, sb);
			best -= best > sb;
			ret = 0;
			continue;
		}

		if (ret)
			goto err_print;
	}

	c = bch2_fs_alloc(best->sb, opts);
	ret = PTR_ERR_OR_ZERO(c);
	if (ret)
		goto err;

	down_write(&c->state_lock);
	darray_for_each(sbs, sb) {
		ret = bch2_dev_attach_bdev(c, sb);
		if (ret) {
			up_write(&c->state_lock);
			goto err;
		}
	}
	up_write(&c->state_lock);

	if (!bch2_fs_may_start(c)) {
		ret = -BCH_ERR_insufficient_devices_to_start;
		goto err_print;
	}

	if (!c->opts.nostart) {
		ret = bch2_fs_start(c);
		if (ret)
			goto err;
	}
out:
	darray_for_each(sbs, sb)
		bch2_free_super(sb);
	darray_exit(&sbs);
	printbuf_exit(&errbuf);
	module_put(THIS_MODULE);
	return c;
err_print:
	pr_err("bch_fs_open err opening %s: %s",
	       devices[0], bch2_err_str(ret));
err:
	if (!IS_ERR_OR_NULL(c))
		bch2_fs_stop(c);
	c = ERR_PTR(ret);
	goto out;
}

/* Global interfaces/init */

static void bcachefs_exit(void)
{
	bch2_debug_exit();
	bch2_vfs_exit();
	bch2_chardev_exit();
	bch2_btree_key_cache_exit();
	if (bcachefs_kset)
		kset_unregister(bcachefs_kset);
}

static int __init bcachefs_init(void)
{
	bch2_bkey_pack_test();

	if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
	    bch2_btree_key_cache_init() ||
	    bch2_chardev_init() ||
	    bch2_vfs_init() ||
	    bch2_debug_init())
		goto err;

	return 0;
err:
	bcachefs_exit();
	return -ENOMEM;
}

#define BCH_DEBUG_PARAM(name, description) \
	bool bch2_##name; \
	module_param_named(name, bch2_##name, bool, 0644); \
	MODULE_PARM_DESC(name, description);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

__maybe_unused
static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
module_param_named(version, bch2_metadata_version, uint, 0400);

module_exit(bcachefs_exit);
module_init(bcachefs_init);
2124module_init(bcachefs_init);