   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Code for manipulating bucket marks for garbage collection.
   4 *
   5 * Copyright 2014 Datera, Inc.
   6 */
   7
   8#include "bcachefs.h"
   9#include "alloc_background.h"
  10#include "backpointers.h"
  11#include "bset.h"
  12#include "btree_gc.h"
  13#include "btree_update.h"
  14#include "buckets.h"
  15#include "buckets_waiting_for_journal.h"
  16#include "ec.h"
  17#include "error.h"
  18#include "inode.h"
  19#include "movinggc.h"
  20#include "recovery.h"
  21#include "reflink.h"
  22#include "replicas.h"
  23#include "subvolume.h"
  24#include "trace.h"
  25
  26#include <linux/preempt.h>
  27
  28static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
  29					      enum bch_data_type data_type,
  30					      s64 sectors)
  31{
  32	switch (data_type) {
  33	case BCH_DATA_btree:
  34		fs_usage->btree		+= sectors;
  35		break;
  36	case BCH_DATA_user:
  37	case BCH_DATA_parity:
  38		fs_usage->data		+= sectors;
  39		break;
  40	case BCH_DATA_cached:
  41		fs_usage->cached	+= sectors;
  42		break;
  43	default:
  44		break;
  45	}
  46}
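/*
 * A small illustration of the mapping above (a sketch, assuming a zeroed
 * struct bch_fs_usage_base b):
 *
 *	fs_usage_data_type_to_base(&b, BCH_DATA_btree,  128);
 *	fs_usage_data_type_to_base(&b, BCH_DATA_user,    64);
 *	fs_usage_data_type_to_base(&b, BCH_DATA_cached,   8);
 *
 * leaves b.btree == 128, b.data == 64 and b.cached == 8; superblock and
 * journal sectors fall through the default case and are accounted as
 * hidden elsewhere. Passing negative sector counts reverts earlier updates.
 */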
  47
  48void bch2_fs_usage_initialize(struct bch_fs *c)
  49{
  50	percpu_down_write(&c->mark_lock);
  51	struct bch_fs_usage *usage = c->usage_base;
  52
  53	for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++)
  54		bch2_fs_usage_acc_to_base(c, i);
  55
  56	for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
  57		usage->b.reserved += usage->persistent_reserved[i];
  58
  59	for (unsigned i = 0; i < c->replicas.nr; i++) {
  60		struct bch_replicas_entry_v1 *e =
  61			cpu_replicas_entry(&c->replicas, i);
  62
  63		fs_usage_data_type_to_base(&usage->b, e->data_type, usage->replicas[i]);
  64	}
  65
  66	for_each_member_device(c, ca) {
  67		struct bch_dev_usage dev = bch2_dev_usage_read(ca);
  68
  69		usage->b.hidden += (dev.d[BCH_DATA_sb].buckets +
  70				    dev.d[BCH_DATA_journal].buckets) *
  71			ca->mi.bucket_size;
  72	}
  73
  74	percpu_up_write(&c->mark_lock);
  75}
  76
  77static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
  78						  unsigned journal_seq,
  79						  bool gc)
  80{
  81	BUG_ON(!gc && !journal_seq);
  82
  83	return this_cpu_ptr(gc
  84			    ? ca->usage_gc
  85			    : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
  86}
  87
  88void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
  89{
  90	struct bch_fs *c = ca->fs;
  91	unsigned seq, i, u64s = dev_usage_u64s();
  92
  93	do {
  94		seq = read_seqcount_begin(&c->usage_lock);
  95		memcpy(usage, ca->usage_base, u64s * sizeof(u64));
  96		for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
  97			acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
  98	} while (read_seqcount_retry(&c->usage_lock, seq));
  99}
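/*
 * The reader above is the usual seqcount pattern: snapshot the base usage,
 * fold in the percpu accumulators, and retry if a writer raced with us.
 * Sketched for a single hypothetical counter under the same lock:
 *
 *	unsigned seq;
 *	u64 v;
 *
 *	do {
 *		seq = read_seqcount_begin(&c->usage_lock);
 *		v = ...;		// read the counter(s)
 *	} while (read_seqcount_retry(&c->usage_lock, seq));
 */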
 100
 101u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
 102{
 103	ssize_t offset = v - (u64 *) c->usage_base;
 104	unsigned i, seq;
 105	u64 ret;
 106
 107	BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
 108	percpu_rwsem_assert_held(&c->mark_lock);
 109
 110	do {
 111		seq = read_seqcount_begin(&c->usage_lock);
 112		ret = *v;
 113
 114		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
 115			ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
 116	} while (read_seqcount_retry(&c->usage_lock, seq));
 117
 118	return ret;
 119}
 120
 121struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
 122{
 123	struct bch_fs_usage_online *ret;
 124	unsigned nr_replicas = READ_ONCE(c->replicas.nr);
 125	unsigned seq, i;
 126retry:
 127	ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_KERNEL);
 128	if (unlikely(!ret))
 129		return NULL;
 130
 131	percpu_down_read(&c->mark_lock);
 132
 133	if (nr_replicas != c->replicas.nr) {
 134		nr_replicas = c->replicas.nr;
 135		percpu_up_read(&c->mark_lock);
 136		kfree(ret);
 137		goto retry;
 138	}
 139
 140	ret->online_reserved = percpu_u64_get(c->online_reserved);
 141
 142	do {
 143		seq = read_seqcount_begin(&c->usage_lock);
 144		unsafe_memcpy(&ret->u, c->usage_base,
 145			      __fs_usage_u64s(nr_replicas) * sizeof(u64),
 146			      "embedded variable length struct");
 147		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
 148			acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
 149					__fs_usage_u64s(nr_replicas));
 150	} while (read_seqcount_retry(&c->usage_lock, seq));
 151
 152	return ret;
 153}
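/*
 * Caller contract for bch2_fs_usage_read(): the result is kmalloc()ed and
 * sized for the replicas table as it was at read time (hence the retry when
 * c->replicas.nr changes under us), so the caller must free it:
 *
 *	struct bch_fs_usage_online *u = bch2_fs_usage_read(c);
 *	if (u) {
 *		// inspect u->u.b.data, u->online_reserved, ...
 *		kfree(u);
 *	}
 */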
 154
 155void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
 156{
 157	unsigned u64s = fs_usage_u64s(c);
 158
 159	BUG_ON(idx >= ARRAY_SIZE(c->usage));
 160
 161	preempt_disable();
 162	write_seqcount_begin(&c->usage_lock);
 163
 164	acc_u64s_percpu((u64 *) c->usage_base,
 165			(u64 __percpu *) c->usage[idx], u64s);
 166	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
 167
 168	rcu_read_lock();
 169	for_each_member_device_rcu(c, ca, NULL) {
 170		u64s = dev_usage_u64s();
 171
 172		acc_u64s_percpu((u64 *) ca->usage_base,
 173				(u64 __percpu *) ca->usage[idx], u64s);
 174		percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
 175	}
 176	rcu_read_unlock();
 177
 178	write_seqcount_end(&c->usage_lock);
 179	preempt_enable();
 180}
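/*
 * This is the write side of the seqcount protocol used by the readers
 * above: folding an accumulator into usage_base happens inside
 * write_seqcount_begin()/end() with preemption disabled, so concurrent
 * readers retry rather than observe counters mid-transfer.
 */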
 181
 182void bch2_fs_usage_to_text(struct printbuf *out,
 183			   struct bch_fs *c,
 184			   struct bch_fs_usage_online *fs_usage)
 185{
 186	unsigned i;
 187
 188	prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);
 189
 190	prt_printf(out, "hidden:\t\t\t\t%llu\n",
 191	       fs_usage->u.b.hidden);
 192	prt_printf(out, "data:\t\t\t\t%llu\n",
 193	       fs_usage->u.b.data);
 194	prt_printf(out, "cached:\t\t\t\t%llu\n",
 195	       fs_usage->u.b.cached);
 196	prt_printf(out, "reserved:\t\t\t%llu\n",
 197	       fs_usage->u.b.reserved);
 198	prt_printf(out, "nr_inodes:\t\t\t%llu\n",
 199	       fs_usage->u.b.nr_inodes);
 200	prt_printf(out, "online reserved:\t\t%llu\n",
 201	       fs_usage->online_reserved);
 202
 203	for (i = 0;
 204	     i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
 205	     i++) {
 206		prt_printf(out, "%u replicas:\n", i + 1);
 207		prt_printf(out, "\treserved:\t\t%llu\n",
 208		       fs_usage->u.persistent_reserved[i]);
 209	}
 210
 211	for (i = 0; i < c->replicas.nr; i++) {
 212		struct bch_replicas_entry_v1 *e =
 213			cpu_replicas_entry(&c->replicas, i);
 214
 215		prt_printf(out, "\t");
 216		bch2_replicas_entry_to_text(out, e);
 217		prt_printf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
 218	}
 219}
 220
 221static u64 reserve_factor(u64 r)
 222{
 223	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
 224}
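/*
 * reserve_factor() pads a reservation by roughly 1/2^RESERVE_FACTOR to
 * absorb accounting slop. Assuming RESERVE_FACTOR is 6, a worked example:
 * r = 1000 gives round_up(1000, 64) == 1024 and 1024 >> 6 == 16, so the
 * padded reservation is 1016 sectors, about 1.6% of overhead.
 */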
 225
 226u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
 227{
 228	return min(fs_usage->u.b.hidden +
 229		   fs_usage->u.b.btree +
 230		   fs_usage->u.b.data +
 231		   reserve_factor(fs_usage->u.b.reserved +
 232				  fs_usage->online_reserved),
 233		   c->capacity);
 234}
 235
 236static struct bch_fs_usage_short
 237__bch2_fs_usage_read_short(struct bch_fs *c)
 238{
 239	struct bch_fs_usage_short ret;
 240	u64 data, reserved;
 241
 242	ret.capacity = c->capacity -
 243		bch2_fs_usage_read_one(c, &c->usage_base->b.hidden);
 244
 245	data		= bch2_fs_usage_read_one(c, &c->usage_base->b.data) +
 246		bch2_fs_usage_read_one(c, &c->usage_base->b.btree);
 247	reserved	= bch2_fs_usage_read_one(c, &c->usage_base->b.reserved) +
 248		percpu_u64_get(c->online_reserved);
 249
 250	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
 251	ret.free	= ret.capacity - ret.used;
 252
 253	ret.nr_inodes	= bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes);
 254
 255	return ret;
 256}
 257
 258struct bch_fs_usage_short
 259bch2_fs_usage_read_short(struct bch_fs *c)
 260{
 261	struct bch_fs_usage_short ret;
 262
 263	percpu_down_read(&c->mark_lock);
 264	ret = __bch2_fs_usage_read_short(c);
 265	percpu_up_read(&c->mark_lock);
 266
 267	return ret;
 268}
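/*
 * Minimal usage sketch for the summary readout (this is the cheap path,
 * suitable for statfs-style reporting):
 *
 *	struct bch_fs_usage_short u = bch2_fs_usage_read_short(c);
 *
 *	pr_info("used %llu of %llu sectors, %llu inodes\n",
 *		u.used, u.capacity, u.nr_inodes);
 */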
 269
 270void bch2_dev_usage_init(struct bch_dev *ca)
 271{
 272	ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
 273}
 274
 275void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
 276{
 277	prt_tab(out);
 278	prt_str(out, "buckets");
 279	prt_tab_rjust(out);
 280	prt_str(out, "sectors");
 281	prt_tab_rjust(out);
 282	prt_str(out, "fragmented");
 283	prt_tab_rjust(out);
 284	prt_newline(out);
 285
 286	for (unsigned i = 0; i < BCH_DATA_NR; i++) {
 287		bch2_prt_data_type(out, i);
 288		prt_tab(out);
 289		prt_u64(out, usage->d[i].buckets);
 290		prt_tab_rjust(out);
 291		prt_u64(out, usage->d[i].sectors);
 292		prt_tab_rjust(out);
 293		prt_u64(out, usage->d[i].fragmented);
 294		prt_tab_rjust(out);
 295		prt_newline(out);
 296	}
 297}
 298
 299void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 300			   const struct bch_alloc_v4 *old,
 301			   const struct bch_alloc_v4 *new,
 302			   u64 journal_seq, bool gc)
 303{
 304	struct bch_fs_usage *fs_usage;
 305	struct bch_dev_usage *u;
 306
 307	preempt_disable();
 308	fs_usage = fs_usage_ptr(c, journal_seq, gc);
 309
 310	if (data_type_is_hidden(old->data_type))
 311		fs_usage->b.hidden -= ca->mi.bucket_size;
 312	if (data_type_is_hidden(new->data_type))
 313		fs_usage->b.hidden += ca->mi.bucket_size;
 314
 315	u = dev_usage_ptr(ca, journal_seq, gc);
 316
 317	u->d[old->data_type].buckets--;
 318	u->d[new->data_type].buckets++;
 319
 320	u->d[old->data_type].sectors -= bch2_bucket_sectors_dirty(*old);
 321	u->d[new->data_type].sectors += bch2_bucket_sectors_dirty(*new);
 322
 323	u->d[BCH_DATA_cached].sectors += new->cached_sectors;
 324	u->d[BCH_DATA_cached].sectors -= old->cached_sectors;
 325
 326	u->d[old->data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, *old);
 327	u->d[new->data_type].fragmented += bch2_bucket_sectors_fragmented(ca, *new);
 328
 329	preempt_enable();
 330}
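/*
 * Note that the update is expressed entirely as old/new deltas: every
 * counter the old alloc state contributed to is decremented and the new
 * state's contribution added, so old == new is a no-op. That is what lets
 * the transactional and GC paths share this one helper.
 */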
 331
 332static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
 333{
 334	return (struct bch_alloc_v4) {
 335		.gen		= b.gen,
 336		.data_type	= b.data_type,
 337		.dirty_sectors	= b.dirty_sectors,
 338		.cached_sectors	= b.cached_sectors,
 339		.stripe		= b.stripe,
 340	};
 341}
 342
 343void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
 344			     struct bucket *old, struct bucket *new)
 345{
 346	struct bch_alloc_v4 old_a = bucket_m_to_alloc(*old);
 347	struct bch_alloc_v4 new_a = bucket_m_to_alloc(*new);
 348
 349	bch2_dev_usage_update(c, ca, &old_a, &new_a, 0, true);
 350}
 351
 352static inline int __update_replicas(struct bch_fs *c,
 353				    struct bch_fs_usage *fs_usage,
 354				    struct bch_replicas_entry_v1 *r,
 355				    s64 sectors)
 356{
 357	int idx = bch2_replicas_entry_idx(c, r);
 358
 359	if (idx < 0)
 360		return -1;
 361
 362	fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
 363	fs_usage->replicas[idx]		+= sectors;
 364	return 0;
 365}
 366
 367int bch2_update_replicas(struct bch_fs *c, struct bkey_s_c k,
 368			 struct bch_replicas_entry_v1 *r, s64 sectors,
 369			 unsigned journal_seq, bool gc)
 370{
 371	struct bch_fs_usage *fs_usage;
 372	int idx, ret = 0;
 373	struct printbuf buf = PRINTBUF;
 374
 375	percpu_down_read(&c->mark_lock);
 376
 377	idx = bch2_replicas_entry_idx(c, r);
 378	if (idx < 0 &&
 379	    fsck_err(c, ptr_to_missing_replicas_entry,
 380		     "no replicas entry\n  while marking %s",
 381		     (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
 382		percpu_up_read(&c->mark_lock);
 383		ret = bch2_mark_replicas(c, r);
 384		percpu_down_read(&c->mark_lock);
 385
 386		if (ret)
 387			goto err;
 388		idx = bch2_replicas_entry_idx(c, r);
 389	}
 390	if (idx < 0) {
 391		ret = -1;
 392		goto err;
 393	}
 394
 395	preempt_disable();
 396	fs_usage = fs_usage_ptr(c, journal_seq, gc);
 397	fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
 398	fs_usage->replicas[idx]		+= sectors;
 399	preempt_enable();
 400err:
 401fsck_err:
 402	percpu_up_read(&c->mark_lock);
 403	printbuf_exit(&buf);
 404	return ret;
 405}
 406
 407static inline int update_cached_sectors(struct bch_fs *c,
 408			struct bkey_s_c k,
 409			unsigned dev, s64 sectors,
 410			unsigned journal_seq, bool gc)
 411{
 412	struct bch_replicas_padded r;
 413
 414	bch2_replicas_entry_cached(&r.e, dev);
 415
 416	return bch2_update_replicas(c, k, &r.e, sectors, journal_seq, gc);
 417}
 418
 419static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more,
 420				     gfp_t gfp)
 421{
 422	struct replicas_delta_list *d = trans->fs_usage_deltas;
 423	unsigned new_size = d ? (d->size + more) * 2 : 128;
 424	unsigned alloc_size = sizeof(*d) + new_size;
 425
 426	WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
 427
 428	if (!d || d->used + more > d->size) {
 429		d = krealloc(d, alloc_size, gfp|__GFP_ZERO);
 430
 431		if (unlikely(!d)) {
 432			if (alloc_size > REPLICAS_DELTA_LIST_MAX)
 433				return -ENOMEM;
 434
 435			d = mempool_alloc(&trans->c->replicas_delta_pool, gfp);
 436			if (!d)
 437				return -ENOMEM;
 438
 439			memset(d, 0, REPLICAS_DELTA_LIST_MAX);
 440
 441			if (trans->fs_usage_deltas)
 442				memcpy(d, trans->fs_usage_deltas,
 443				       trans->fs_usage_deltas->size + sizeof(*d));
 444
 445			new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
 446			kfree(trans->fs_usage_deltas);
 447		}
 448
 449		d->size = new_size;
 450		trans->fs_usage_deltas = d;
 451	}
 452
 453	return 0;
 454}
 455
 456int bch2_replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
 457{
 458	return allocate_dropping_locks_errcode(trans,
 459				__replicas_deltas_realloc(trans, more, _gfp));
 460}
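/*
 * Growth policy, in short: the delta list starts at 128 bytes of entries
 * and doubles from (size + more) on each realloc; if krealloc() fails, a
 * fixed-size REPLICAS_DELTA_LIST_MAX buffer from the mempool is used
 * instead, so the transaction can still make forward progress under memory
 * pressure (the WARN_ON_ONCE above flags lists that outgrow that fallback).
 */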
 461
 462int bch2_update_replicas_list(struct btree_trans *trans,
 463			 struct bch_replicas_entry_v1 *r,
 464			 s64 sectors)
 465{
 466	struct replicas_delta_list *d;
 467	struct replicas_delta *n;
 468	unsigned b;
 469	int ret;
 470
 471	if (!sectors)
 472		return 0;
 473
 474	b = replicas_entry_bytes(r) + 8;
 475	ret = bch2_replicas_deltas_realloc(trans, b);
 476	if (ret)
 477		return ret;
 478
 479	d = trans->fs_usage_deltas;
 480	n = (void *) d->d + d->used;
 481	n->delta = sectors;
 482	unsafe_memcpy((void *) n + offsetof(struct replicas_delta, r),
 483		      r, replicas_entry_bytes(r),
  484		      "flexible array member embedded in struct with padding");
 485	bch2_replicas_entry_sort(&n->r);
 486	d->used += b;
 487	return 0;
 488}
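/*
 * Each queued delta is laid out as the 8-byte sector count followed by the
 * variable-length replicas entry it applies to (hence the
 * replicas_entry_bytes(r) + 8 sizing above); replicas_delta_next() walks
 * entries by the same stride when the list is applied or reverted.
 */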
 489
 490int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64 sectors)
 491{
 492	struct bch_replicas_padded r;
 493
 494	bch2_replicas_entry_cached(&r.e, dev);
 495
 496	return bch2_update_replicas_list(trans, &r.e, sectors);
 497}
 498
 499int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 500			      size_t b, enum bch_data_type data_type,
 501			      unsigned sectors, struct gc_pos pos,
 502			      unsigned flags)
 503{
 504	struct bucket old, new, *g;
 505	int ret = 0;
 506
 507	BUG_ON(!(flags & BTREE_TRIGGER_GC));
 508	BUG_ON(data_type != BCH_DATA_sb &&
 509	       data_type != BCH_DATA_journal);
 510
 511	/*
 512	 * Backup superblock might be past the end of our normal usable space:
 513	 */
 514	if (b >= ca->mi.nbuckets)
 515		return 0;
 516
 517	percpu_down_read(&c->mark_lock);
 518	g = gc_bucket(ca, b);
 519
 520	bucket_lock(g);
 521	old = *g;
 522
 523	if (bch2_fs_inconsistent_on(g->data_type &&
 524			g->data_type != data_type, c,
 525			"different types of data in same bucket: %s, %s",
 526			bch2_data_type_str(g->data_type),
 527			bch2_data_type_str(data_type))) {
 528		ret = -EIO;
 529		goto err;
 530	}
 531
 532	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
 533			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
 534			ca->dev_idx, b, g->gen,
 535			bch2_data_type_str(g->data_type ?: data_type),
 536			g->dirty_sectors, sectors)) {
 537		ret = -EIO;
 538		goto err;
 539	}
 540
 541	g->data_type = data_type;
 542	g->dirty_sectors += sectors;
 543	new = *g;
 544err:
 545	bucket_unlock(g);
 546	if (!ret)
 547		bch2_dev_usage_update_m(c, ca, &old, &new);
 548	percpu_up_read(&c->mark_lock);
 549	return ret;
 550}
 551
 552int bch2_check_bucket_ref(struct btree_trans *trans,
 553			  struct bkey_s_c k,
 554			  const struct bch_extent_ptr *ptr,
 555			  s64 sectors, enum bch_data_type ptr_data_type,
 556			  u8 b_gen, u8 bucket_data_type,
 557			  u32 bucket_sectors)
 558{
 559	struct bch_fs *c = trans->c;
 560	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
 561	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
 562	struct printbuf buf = PRINTBUF;
 563	int ret = 0;
 564
 565	if (bucket_data_type == BCH_DATA_cached)
 566		bucket_data_type = BCH_DATA_user;
 567
 568	if ((bucket_data_type == BCH_DATA_stripe && ptr_data_type == BCH_DATA_user) ||
 569	    (bucket_data_type == BCH_DATA_user   && ptr_data_type == BCH_DATA_stripe))
 570		bucket_data_type = ptr_data_type = BCH_DATA_stripe;
 571
 572	if (gen_after(ptr->gen, b_gen)) {
 573		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
 574			      BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen,
 575			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
 576			"while marking %s",
 577			ptr->dev, bucket_nr, b_gen,
 578			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
 579			ptr->gen,
 580			(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
 581		ret = -EIO;
 582		goto err;
 583	}
 584
 585	if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
 586		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
 587			      BCH_FSCK_ERR_ptr_too_stale,
 588			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
 589			"while marking %s",
 590			ptr->dev, bucket_nr, b_gen,
 591			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
 592			ptr->gen,
 593			(printbuf_reset(&buf),
 594			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
 595		ret = -EIO;
 596		goto err;
 597	}
 598
 599	if (b_gen != ptr->gen && !ptr->cached) {
 600		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
 601			      BCH_FSCK_ERR_stale_dirty_ptr,
 602			"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
 603			"while marking %s",
 604			ptr->dev, bucket_nr, b_gen,
 605			*bucket_gen(ca, bucket_nr),
 606			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
 607			ptr->gen,
 608			(printbuf_reset(&buf),
 609			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
 610		ret = -EIO;
 611		goto err;
 612	}
 613
 614	if (b_gen != ptr->gen) {
 615		ret = 1;
 616		goto out;
 617	}
 618
 619	if (!data_type_is_empty(bucket_data_type) &&
 620	    ptr_data_type &&
 621	    bucket_data_type != ptr_data_type) {
 622		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
 623			      BCH_FSCK_ERR_ptr_bucket_data_type_mismatch,
 624			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
 625			"while marking %s",
 626			ptr->dev, bucket_nr, b_gen,
 627			bch2_data_type_str(bucket_data_type),
 628			bch2_data_type_str(ptr_data_type),
 629			(printbuf_reset(&buf),
 630			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
 631		ret = -EIO;
 632		goto err;
 633	}
 634
 635	if ((u64) bucket_sectors + sectors > U32_MAX) {
 636		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
 637			      BCH_FSCK_ERR_bucket_sector_count_overflow,
 638			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
 639			"while marking %s",
 640			ptr->dev, bucket_nr, b_gen,
 641			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
 642			bucket_sectors, sectors,
 643			(printbuf_reset(&buf),
 644			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
 645		ret = -EIO;
 646		goto err;
 647	}
 648out:
 649	printbuf_exit(&buf);
 650	return ret;
 651err:
 652	bch2_dump_trans_updates(trans);
 653	goto out;
 654}
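/*
 * Return value summary for bch2_check_bucket_ref(): a pointer gen newer
 * than the bucket's, a too-stale gen, a stale dirty pointer, mismatched
 * data types or a sector count overflow are all reported as -EIO; a merely
 * stale cached pointer returns 1 so callers can skip it without treating
 * the key as corrupt; 0 means the reference checks out.
 */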
 655
 656void bch2_trans_fs_usage_revert(struct btree_trans *trans,
 657				struct replicas_delta_list *deltas)
 658{
 659	struct bch_fs *c = trans->c;
 660	struct bch_fs_usage *dst;
 661	struct replicas_delta *d, *top = (void *) deltas->d + deltas->used;
 662	s64 added = 0;
 663	unsigned i;
 664
 665	percpu_down_read(&c->mark_lock);
 666	preempt_disable();
 667	dst = fs_usage_ptr(c, trans->journal_res.seq, false);
 668
 669	/* revert changes: */
 670	for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
 671		switch (d->r.data_type) {
 672		case BCH_DATA_btree:
 673		case BCH_DATA_user:
 674		case BCH_DATA_parity:
 675			added += d->delta;
 676		}
 677		BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
 678	}
 679
 680	dst->b.nr_inodes -= deltas->nr_inodes;
 681
 682	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
 683		added				-= deltas->persistent_reserved[i];
 684		dst->b.reserved			-= deltas->persistent_reserved[i];
 685		dst->persistent_reserved[i]	-= deltas->persistent_reserved[i];
 686	}
 687
 688	if (added > 0) {
 689		trans->disk_res->sectors += added;
 690		this_cpu_add(*c->online_reserved, added);
 691	}
 692
 693	preempt_enable();
 694	percpu_up_read(&c->mark_lock);
 695}
 696
 697void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
 698{
 699	struct bch_fs *c = trans->c;
 700	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
 701	static int warned_disk_usage = 0;
 702	bool warn = false;
 703
 704	percpu_down_read(&c->mark_lock);
 705	preempt_disable();
 706	struct bch_fs_usage_base *dst = &fs_usage_ptr(c, trans->journal_res.seq, false)->b;
 707	struct bch_fs_usage_base *src = &trans->fs_usage_delta;
 708
 709	s64 added = src->btree + src->data + src->reserved;
 710
 711	/*
 712	 * Not allowed to reduce sectors_available except by getting a
 713	 * reservation:
 714	 */
 715	s64 should_not_have_added = added - (s64) disk_res_sectors;
 716	if (unlikely(should_not_have_added > 0)) {
 717		u64 old, new, v = atomic64_read(&c->sectors_available);
 718
 719		do {
 720			old = v;
 721			new = max_t(s64, 0, old - should_not_have_added);
 722		} while ((v = atomic64_cmpxchg(&c->sectors_available,
 723					       old, new)) != old);
 724
 725		added -= should_not_have_added;
 726		warn = true;
 727	}
 728
 729	if (added > 0) {
 730		trans->disk_res->sectors -= added;
 731		this_cpu_sub(*c->online_reserved, added);
 732	}
 733
 734	dst->hidden	+= src->hidden;
 735	dst->btree	+= src->btree;
 736	dst->data	+= src->data;
 737	dst->cached	+= src->cached;
 738	dst->reserved	+= src->reserved;
 739	dst->nr_inodes	+= src->nr_inodes;
 740
 741	preempt_enable();
 742	percpu_up_read(&c->mark_lock);
 743
 744	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
 745		bch2_trans_inconsistent(trans,
  746					"disk usage increased %lli more than %llu sectors reserved",
 747					should_not_have_added, disk_res_sectors);
 748}
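/*
 * The atomic64_cmpxchg() loop above is a standard lock-free
 * read-modify-write: if another CPU updated c->sectors_available between
 * the read and the cmpxchg, we retry with the fresh value, clamping at
 * zero so an unexpected usage increase can never drive the global counter
 * negative.
 */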
 749
 750int bch2_trans_fs_usage_apply(struct btree_trans *trans,
 751			      struct replicas_delta_list *deltas)
 752{
 753	struct bch_fs *c = trans->c;
 754	struct replicas_delta *d, *d2;
 755	struct replicas_delta *top = (void *) deltas->d + deltas->used;
 756	struct bch_fs_usage *dst;
 757	unsigned i;
 758
 759	percpu_down_read(&c->mark_lock);
 760	preempt_disable();
 761	dst = fs_usage_ptr(c, trans->journal_res.seq, false);
 762
 763	for (d = deltas->d; d != top; d = replicas_delta_next(d))
 764		if (__update_replicas(c, dst, &d->r, d->delta))
 765			goto need_mark;
 766
 767	dst->b.nr_inodes += deltas->nr_inodes;
 768
 769	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
 770		dst->b.reserved			+= deltas->persistent_reserved[i];
 771		dst->persistent_reserved[i]	+= deltas->persistent_reserved[i];
 772	}
 773
 774	preempt_enable();
 775	percpu_up_read(&c->mark_lock);
 776	return 0;
 777need_mark:
 778	/* revert changes: */
 779	for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
 780		BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));
 781
 782	preempt_enable();
 783	percpu_up_read(&c->mark_lock);
 784	return -1;
 785}
 786
 787/* KEY_TYPE_extent: */
 788
 789static int __mark_pointer(struct btree_trans *trans,
 790			  struct bkey_s_c k,
 791			  const struct bch_extent_ptr *ptr,
 792			  s64 sectors, enum bch_data_type ptr_data_type,
 793			  u8 bucket_gen, u8 *bucket_data_type,
 794			  u32 *dirty_sectors, u32 *cached_sectors)
 795{
 796	u32 *dst_sectors = !ptr->cached
 797		? dirty_sectors
 798		: cached_sectors;
 799	int ret = bch2_check_bucket_ref(trans, k, ptr, sectors, ptr_data_type,
 800				   bucket_gen, *bucket_data_type, *dst_sectors);
 801
 802	if (ret)
 803		return ret;
 804
 805	*dst_sectors += sectors;
 806
 807	if (!*dirty_sectors && !*cached_sectors)
 808		*bucket_data_type = 0;
 809	else if (*bucket_data_type != BCH_DATA_stripe)
 810		*bucket_data_type = ptr_data_type;
 811
 812	return 0;
 813}
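/*
 * __mark_pointer() credits cached pointers to cached_sectors and dirty
 * pointers to dirty_sectors; once both counts reach zero the bucket's data
 * type is cleared so the bucket can be reused for a different data type
 * (stripe buckets keep their type while the stripe exists).
 */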
 814
 815static int bch2_trigger_pointer(struct btree_trans *trans,
 816			enum btree_id btree_id, unsigned level,
 817			struct bkey_s_c k, struct extent_ptr_decoded p,
 818			s64 *sectors,
 819			unsigned flags)
 820{
 821	bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
 822	struct bpos bucket;
 823	struct bch_backpointer bp;
 824
 825	bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
 826	*sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);
 827
 828	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
 829		struct btree_iter iter;
 830		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, &iter, bucket);
 831		int ret = PTR_ERR_OR_ZERO(a);
 832		if (ret)
 833			return ret;
 834
 835		ret = __mark_pointer(trans, k, &p.ptr, *sectors, bp.data_type,
 836				     a->v.gen, &a->v.data_type,
 837				     &a->v.dirty_sectors, &a->v.cached_sectors) ?:
 838			bch2_trans_update(trans, &iter, &a->k_i, 0);
 839		bch2_trans_iter_exit(trans, &iter);
 840
 841		if (ret)
 842			return ret;
 843
 844		if (!p.ptr.cached) {
 845			ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
 846			if (ret)
 847				return ret;
 848		}
 849	}
 850
 851	if (flags & BTREE_TRIGGER_GC) {
 852		struct bch_fs *c = trans->c;
 853		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
 854		enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
 855
 856		percpu_down_read(&c->mark_lock);
 857		struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
 858		bucket_lock(g);
 859		struct bucket old = *g;
 860
 861		u8 bucket_data_type = g->data_type;
 862		int ret = __mark_pointer(trans, k, &p.ptr, *sectors,
 863				     data_type, g->gen,
 864				     &bucket_data_type,
 865				     &g->dirty_sectors,
 866				     &g->cached_sectors);
 867		if (ret) {
 868			bucket_unlock(g);
 869			percpu_up_read(&c->mark_lock);
 870			return ret;
 871		}
 872
 873		g->data_type = bucket_data_type;
 874		struct bucket new = *g;
 875		bucket_unlock(g);
 876		bch2_dev_usage_update_m(c, ca, &old, &new);
 877		percpu_up_read(&c->mark_lock);
 878	}
 879
 880	return 0;
 881}
 882
 883static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
 884				struct bkey_s_c k,
 885				struct extent_ptr_decoded p,
 886				enum bch_data_type data_type,
 887				s64 sectors, unsigned flags)
 888{
 889	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
 890		struct btree_iter iter;
 891		struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
 892				BTREE_ID_stripes, POS(0, p.ec.idx),
 893				BTREE_ITER_WITH_UPDATES, stripe);
 894		int ret = PTR_ERR_OR_ZERO(s);
 895		if (unlikely(ret)) {
 896			bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
 897				"pointer to nonexistent stripe %llu",
 898				(u64) p.ec.idx);
 899			goto err;
 900		}
 901
 902		if (!bch2_ptr_matches_stripe(&s->v, p)) {
 903			bch2_trans_inconsistent(trans,
 904				"stripe pointer doesn't match stripe %llu",
 905				(u64) p.ec.idx);
 906			ret = -EIO;
 907			goto err;
 908		}
 909
 910		stripe_blockcount_set(&s->v, p.ec.block,
 911			stripe_blockcount_get(&s->v, p.ec.block) +
 912			sectors);
 913
 914		struct bch_replicas_padded r;
 915		bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
 916		r.e.data_type = data_type;
 917		ret = bch2_update_replicas_list(trans, &r.e, sectors);
 918err:
 919		bch2_trans_iter_exit(trans, &iter);
 920		return ret;
 921	}
 922
 923	if (flags & BTREE_TRIGGER_GC) {
 924		struct bch_fs *c = trans->c;
 925
 926		BUG_ON(!(flags & BTREE_TRIGGER_GC));
 927
 928		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
 929		if (!m) {
 930			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
 931				(u64) p.ec.idx);
 932			return -BCH_ERR_ENOMEM_mark_stripe_ptr;
 933		}
 934
 935		mutex_lock(&c->ec_stripes_heap_lock);
 936
 937		if (!m || !m->alive) {
 938			mutex_unlock(&c->ec_stripes_heap_lock);
 939			struct printbuf buf = PRINTBUF;
 940			bch2_bkey_val_to_text(&buf, c, k);
 941			bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n  while marking %s",
 942					    (u64) p.ec.idx, buf.buf);
 943			printbuf_exit(&buf);
 944			bch2_inconsistent_error(c);
 945			return -EIO;
 946		}
 947
 948		m->block_sectors[p.ec.block] += sectors;
 949
 950		struct bch_replicas_padded r = m->r;
 951		mutex_unlock(&c->ec_stripes_heap_lock);
 952
 953		r.e.data_type = data_type;
 954		bch2_update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
 955	}
 956
 957	return 0;
 958}
 959
 960static int __trigger_extent(struct btree_trans *trans,
 961			    enum btree_id btree_id, unsigned level,
 962			    struct bkey_s_c k, unsigned flags)
 963{
 964	bool gc = flags & BTREE_TRIGGER_GC;
 965	struct bch_fs *c = trans->c;
 966	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 967	const union bch_extent_entry *entry;
 968	struct extent_ptr_decoded p;
 969	struct bch_replicas_padded r;
 970	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
 971		? BCH_DATA_btree
 972		: BCH_DATA_user;
 973	s64 dirty_sectors = 0;
 974	int ret = 0;
 975
 976	r.e.data_type	= data_type;
 977	r.e.nr_devs	= 0;
 978	r.e.nr_required	= 1;
 979
 980	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
 981		s64 disk_sectors;
 982		ret = bch2_trigger_pointer(trans, btree_id, level, k, p, &disk_sectors, flags);
 983		if (ret < 0)
 984			return ret;
 985
 986		bool stale = ret > 0;
 987
 988		if (p.ptr.cached) {
 989			if (!stale) {
 990				ret = !gc
 991					? bch2_update_cached_sectors_list(trans, p.ptr.dev, disk_sectors)
 992					: update_cached_sectors(c, k, p.ptr.dev, disk_sectors, 0, true);
 993				bch2_fs_fatal_err_on(ret && gc, c, "%s(): no replicas entry while updating cached sectors",
 994						     __func__);
 995				if (ret)
 996					return ret;
 997			}
 998		} else if (!p.has_ec) {
 999			dirty_sectors	       += disk_sectors;
1000			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
1001		} else {
1002			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
1003			if (ret)
1004				return ret;
1005
1006			/*
1007			 * There may be other dirty pointers in this extent, but
1008			 * if so they're not required for mounting if we have an
1009			 * erasure coded pointer in this extent:
1010			 */
1011			r.e.nr_required = 0;
1012		}
1013	}
1014
1015	if (r.e.nr_devs) {
1016		ret = !gc
1017			? bch2_update_replicas_list(trans, &r.e, dirty_sectors)
1018			: bch2_update_replicas(c, k, &r.e, dirty_sectors, 0, true);
1019		if (unlikely(ret && gc)) {
1020			struct printbuf buf = PRINTBUF;
1021
1022			bch2_bkey_val_to_text(&buf, c, k);
1023			bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
1024			printbuf_exit(&buf);
1025		}
1026		if (ret)
1027			return ret;
1028	}
1029
1030	return 0;
1031}
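/*
 * __trigger_extent() accumulates all non-cached, non-erasure-coded
 * pointers into a single replicas entry and accounts their dirty sectors
 * in one update; erasure coded pointers are instead accounted against
 * their stripe, and nr_required drops to 0 because the stripe already
 * provides the redundancy.
 */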
1032
1033int bch2_trigger_extent(struct btree_trans *trans,
1034			enum btree_id btree_id, unsigned level,
1035			struct bkey_s_c old, struct bkey_s new,
1036			unsigned flags)
1037{
1038	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
1039	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
1040	unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
1041	unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;
1042
1043	/* if pointers aren't changing - nothing to do: */
1044	if (new_ptrs_bytes == old_ptrs_bytes &&
1045	    !memcmp(new_ptrs.start,
1046		    old_ptrs.start,
1047		    new_ptrs_bytes))
1048		return 0;
1049
1050	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
1051		struct bch_fs *c = trans->c;
1052		int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) -
1053			  (int) bch2_bkey_needs_rebalance(c, old);
1054
1055		if (mod) {
1056			int ret = bch2_btree_bit_mod(trans, BTREE_ID_rebalance_work, new.k->p, mod > 0);
1057			if (ret)
1058				return ret;
1059		}
1060	}
1061
1062	if (flags & (BTREE_TRIGGER_TRANSACTIONAL|BTREE_TRIGGER_GC))
1063		return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree_id, level, old, new, flags);
1064
1065	return 0;
1066}
1067
1068/* KEY_TYPE_reservation */
1069
1070static int __trigger_reservation(struct btree_trans *trans,
1071				 enum btree_id btree_id, unsigned level,
1072				 struct bkey_s_c k, unsigned flags)
1073{
1074	struct bch_fs *c = trans->c;
1075	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1076	s64 sectors = (s64) k.k->size * replicas;
1077
1078	if (flags & BTREE_TRIGGER_OVERWRITE)
1079		sectors = -sectors;
1080
1081	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
1082		int ret = bch2_replicas_deltas_realloc(trans, 0);
1083		if (ret)
1084			return ret;
1085
1086		struct replicas_delta_list *d = trans->fs_usage_deltas;
1087		replicas = min(replicas, ARRAY_SIZE(d->persistent_reserved));
1088
1089		d->persistent_reserved[replicas - 1] += sectors;
1090	}
1091
1092	if (flags & BTREE_TRIGGER_GC) {
1093		percpu_down_read(&c->mark_lock);
1094		preempt_disable();
1095
1096		struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage_gc);
1097
1098		replicas = min(replicas, ARRAY_SIZE(fs_usage->persistent_reserved));
1099		fs_usage->b.reserved				+= sectors;
1100		fs_usage->persistent_reserved[replicas - 1]	+= sectors;
1101
1102		preempt_enable();
1103		percpu_up_read(&c->mark_lock);
1104	}
1105
1106	return 0;
1107}
1108
1109int bch2_trigger_reservation(struct btree_trans *trans,
1110			  enum btree_id btree_id, unsigned level,
1111			  struct bkey_s_c old, struct bkey_s new,
1112			  unsigned flags)
1113{
1114	return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
1115}
1116
1117/* Mark superblocks: */
1118
1119static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1120				    struct bch_dev *ca, size_t b,
1121				    enum bch_data_type type,
1122				    unsigned sectors)
1123{
1124	struct bch_fs *c = trans->c;
1125	struct btree_iter iter;
1126	struct bkey_i_alloc_v4 *a;
1127	int ret = 0;
1128
1129	/*
1130	 * Backup superblock might be past the end of our normal usable space:
1131	 */
1132	if (b >= ca->mi.nbuckets)
1133		return 0;
1134
1135	a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
1136	if (IS_ERR(a))
1137		return PTR_ERR(a);
1138
1139	if (a->v.data_type && type && a->v.data_type != type) {
1140		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1141			      BCH_FSCK_ERR_bucket_metadata_type_mismatch,
1142			"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
1143			"while marking %s",
1144			iter.pos.inode, iter.pos.offset, a->v.gen,
1145			bch2_data_type_str(a->v.data_type),
1146			bch2_data_type_str(type),
1147			bch2_data_type_str(type));
1148		ret = -EIO;
1149		goto err;
1150	}
1151
1152	if (a->v.data_type	!= type ||
1153	    a->v.dirty_sectors	!= sectors) {
1154		a->v.data_type		= type;
1155		a->v.dirty_sectors	= sectors;
1156		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1157	}
1158err:
1159	bch2_trans_iter_exit(trans, &iter);
1160	return ret;
1161}
1162
1163int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1164				    struct bch_dev *ca, size_t b,
1165				    enum bch_data_type type,
1166				    unsigned sectors)
1167{
1168	return commit_do(trans, NULL, NULL, 0,
1169			__bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
1170}
1171
1172static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
1173					    struct bch_dev *ca,
1174					    u64 start, u64 end,
1175					    enum bch_data_type type,
1176					    u64 *bucket, unsigned *bucket_sectors)
1177{
1178	do {
1179		u64 b = sector_to_bucket(ca, start);
1180		unsigned sectors =
1181			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
1182
1183		if (b != *bucket && *bucket_sectors) {
1184			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
1185								  type, *bucket_sectors);
1186			if (ret)
1187				return ret;
1188
1189			*bucket_sectors = 0;
1190		}
1191
1192		*bucket		= b;
1193		*bucket_sectors	+= sectors;
1194		start += sectors;
1195	} while (start < end);
1196
1197	return 0;
1198}
1199
1200static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
1201				    struct bch_dev *ca)
1202{
1203	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
1204	u64 bucket = 0;
1205	unsigned i, bucket_sectors = 0;
1206	int ret;
1207
1208	for (i = 0; i < layout->nr_superblocks; i++) {
1209		u64 offset = le64_to_cpu(layout->sb_offset[i]);
1210
1211		if (offset == BCH_SB_SECTOR) {
1212			ret = bch2_trans_mark_metadata_sectors(trans, ca,
1213						0, BCH_SB_SECTOR,
1214						BCH_DATA_sb, &bucket, &bucket_sectors);
1215			if (ret)
1216				return ret;
1217		}
1218
1219		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
1220				      offset + (1 << layout->sb_max_size_bits),
1221				      BCH_DATA_sb, &bucket, &bucket_sectors);
1222		if (ret)
1223			return ret;
1224	}
1225
1226	if (bucket_sectors) {
1227		ret = bch2_trans_mark_metadata_bucket(trans, ca,
1228				bucket, BCH_DATA_sb, bucket_sectors);
1229		if (ret)
1230			return ret;
1231	}
1232
1233	for (i = 0; i < ca->journal.nr; i++) {
1234		ret = bch2_trans_mark_metadata_bucket(trans, ca,
1235				ca->journal.buckets[i],
1236				BCH_DATA_journal, ca->mi.bucket_size);
1237		if (ret)
1238			return ret;
1239	}
1240
1241	return 0;
1242}
1243
1244int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
1245{
1246	int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));
1247
1248	bch_err_fn(c, ret);
1249	return ret;
1250}
1251
1252int bch2_trans_mark_dev_sbs(struct bch_fs *c)
1253{
1254	for_each_online_member(c, ca) {
1255		int ret = bch2_trans_mark_dev_sb(c, ca);
1256		if (ret) {
1257			percpu_ref_put(&ca->ref);
1258			return ret;
1259		}
1260	}
1261
1262	return 0;
1263}
1264
1265/* Disk reservations: */
1266
1267#define SECTORS_CACHE	1024
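/*
 * Reservations are satisfied from a per-cpu cache first; when it runs dry
 * the CPU pulls its request plus SECTORS_CACHE more from the global
 * c->sectors_available. E.g. a 16 sector request against an empty cache
 * pulls 1040 sectors, leaving 1024 cached locally, so the next 64 requests
 * of that size never touch the shared atomic.
 */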
1268
1269int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1270			      u64 sectors, int flags)
1271{
1272	struct bch_fs_pcpu *pcpu;
1273	u64 old, v, get;
1274	s64 sectors_available;
1275	int ret;
1276
1277	percpu_down_read(&c->mark_lock);
1278	preempt_disable();
1279	pcpu = this_cpu_ptr(c->pcpu);
1280
1281	if (sectors <= pcpu->sectors_available)
1282		goto out;
1283
1284	v = atomic64_read(&c->sectors_available);
1285	do {
1286		old = v;
1287		get = min((u64) sectors + SECTORS_CACHE, old);
1288
1289		if (get < sectors) {
1290			preempt_enable();
1291			goto recalculate;
1292		}
1293	} while ((v = atomic64_cmpxchg(&c->sectors_available,
1294				       old, old - get)) != old);
1295
1296	pcpu->sectors_available		+= get;
1297
1298out:
1299	pcpu->sectors_available		-= sectors;
1300	this_cpu_add(*c->online_reserved, sectors);
1301	res->sectors			+= sectors;
1302
1303	preempt_enable();
1304	percpu_up_read(&c->mark_lock);
1305	return 0;
1306
1307recalculate:
1308	mutex_lock(&c->sectors_available_lock);
1309
1310	percpu_u64_set(&c->pcpu->sectors_available, 0);
1311	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
1312
1313	if (sectors <= sectors_available ||
1314	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1315		atomic64_set(&c->sectors_available,
1316			     max_t(s64, 0, sectors_available - sectors));
1317		this_cpu_add(*c->online_reserved, sectors);
1318		res->sectors			+= sectors;
1319		ret = 0;
1320	} else {
1321		atomic64_set(&c->sectors_available, sectors_available);
1322		ret = -BCH_ERR_ENOSPC_disk_reservation;
1323	}
1324
1325	mutex_unlock(&c->sectors_available_lock);
1326	percpu_up_read(&c->mark_lock);
1327
1328	return ret;
1329}
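/*
 * Typical call pattern (a sketch, assuming a zero-initialized reservation):
 *
 *	struct disk_reservation res = { 0 };
 *	int ret = __bch2_disk_reservation_add(c, &res, sectors, 0);
 *
 * On success res.sectors grows by the request and the same amount is
 * charged to c->online_reserved; passing BCH_DISK_RESERVATION_NOFAIL makes
 * the slow path succeed even past the ENOSPC check.
 */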
1330
1331/* Startup/shutdown: */
1332
1333static void bucket_gens_free_rcu(struct rcu_head *rcu)
1334{
1335	struct bucket_gens *buckets =
1336		container_of(rcu, struct bucket_gens, rcu);
1337
1338	kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
1339}
1340
1341int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1342{
1343	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
1344	unsigned long *buckets_nouse = NULL;
1345	bool resize = ca->bucket_gens != NULL;
1346	int ret;
1347
1348	if (!(bucket_gens	= kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
1349					    GFP_KERNEL|__GFP_ZERO))) {
1350		ret = -BCH_ERR_ENOMEM_bucket_gens;
1351		goto err;
1352	}
1353
1354	if ((c->opts.buckets_nouse &&
1355	     !(buckets_nouse	= kvpmalloc(BITS_TO_LONGS(nbuckets) *
1356					    sizeof(unsigned long),
1357					    GFP_KERNEL|__GFP_ZERO)))) {
1358		ret = -BCH_ERR_ENOMEM_buckets_nouse;
1359		goto err;
1360	}
1361
1362	bucket_gens->first_bucket = ca->mi.first_bucket;
1363	bucket_gens->nbuckets	= nbuckets;
1364
1365	if (resize) {
1366		down_write(&c->gc_lock);
1367		down_write(&ca->bucket_lock);
1368		percpu_down_write(&c->mark_lock);
1369	}
1370
1371	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
1372
1373	if (resize) {
1374		size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);
1375
1376		memcpy(bucket_gens->b,
1377		       old_bucket_gens->b,
1378		       n);
1379		if (buckets_nouse)
1380			memcpy(buckets_nouse,
1381			       ca->buckets_nouse,
1382			       BITS_TO_LONGS(n) * sizeof(unsigned long));
1383	}
1384
1385	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
1386	bucket_gens	= old_bucket_gens;
1387
1388	swap(ca->buckets_nouse, buckets_nouse);
1389
1390	nbuckets = ca->mi.nbuckets;
1391
1392	if (resize) {
1393		percpu_up_write(&c->mark_lock);
1394		up_write(&ca->bucket_lock);
1395		up_write(&c->gc_lock);
1396	}
1397
1398	ret = 0;
1399err:
1400	kvpfree(buckets_nouse,
1401		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
1402	if (bucket_gens)
1403		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
1404
1405	return ret;
1406}
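/*
 * On a live resize the new bucket_gens array is swapped in with
 * rcu_assign_pointer() while gc_lock, bucket_lock and mark_lock are all
 * held write-side, and the old array is freed via call_rcu(), so lockless
 * readers of ca->bucket_gens never see it freed out from under them.
 */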
1407
1408void bch2_dev_buckets_free(struct bch_dev *ca)
1409{
1410	unsigned i;
1411
1412	kvpfree(ca->buckets_nouse,
1413		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
1414	kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
1415		sizeof(struct bucket_gens) + ca->mi.nbuckets);
1416
1417	for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
1418		free_percpu(ca->usage[i]);
1419	kfree(ca->usage_base);
1420}
1421
1422int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
1423{
1424	unsigned i;
1425
1426	ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
1427	if (!ca->usage_base)
1428		return -BCH_ERR_ENOMEM_usage_init;
1429
1430	for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
1431		ca->usage[i] = alloc_percpu(struct bch_dev_usage);
1432		if (!ca->usage[i])
1433			return -BCH_ERR_ENOMEM_usage_init;
1434	}
1435
1436	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
1437}