// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set_trans() allocates one or more buckets from different
 * devices in a given filesystem.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_write.h"
#include "journal.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "trace.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
					   struct mutex *lock)
{
	if (!mutex_trylock(lock)) {
		bch2_trans_unlock(trans);
		mutex_lock(lock);
	}
}

const char * const bch2_watermarks[] = {
#define x(t) #t,
	BCH_WATERMARKS()
#undef x
	NULL
};

/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
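
/*
 * Illustrative sketch (an assumed caller, not code from this file) of the
 * put-after-index-update discipline described above; do_index_update() is a
 * hypothetical stand-in for the caller's btree update:
 *
 *	struct open_buckets obs;	// refs handed out by the sector allocator
 *
 *	ret = do_index_update(trans, new_extent);
 *	if (!ret)
 *		bch2_open_buckets_put(c, &obs);	// drop the refs only now
 */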

void bch2_reset_alloc_cursors(struct bch_fs *c)
{
	rcu_read_lock();
	for_each_member_device_rcu(c, ca, NULL)
		memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
	rcu_read_unlock();
}

static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	ob->hash = *slot;
	*slot = idx;
}

static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	while (*slot != idx) {
		BUG_ON(!*slot);
		slot = &c->open_buckets[*slot].hash;
	}

	*slot = ob->hash;
	ob->hash = 0;
}
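
/*
 * A note on the two helpers above: open buckets are indexed by a hash table
 * keyed on (dev, bucket), with collision chains threaded through ob->hash as
 * indices into c->open_buckets; index 0 terminates a chain, since open bucket
 * 0 is reserved as a sentinel and never allocated. Removal walks the chain
 * until it finds the slot pointing at us, then splices us out.
 */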

void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);

	if (ob->ec) {
		ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
		return;
	}

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);

	ob->valid = false;
	ob->data_type = 0;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	bch2_open_bucket_hash_remove(c, ob);

	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}

void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->dev == dev && ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);
	ob->data_type = 0;

	c->open_buckets_nr_free--;
	return ob;
}

static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
{
	BUG_ON(c->open_buckets_partial_nr >=
	       ARRAY_SIZE(c->open_buckets_partial));

	spin_lock(&c->freelist_lock);
	rcu_read_lock();
	bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++;
	rcu_read_unlock();

	ob->on_partial_list = true;
	c->open_buckets_partial[c->open_buckets_partial_nr++] =
		ob - c->open_buckets;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
	closure_wake_up(&c->freelist_wait);
}

/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
		u64 b = ca->new_fs_bucket_idx++;

		if (!is_superblock_bucket(ca, b) &&
		    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
			return b;
	}

	return -1;
}

static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
{
	switch (watermark) {
	case BCH_WATERMARK_interior_updates:
		return 0;
	case BCH_WATERMARK_reclaim:
		return OPEN_BUCKETS_COUNT / 6;
	case BCH_WATERMARK_btree:
	case BCH_WATERMARK_btree_copygc:
		return OPEN_BUCKETS_COUNT / 4;
	case BCH_WATERMARK_copygc:
		return OPEN_BUCKETS_COUNT / 3;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}
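
/*
 * Worked example, assuming the default OPEN_BUCKETS_COUNT of 1024: a normal
 * allocation fails once fewer than 512 open buckets remain, copygc may dig
 * down to ~341, btree updates to 256, journal reclaim to ~170, and only
 * interior btree node updates can consume the last few - so the allocators
 * that free up space can always make forward progress.
 */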

static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
					      u64 bucket,
					      enum bch_watermark watermark,
					      const struct bch_alloc_v4 *a,
					      struct bucket_alloc_state *s,
					      struct closure *cl)
{
	struct open_bucket *ob;

	if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
		s->skipped_nouse++;
		return NULL;
	}

	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		s->skipped_open++;
		return NULL;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
		s->skipped_need_journal_commit++;
		return NULL;
	}

	if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
		s->skipped_nocow++;
		return NULL;
	}

	spin_lock(&c->freelist_lock);

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
		spin_unlock(&c->freelist_lock);
		return ERR_PTR(-BCH_ERR_open_buckets_empty);
	}

	/* Recheck under lock: */
	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		spin_unlock(&c->freelist_lock);
		s->skipped_open++;
		return NULL;
	}

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->dev		= ca->dev_idx;
	ob->gen		= a->gen;
	ob->bucket	= bucket;
	spin_unlock(&ob->lock);

	ca->nr_open_buckets++;
	bch2_open_bucket_hash_add(c, ob);

	track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false);
	track_event_change(&c->times[BCH_TIME_blocked_allocate], false);

	spin_unlock(&c->freelist_lock);
	return ob;
}

static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
					    enum bch_watermark watermark, u64 free_entry,
					    struct bucket_alloc_state *s,
					    struct bkey_s_c freespace_k,
					    struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct open_bucket *ob;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 b = free_entry & ~(~0ULL << 56);
	unsigned genbits = free_entry >> 56;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
		prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
		       "  freespace key ",
			ca->mi.first_bucket, ca->mi.nbuckets);
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	k = bch2_bkey_get_iter(trans, &iter,
			       BTREE_ID_alloc, POS(ca->dev_idx, b),
			       BTREE_ITER_cached);
	ret = bkey_err(k);
	if (ret) {
		ob = ERR_PTR(ret);
		goto err;
	}

	a = bch2_alloc_to_v4(k, &a_convert);

	if (a->data_type != BCH_DATA_free) {
		if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
			ob = NULL;
			goto err;
		}

		prt_printf(&buf, "non free bucket in freespace btree\n"
		       "  freespace key ");
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		prt_printf(&buf, "\n  ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
		prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
		       "  freespace key ",
		       genbits, alloc_freespace_genbits(*a) >> 56);
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		prt_printf(&buf, "\n  ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
		struct bch_backpointer bp;
		struct bpos bp_pos = POS_MIN;

		ret = bch2_get_next_backpointer(trans, ca, POS(ca->dev_idx, b), -1,
						&bp_pos, &bp,
						BTREE_ITER_nopreserve);
		if (ret) {
			ob = ERR_PTR(ret);
			goto err;
		}

		if (!bkey_eq(bp_pos, POS_MAX)) {
			/*
			 * Bucket may have data in it - we don't call
			 * bch2_trans_inconsistent() because fsck hasn't
			 * finished yet
			 */
			ob = NULL;
			goto err;
		}
	}

	ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
	if (!ob)
		bch2_set_btree_iter_dontneed(&iter);
err:
	if (iter.path)
		bch2_set_btree_iter_dontneed(&iter);
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ob;
}
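
/*
 * Illustrative note on the decoding at the top of try_alloc_bucket(): a
 * freespace btree key's offset packs the bucket number into the low 56 bits
 * and the generation bits into the high 8:
 *
 *	u64 b       = free_entry & ~(~0ULL << 56);	// bucket number
 *	unsigned gb = free_entry >> 56;			// genbits
 *
 * e.g. a free_entry of 0x0300000000000007 means bucket 7 with genbits 3.
 */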

/*
 * This path is for before the freespace btree is initialized:
 *
 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
 */
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
			struct bch_dev *ca,
			enum bch_watermark watermark,
			struct bucket_alloc_state *s,
			struct closure *cl)
{
	struct btree_iter iter, citer;
	struct bkey_s_c k, ck;
	struct open_bucket *ob = NULL;
	u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
	u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
	u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
	u64 alloc_cursor = alloc_start;
	int ret;

	/*
	 * Scan with an uncached iterator to avoid polluting the key cache. An
	 * uncached iter will return a cached key if one exists, but if not
	 * there is no other underlying protection for the associated key cache
	 * slot. To avoid racing bucket allocations, look up the cached key slot
	 * of any likely allocation candidate before attempting to proceed with
	 * the allocation. This provides proper exclusion on the associated
	 * bucket.
	 */
again:
	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
			   BTREE_ITER_slots, k, ret) {
		u64 bucket = k.k->p.offset;

		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
			break;

		if (ca->new_fs_bucket_idx &&
		    is_superblock_bucket(ca, k.k->p.offset))
			continue;

		if (s->btree_bitmap != BTREE_BITMAP_ANY &&
		    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
				bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
			if (s->btree_bitmap == BTREE_BITMAP_YES &&
			    bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
				break;

			bucket = sector_to_bucket(ca,
					round_up(bucket_to_sector(ca, bucket) + 1,
						 1ULL << ca->mi.btree_bitmap_shift));
			bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
			s->buckets_seen++;
			s->skipped_mi_btree_bitmap++;
			continue;
		}

		struct bch_alloc_v4 a_convert;
		const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
		if (a->data_type != BCH_DATA_free)
			continue;

		/* now check the cached key to serialize concurrent allocs of the bucket */
		ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
		ret = bkey_err(ck);
		if (ret)
			break;

		a = bch2_alloc_to_v4(ck, &a_convert);
		if (a->data_type != BCH_DATA_free)
			goto next;

		s->buckets_seen++;

		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
next:
		bch2_set_btree_iter_dontneed(&citer);
		bch2_trans_iter_exit(trans, &citer);
		if (ob)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	alloc_cursor = iter.pos.offset;

	if (!ob && ret)
		ob = ERR_PTR(ret);

	if (!ob && alloc_start > first_bucket) {
		alloc_cursor = alloc_start = first_bucket;
		goto again;
	}

	*dev_alloc_cursor = alloc_cursor;

	return ob;
}

static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
						   struct bch_dev *ca,
						   enum bch_watermark watermark,
						   struct bucket_alloc_state *s,
						   struct closure *cl)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct open_bucket *ob = NULL;
	u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
	u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
	u64 alloc_cursor = alloc_start;
	int ret;

	BUG_ON(ca->new_fs_bucket_idx);
again:
	for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
				     POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
		if (k.k->p.inode != ca->dev_idx)
			break;

		for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
		     alloc_cursor < k.k->p.offset;
		     alloc_cursor++) {
			s->buckets_seen++;

			u64 bucket = alloc_cursor & ~(~0ULL << 56);
			if (s->btree_bitmap != BTREE_BITMAP_ANY &&
			    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
					bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
				if (s->btree_bitmap == BTREE_BITMAP_YES &&
				    bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
					goto fail;

				bucket = sector_to_bucket(ca,
						round_up(bucket_to_sector(ca, bucket) + 1,
							 1ULL << ca->mi.btree_bitmap_shift));
				u64 genbits = alloc_cursor >> 56;
				alloc_cursor = bucket | (genbits << 56);

				if (alloc_cursor > k.k->p.offset)
					bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
				s->skipped_mi_btree_bitmap++;
				continue;
			}

			ob = try_alloc_bucket(trans, ca, watermark,
					      alloc_cursor, s, k, cl);
			if (ob) {
				bch2_set_btree_iter_dontneed(&iter);
				break;
			}
		}

		if (ob || ret)
			break;
	}
fail:
	bch2_trans_iter_exit(trans, &iter);

	if (!ob && ret)
		ob = ERR_PTR(ret);

	if (!ob && alloc_start > ca->mi.first_bucket) {
		alloc_cursor = alloc_start = ca->mi.first_bucket;
		goto again;
	}

	*dev_alloc_cursor = alloc_cursor;

	return ob;
}

static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
					 enum bch_watermark watermark,
					 enum bch_data_type data_type,
					 struct closure *cl,
					 struct bch_dev_usage *usage,
					 struct bucket_alloc_state *s,
					 struct open_bucket *ob)
{
	struct printbuf buf = PRINTBUF;

	printbuf_tabstop_push(&buf, 24);

	prt_printf(&buf, "dev\t%s (%u)\n",	ca->name, ca->dev_idx);
	prt_printf(&buf, "watermark\t%s\n",	bch2_watermarks[watermark]);
	prt_printf(&buf, "data type\t%s\n",	__bch2_data_types[data_type]);
	prt_printf(&buf, "blocking\t%u\n",	cl != NULL);
	prt_printf(&buf, "free\t%llu\n",	usage->d[BCH_DATA_free].buckets);
	prt_printf(&buf, "avail\t%llu\n",	dev_buckets_free(ca, *usage, watermark));
	prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
		   bch2_copygc_wait_amount(c),
		   c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now));
	prt_printf(&buf, "seen\t%llu\n",	s->buckets_seen);
	prt_printf(&buf, "open\t%llu\n",	s->skipped_open);
	prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
	prt_printf(&buf, "nocow\t%llu\n",	s->skipped_nocow);
	prt_printf(&buf, "nouse\t%llu\n",	s->skipped_nouse);
	prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap);

	if (!IS_ERR(ob)) {
		prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
		trace_bucket_alloc(c, buf.buf);
	} else {
		prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
		trace_bucket_alloc_fail(c, buf.buf);
	}

	printbuf_exit(&buf);
}

/**
 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
 * @trans:	transaction object
 * @ca:		device to allocate from
 * @watermark:	how important is this allocation?
 * @data_type:	BCH_DATA_journal, btree, user...
 * @cl:		if not NULL, closure to be used to wait if buckets not available
 * @nowait:	if true, don't wait on @cl even when no buckets are available
 * @usage:	for also returning the current device usage
 *
 * Returns:	an open_bucket on success, or an ERR_PTR() on failure.
 */
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
				      struct bch_dev *ca,
				      enum bch_watermark watermark,
				      enum bch_data_type data_type,
				      struct closure *cl,
				      bool nowait,
				      struct bch_dev_usage *usage)
{
	struct bch_fs *c = trans->c;
	struct open_bucket *ob = NULL;
	bool freespace = READ_ONCE(ca->mi.freespace_initialized);
	u64 avail;
	struct bucket_alloc_state s = {
		.btree_bitmap = data_type == BCH_DATA_btree,
	};
	bool waiting = nowait;
again:
	bch2_dev_usage_read_fast(ca, usage);
	avail = dev_buckets_free(ca, *usage, watermark);

	if (usage->d[BCH_DATA_need_discard].buckets > avail)
		bch2_dev_do_discards(ca);

	if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
		bch2_gc_gens_async(c);

	if (should_invalidate_buckets(ca, *usage))
		bch2_dev_do_invalidates(ca);

	if (!avail) {
		if (cl && !waiting) {
			closure_wait(&c->freelist_wait, cl);
			waiting = true;
			goto again;
		}

		track_event_change(&c->times[BCH_TIME_blocked_allocate], true);

		ob = ERR_PTR(-BCH_ERR_freelist_empty);
		goto err;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc:
	ob = likely(freespace)
		? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
		: bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);

	if (s.skipped_need_journal_commit * 2 > avail)
		bch2_journal_flush_async(&c->journal, NULL);

	if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
		s.btree_bitmap = BTREE_BITMAP_ANY;
		goto alloc;
	}

	if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
		freespace = false;
		goto alloc;
	}
err:
	if (!ob)
		ob = ERR_PTR(-BCH_ERR_no_buckets_found);

	if (!IS_ERR(ob))
		ob->data_type = data_type;

	if (!IS_ERR(ob))
		count_event(c, bucket_alloc);
	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
		count_event(c, bucket_alloc_fail);

	if (!IS_ERR(ob)
	    ? trace_bucket_alloc_enabled()
	    : trace_bucket_alloc_fail_enabled())
		trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob);

	return ob;
}

struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum bch_watermark watermark,
				      enum bch_data_type data_type,
				      struct closure *cl)
{
	struct bch_dev_usage usage;
	struct open_bucket *ob;

	bch2_trans_do(c,
		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
							data_type, cl, false, &usage)));
	return ob;
}
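
/*
 * Sketch of an assumed caller (not code from this file) - allocating one
 * bucket, e.g. for journal-style metadata, then releasing the open_bucket
 * ref once the bucket has been recorded:
 *
 *	struct open_bucket *ob =
 *		bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
 *				  BCH_DATA_journal, cl);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);
 *	// ... record ob->bucket somewhere reachable ...
 *	bch2_open_bucket_put(c, ob);
 */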

static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}

static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
			       struct dev_stripe_state *stripe,
			       struct bch_dev_usage *usage)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
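
/*
 * Worked example: if dev A has 100 free buckets and dev B has 400, each
 * allocation from A adds 2^48/100 to its next_alloc key and each from B adds
 * 2^48/400; since bch2_dev_alloc_list() sorts devices by ascending
 * next_alloc, B receives roughly four allocations for every one that A does,
 * i.e. devices fill at rates proportional to their free space. The loop at
 * the end rescales every key toward zero so they never pin at U64_MAX.
 */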

void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	struct bch_dev_usage usage;

	bch2_dev_usage_read_fast(ca, &usage);
	bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
}

static int add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned nr_replicas,
			   unsigned *nr_effective,
			   bool *have_cache,
			   struct open_bucket *ob)
{
	unsigned durability = ob_dev(c, ob)->mi.durability;

	BUG_ON(*nr_effective >= nr_replicas);

	__clear_bit(ob->dev, devs_may_alloc->d);
	*nr_effective	+= durability;
	*have_cache	|= !durability;

	ob_push(c, ptrs, ob);

	if (*nr_effective >= nr_replicas)
		return 1;
	if (ob->ec)
		return 1;
	return 0;
}

int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
		      struct open_buckets *ptrs,
		      struct dev_stripe_state *stripe,
		      struct bch_devs_mask *devs_may_alloc,
		      unsigned nr_replicas,
		      unsigned *nr_effective,
		      bool *have_cache,
		      enum bch_write_flags flags,
		      enum bch_data_type data_type,
		      enum bch_watermark watermark,
		      struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	int ret = -BCH_ERR_insufficient_devices;

	BUG_ON(*nr_effective >= nr_replicas);

	for (unsigned i = 0; i < devs_sorted.nr; i++) {
		struct bch_dev_usage usage;
		struct open_bucket *ob;

		unsigned dev = devs_sorted.devs[i];
		struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache) {
			bch2_dev_put(ca);
			continue;
		}

		ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
					     cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage);
		if (!IS_ERR(ob))
			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
		bch2_dev_put(ca);

		if (IS_ERR(ob)) {
			ret = PTR_ERR(ob);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
				break;
			continue;
		}

		if (add_new_bucket(c, ptrs, devs_may_alloc,
				   nr_replicas, nr_effective,
				   have_cache, ob)) {
			ret = 0;
			break;
		}
	}

	return ret;
}

/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */

static int bucket_alloc_from_stripe(struct btree_trans *trans,
			 struct open_buckets *ptrs,
			 struct write_point *wp,
			 struct bch_devs_mask *devs_may_alloc,
			 u16 target,
			 unsigned nr_replicas,
			 unsigned *nr_effective,
			 bool *have_cache,
			 enum bch_watermark watermark,
			 enum bch_write_flags flags,
			 struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i, ec_idx;
	int ret = 0;

	if (nr_replicas < 2)
		return 0;

	if (ec_open_bucket(c, ptrs))
		return 0;

	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
	if (IS_ERR(h))
		return PTR_ERR(h);
	if (!h)
		return 0;

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
		}
	goto out_put_head;
got_bucket:
	ob->ec_idx	= ec_idx;
	ob->ec		= h->s;
	ec_stripe_new_get(h->s, STRIPE_REF_io);

	ret = add_new_bucket(c, ptrs, devs_may_alloc,
			     nr_replicas, nr_effective,
			     have_cache, ob);
out_put_head:
	bch2_ec_stripe_head_put(c, h);
	return ret;
}

/* Sector allocator */

static bool want_bucket(struct bch_fs *c,
			struct write_point *wp,
			struct bch_devs_mask *devs_may_alloc,
			bool *have_cache, bool ec,
			struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);

	if (!test_bit(ob->dev, devs_may_alloc->d))
		return false;

	if (ob->data_type != wp->data_type)
		return false;

	if (!ca->mi.durability &&
	    (wp->data_type == BCH_DATA_btree || ec || *have_cache))
		return false;

	if (ec != (ob->ec != NULL))
		return false;

	return true;
}

static int bucket_alloc_set_writepoint(struct bch_fs *c,
				       struct open_buckets *ptrs,
				       struct write_point *wp,
				       struct bch_devs_mask *devs_may_alloc,
				       unsigned nr_replicas,
				       unsigned *nr_effective,
				       bool *have_cache,
				       bool ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;
	int ret = 0;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		if (!ret && want_bucket(c, wp, devs_may_alloc,
					have_cache, ec, ob))
			ret = add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_replicas, nr_effective,
				       have_cache, ob);
		else
			ob_push(c, &ptrs_skip, ob);
	}
	wp->ptrs = ptrs_skip;

	return ret;
}

static int bucket_alloc_set_partial(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
				    bool *have_cache, bool ec,
				    enum bch_watermark watermark)
{
	int i, ret = 0;

	if (!c->open_buckets_partial_nr)
		return 0;

	spin_lock(&c->freelist_lock);

	if (!c->open_buckets_partial_nr)
		goto unlock;

	for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
		struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];

		if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
			struct bch_dev *ca = ob_dev(c, ob);
			struct bch_dev_usage usage;
			u64 avail;

			bch2_dev_usage_read_fast(ca, &usage);
			avail = dev_buckets_free(ca, usage, watermark) + ca->nr_partial_buckets;
			if (!avail)
				continue;

			array_remove_item(c->open_buckets_partial,
					  c->open_buckets_partial_nr,
					  i);
			ob->on_partial_list = false;

			rcu_read_lock();
			bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
			rcu_read_unlock();

			ret = add_new_bucket(c, ptrs, devs_may_alloc,
					     nr_replicas, nr_effective,
					     have_cache, ob);
			if (ret)
				break;
		}
	}
unlock:
	spin_unlock(&c->freelist_lock);
	return ret;
}

static int __open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			bool erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
			enum bch_write_flags flags,
			struct closure *_cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	unsigned i;
	int ret;

	devs = target_rw_devs(c, wp->data_type, target);

	/* Don't allocate from devices we already have pointers to: */
	darray_for_each(*devs_have, i)
		__clear_bit(*i, devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->dev, devs.d);

	ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
				 nr_replicas, nr_effective,
				 have_cache, erasure_code);
	if (ret)
		return ret;

	ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
				 nr_replicas, nr_effective,
				 have_cache, erasure_code, watermark);
	if (ret)
		return ret;

	if (erasure_code) {
		ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
					 target,
					 nr_replicas, nr_effective,
					 have_cache,
					 watermark, flags, _cl);
	} else {
retry_blocking:
		/*
		 * Try nonblocking first, so that if one device is full we'll try from
		 * other devices:
		 */
		ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
					nr_replicas, nr_effective, have_cache,
					flags, wp->data_type, watermark, cl);
		if (ret &&
		    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
		    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
		    !cl && _cl) {
			cl = _cl;
			goto retry_blocking;
		}
	}

	return ret;
}

static int open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			unsigned erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
			enum bch_write_flags flags,
			struct closure *cl)
{
	int ret;

	if (erasure_code && !ec_open_bucket(trans->c, ptrs)) {
		ret = __open_bucket_add_buckets(trans, ptrs, wp,
				devs_have, target, erasure_code,
				nr_replicas, nr_effective, have_cache,
				watermark, flags, cl);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
		    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
		    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
			return ret;
		if (*nr_effective >= nr_replicas)
			return 0;
	}

	ret = __open_bucket_add_buckets(trans, ptrs, wp,
			devs_have, target, false,
			nr_replicas, nr_effective, have_cache,
			watermark, flags, cl);
	return ret < 0 ? ret : 0;
}

/**
 * should_drop_bucket - check if this open_bucket should go away
 * @ob:		open_bucket to predicate on
 * @c:		filesystem handle
 * @ca:		if set, we're killing buckets for a particular device
 * @ec:		if true, we're shutting down erasure coding and killing all ec
 *		open_buckets
 * Returns: true if we should kill this open_bucket
 *
 * We're killing open_buckets because we're shutting down a device, erasure
 * coding, or the entire filesystem - check if this open_bucket matches:
 */
static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
			       struct bch_dev *ca, bool ec)
{
	if (ec) {
		return ob->ec != NULL;
	} else if (ca) {
		bool drop = ob->dev == ca->dev_idx;
		struct open_bucket *ob2;
		unsigned i;

		if (!drop && ob->ec) {
			unsigned nr_blocks;

			mutex_lock(&ob->ec->lock);
			nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;

			for (i = 0; i < nr_blocks; i++) {
				if (!ob->ec->blocks[i])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[i];
				drop |= ob2->dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		return drop;
	} else {
		return true;
	}
}

static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
				 bool ec, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&wp->lock);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (should_drop_bucket(ob, c, ca, ec))
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	wp->ptrs = ptrs;
	mutex_unlock(&wp->lock);
}

void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
			    bool ec)
{
	unsigned i;

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	spin_lock(&c->freelist_lock);
	i = 0;
	while (i < c->open_buckets_partial_nr) {
		struct open_bucket *ob =
			c->open_buckets + c->open_buckets_partial[i];

		if (should_drop_bucket(ob, c, ca, ec)) {
			--c->open_buckets_partial_nr;
			swap(c->open_buckets_partial[i],
			     c->open_buckets_partial[c->open_buckets_partial_nr]);

			ob->on_partial_list = false;

			rcu_read_lock();
			bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
			rcu_read_unlock();

			spin_unlock(&c->freelist_lock);
			bch2_open_bucket_put(c, ob);
			spin_lock(&c->freelist_lock);
		} else {
			i++;
		}
	}
	spin_unlock(&c->freelist_lock);

	bch2_ec_stop_dev(c, ca);
}

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			goto out;
	wp = NULL;
out:
	rcu_read_unlock();
	return wp;
}

static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}
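
/*
 * Worked example: the heuristic above charges each write point
 * bucket_size_max sectors of potentially stranded space. With 32 write
 * points and 1MB (2048-sector) buckets, too_many_writepoints(c, 32) returns
 * true once free space falls below 32 * 32MB = 1GB, at which point
 * try_increase_writepoints() below stops adding write points.
 */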

static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}

static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_trans_mutex_lock_norelock(trans, &wp->lock);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, ob);
	wp->ptrs.nr = 0;
	mutex_unlock(&wp->lock);
	return true;
}

static struct write_point *writepoint_find(struct btree_trans *trans,
					   unsigned long write_point)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp, *oldest;
	struct hlist_head *head;

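	/*
	 * Even specifier values are direct pointers to a write_point (the
	 * fixed btree/copygc/rebalance write points); odd values - presumably
	 * built by a writepoint_hashed()-style helper - name a logical write
	 * point that we look up, or assign by stealing the oldest, below:
	 */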
	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
	bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = local_clock();
	return wp;
}

static noinline void
deallocate_extra_replicas(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct open_buckets *ptrs_no_use,
			  unsigned extra_replicas)
{
	struct open_buckets ptrs2 = { 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, ptrs, ob, i) {
		unsigned d = ob_dev(c, ob)->mi.durability;

		if (d && d <= extra_replicas) {
			extra_replicas -= d;
			ob_push(c, ptrs_no_use, ob);
		} else {
			ob_push(c, &ptrs2, ob);
		}
	}

	*ptrs = ptrs2;
}

/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
			     unsigned target,
			     unsigned erasure_code,
			     struct write_point_specifier write_point,
			     struct bch_devs_list *devs_have,
			     unsigned nr_replicas,
			     unsigned nr_replicas_required,
			     enum bch_watermark watermark,
			     enum bch_write_flags flags,
			     struct closure *cl,
			     struct write_point **wp_ret)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	bool have_cache;
	int ret;
	int i;

	if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
		erasure_code = false;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	*wp_ret = wp = writepoint_find(trans, write_point.v);

	ret = bch2_trans_relock(trans);
	if (ret)
		goto err;

	/* metadata may not allocate on cache devices: */
	if (wp->data_type != BCH_DATA_user)
		have_cache = true;

	if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, NULL);
		if (!ret ||
		    bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto alloc_done;

		/* Don't retry from all devices if we're out of open buckets: */
		if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
			int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
			if (!ret2 ||
			    bch2_err_matches(ret2, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) {
				ret = ret2;
				goto alloc_done;
			}
		}

		/*
		 * Only try to allocate cache (durability = 0 devices) from the
		 * specified target:
		 */
		have_cache = true;

		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
	} else {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -BCH_ERR_insufficient_devices &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	if (nr_effective > nr_replicas)
		deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	return 0;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
	    try_decrease_writepoints(trans, write_points_nr))
		goto retry;

	if (cl && bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
		ret = -BCH_ERR_bucket_alloc_blocked;

	if (cl && !(flags & BCH_WRITE_ALLOC_NOWAIT) &&
	    bch2_err_matches(ret, BCH_ERR_freelist_empty))
		ret = -BCH_ERR_bucket_alloc_blocked;

	return ret;
}
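
/*
 * Illustrative write-path sketch (an assumed caller, not code from this
 * file) showing the start/append/done sequence:
 *
 *	struct write_point *wp;
 *
 *	ret = bch2_alloc_sectors_start_trans(trans, target, false, write_point,
 *				&devs_have, 2, 2, BCH_WATERMARK_normal, 0,
 *				cl, &wp);
 *	if (ret)
 *		return ret;
 *
 *	sectors = min(sectors, wp->sectors_free);
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *	bch2_alloc_sectors_done(c, wp);		// drops wp->lock
 */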

struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);

	return (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= ob->gen,
		.dev	= ob->dev,
		.offset	= bucket_to_sector(ca, ob->bucket) +
			ca->mi.bucket_size -
			ob->sectors_free,
	};
}
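
/*
 * Example of the offset math above: for a 1024-sector bucket with 768
 * sectors still free, the new pointer's offset is the bucket's first sector
 * plus 1024 - 768, i.e. just past the 256 sectors already handed out from
 * this bucket.
 */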

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob:
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
				    bool cached)
{
	bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
}

void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	bch2_alloc_sectors_done_inlined(c, wp);
}

static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->data_type = type;

	INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
	INIT_LIST_HEAD(&wp->writes);
	spin_lock_init(&wp->writes_lock);
}

void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point,		BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point,	BCH_DATA_user);
	writepoint_init(&c->copygc_write_point,		BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used	= local_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}

void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);
	unsigned data_type = ob->data_type;
	barrier(); /* READ_ONCE() doesn't work on bitfields */

	prt_printf(out, "%zu ref %u ",
		   ob - c->open_buckets,
		   atomic_read(&ob->pin));
	bch2_prt_data_type(out, data_type);
	prt_printf(out, " %u:%llu gen %u allocated %u/%u",
		   ob->dev, ob->bucket, ob->gen,
		   ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
	if (ob->ec)
		prt_printf(out, " ec idx %llu", ob->ec->idx);
	if (ob->on_partial_list)
		prt_str(out, " partial");
	prt_newline(out);
}

void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bch_dev *ca)
{
	struct open_bucket *ob;

	out->atomic++;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && (!ca || ob->dev == ca->dev_idx))
			bch2_open_bucket_to_text(out, c, ob);
		spin_unlock(&ob->lock);
	}

	--out->atomic;
}

void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
{
	unsigned i;

	out->atomic++;
	spin_lock(&c->freelist_lock);

	for (i = 0; i < c->open_buckets_partial_nr; i++)
		bch2_open_bucket_to_text(out, c,
				c->open_buckets + c->open_buckets_partial[i]);

	spin_unlock(&c->freelist_lock);
	--out->atomic;
}

static const char * const bch2_write_point_states[] = {
#define x(n)	#n,
	WRITE_POINT_STATES()
#undef x
	NULL
};

static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
				     struct write_point *wp)
{
	struct open_bucket *ob;
	unsigned i;

	prt_printf(out, "%lu: ", wp->write_point);
	prt_human_readable_u64(out, wp->sectors_allocated);

	prt_printf(out, " last wrote: ");
	bch2_pr_time_units(out, sched_clock() - wp->last_used);

	for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
		prt_printf(out, " %s: ", bch2_write_point_states[i]);
		bch2_pr_time_units(out, wp->time[i]);
	}

	prt_newline(out);

	printbuf_indent_add(out, 2);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		bch2_open_bucket_to_text(out, c, ob);
	printbuf_indent_sub(out, 2);
}

void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct write_point *wp;

	prt_str(out, "Foreground write points\n");
	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points);
	     wp++)
		bch2_write_point_to_text(out, c, wp);

	prt_str(out, "Copygc write point\n");
	bch2_write_point_to_text(out, c, &c->copygc_write_point);

	prt_str(out, "Rebalance write point\n");
	bch2_write_point_to_text(out, c, &c->rebalance_write_point);

	prt_str(out, "Btree write point\n");
	bch2_write_point_to_text(out, c, &c->btree_write_point);
}

void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
	unsigned nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 24);

	prt_printf(out, "capacity\t%llu\n",		c->capacity);
	prt_printf(out, "reserved\t%llu\n",		c->reserved);
	prt_printf(out, "hidden\t%llu\n",		percpu_u64_get(&c->usage->hidden));
	prt_printf(out, "btree\t%llu\n",		percpu_u64_get(&c->usage->btree));
	prt_printf(out, "data\t%llu\n",			percpu_u64_get(&c->usage->data));
	prt_printf(out, "cached\t%llu\n",		percpu_u64_get(&c->usage->cached));
	prt_printf(out, "reserved\t%llu\n",		percpu_u64_get(&c->usage->reserved));
	prt_printf(out, "online_reserved\t%llu\n",	percpu_u64_get(c->online_reserved));
	prt_printf(out, "nr_inodes\t%llu\n",		percpu_u64_get(&c->usage->nr_inodes));

	prt_newline(out);
	prt_printf(out, "freelist_wait\t%s\n",			c->freelist_wait.list.first ? "waiting" : "empty");
	prt_printf(out, "open buckets allocated\t%i\n",		OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_printf(out, "open buckets total\t%u\n",		OPEN_BUCKETS_COUNT);
	prt_printf(out, "open_buckets_wait\t%s\n",		c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_printf(out, "open_buckets_btree\t%u\n",		nr[BCH_DATA_btree]);
	prt_printf(out, "open_buckets_user\t%u\n",		nr[BCH_DATA_user]);
	prt_printf(out, "btree reserve cache\t%u\n",		c->btree_reserve_cache_nr);
}

void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	bch2_dev_usage_to_text(out, ca, &stats);

	prt_newline(out);

	prt_printf(out, "reserves:\n");
	for (unsigned i = 0; i < BCH_WATERMARK_NR; i++)
		prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i));

	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 12);
	printbuf_tabstop_push(out, 16);

	prt_printf(out, "open buckets\t%i\r\n",	ca->nr_open_buckets);
	prt_printf(out, "buckets to invalidate\t%llu\r\n",	should_invalidate_buckets(ca, stats));
}

static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
{
	struct printbuf buf = PRINTBUF;

	prt_printf(&buf, "Allocator stuck? Waited for %u seconds\n",
		   c->opts.allocator_stuck_timeout);

	prt_printf(&buf, "Allocator debug:\n");
	printbuf_indent_add(&buf, 2);
	bch2_fs_alloc_debug_to_text(&buf, c);
	printbuf_indent_sub(&buf, 2);
	prt_newline(&buf);

	for_each_online_member(c, ca) {
		prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
		printbuf_indent_add(&buf, 2);
		bch2_dev_alloc_debug_to_text(&buf, ca);
		printbuf_indent_sub(&buf, 2);
		prt_newline(&buf);
	}

	prt_printf(&buf, "Copygc debug:\n");
	printbuf_indent_add(&buf, 2);
	bch2_copygc_wait_to_text(&buf, c);
	printbuf_indent_sub(&buf, 2);
	prt_newline(&buf);

	prt_printf(&buf, "Journal debug:\n");
	printbuf_indent_add(&buf, 2);
	bch2_journal_debug_to_text(&buf, &c->journal);
	printbuf_indent_sub(&buf, 2);

	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);
}

static inline unsigned allocator_wait_timeout(struct bch_fs *c)
{
	if (c->allocator_last_stuck &&
	    time_after(c->allocator_last_stuck + HZ * 60 * 2, jiffies))
		return 0;

	return c->opts.allocator_stuck_timeout * HZ;
}
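
/*
 * i.e. once a stuck-allocator report has fired, return 0 (wait without a
 * timeout, and hence without reporting) for the next two minutes so the log
 * isn't flooded with repeat warnings.
 */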

void __bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
{
	unsigned t = allocator_wait_timeout(c);

	if (t && closure_sync_timeout(cl, t)) {
		c->allocator_last_stuck = jiffies;
		bch2_print_allocator_stuck(c);
	}

	closure_sync(cl);
}