// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
			  enum six_lock_init_flags flags)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
	lockdep_set_novalidate_class(&b->lock);
}

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
#if 0
	// Re-enable when lock_class_is_held() is merged:
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
#endif
}
#endif

/* Btree node locking: */

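/*
 * Count how many locks of each type this transaction already holds on @b, by
 * walking every path except @skip that points at the node:
 */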
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;
	unsigned i;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path, i)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
			struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * @trans wants to lock @b with type @type
 */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common	*node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

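/*
 * Stack of transactions for walking the waits-for graph: each entry after the
 * first is a transaction blocked on a lock held by the previous entry's
 * transaction. Hitting a transaction that is already on the stack means we've
 * found a cycle:
 */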
struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):", g->nr);
	prt_newline(out);

	for (i = g->g; i < g->g + g->nr; i++) {
		struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
		if (!task)
			continue;

		bch2_btree_trans_to_text(out, i->trans);
		bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
	}
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		struct task_struct *task = i->trans->locking_wait.task;
		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", task ? task->pid : 0);
	}
	prt_newline(out);
}

static void lock_graph_up(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
	while (g->nr)
		lock_graph_up(g);
}

static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	closure_get(&trans->ref);
	__lock_graph_down(g, trans);
}

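/*
 * Check that every transaction we descended through is still blocked on the
 * lock we recorded it waiting on; if one has since moved on, pop the graph
 * back to that point and return true, since it can no longer be part of a
 * cycle:
 */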
static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			while (g->g + g->nr > i)
				lock_graph_up(g);
			return true;
		}

	return false;
}

static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	count_event(c, trans_restart_would_deadlock);

	if (trace_trans_restart_would_deadlock_enabled()) {
		struct printbuf buf = PRINTBUF;

		buf.atomic++;
		print_cycle(&buf, g);

		trace_trans_restart_would_deadlock(trans, buf.buf);
		printbuf_exit(&buf);
	}
}

static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_would_deadlock(g, i->trans);
		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}

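/*
 * Rank how suitable this transaction is as a victim for breaking a cycle:
 * 0 means it may not fail and must never be aborted, higher values are
 * preferred:
 */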
static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;
	if (!trans->in_traverse_all)
		return 2;
	return 3;
}

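/*
 * We found a cycle: pick the transaction with the highest abort preference
 * and abort it - or, when called from debugfs with @cycle, just print the
 * cycle. If every transaction in the cycle is nofail, that's a bug; print
 * everything we know and BUG():
 */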
static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
	struct trans_waiting_for_lock *i, *abort = NULL;
	unsigned best = 0, pref;
	int ret;

	if (lock_graph_remove_non_waiters(g))
		return 0;

	/* Only checking, for debugfs: */
	if (cycle) {
		print_cycle(cycle, g);
		ret = -1;
		goto out;
	}

	for (i = g->g; i < g->g + g->nr; i++) {
		pref = btree_trans_abort_preference(i->trans);
		if (pref > best) {
			abort = i;
			best = pref;
		}
	}

	if (unlikely(!best)) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

		for (i = g->g; i < g->g + g->nr; i++) {
			struct btree_trans *trans = i->trans;

			bch2_btree_trans_to_text(&buf, trans);

			prt_printf(&buf, "backtrace:");
			prt_newline(&buf);
			printbuf_indent_add(&buf, 2);
			bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
			printbuf_indent_sub(&buf, 2);
			prt_newline(&buf);
		}

		bch2_print_string_as_lines(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		BUG();
	}

	ret = abort_lock(g, abort);
out:
	if (ret)
		while (g->nr)
			lock_graph_up(g);
	return ret;
}

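/*
 * Add @trans to the lock graph (the caller has already taken a ref with
 * closure_get()): if it's already in the graph we've found a cycle and call
 * break_cycle(); if the graph is full we either give up or restart the
 * original transaction:
 */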
static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;

	for (i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans) {
			closure_put(&trans->ref);
			return break_cycle(g, cycle);
		}

	if (g->nr == ARRAY_SIZE(g->g)) {
		closure_put(&trans->ref);

		if (orig_trans->lock_may_not_fail)
			return 0;

		while (g->nr)
			lock_graph_up(g);

		if (cycle)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
	}

	__lock_graph_down(g, trans);
	return 0;
}

static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	return t1 + t2 > 1;
}

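/*
 * Check whether @trans, which is blocked on a lock, is part of a cycle in the
 * waits-for graph: depth-first search where, from each transaction in the
 * graph, we visit the transactions waiting on locks it holds. Reaching a
 * transaction that's already in the graph means there's a cycle, which we
 * break by restarting one of the transactions involved - unless @cycle is
 * non-NULL (the debugfs path), in which case we only report what we found:
 */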
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	btree_path_idx_t path_idx;
	int ret = 0;

	g.nr = 0;

	if (trans->lock_must_abort) {
		if (cycle)
			return -1;

		trace_would_deadlock(&g, trans);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	lock_graph_down(&g, trans);

	/* trans->paths is rcu protected vs. freeing */
	rcu_read_lock();
	if (cycle)
		cycle->atomic++;
next:
	if (!g.nr)
		goto out;

	top = &g.g[g.nr - 1];

	struct btree_path *paths = rcu_dereference(top->trans->paths);
	if (!paths)
		goto up;

	unsigned long *paths_allocated = trans_paths_allocated(paths);

	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths),
				     path_idx, top->path_idx) {
		struct btree_path *path = paths + path_idx;
		if (!path->nodes_locked)
			continue;

		if (path_idx != top->path_idx) {
			top->path_idx		= path_idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (IS_ERR_OR_NULL(b)) {
				/*
				 * If we get here, it means we raced with the
				 * other thread updating its btree_path
				 * structures - which means it can't be blocked
				 * waiting on a lock:
				 */
				if (!lock_graph_remove_non_waiters(&g)) {
					/*
					 * If lock_graph_remove_non_waiters()
					 * didn't do anything, it must be
					 * because we're being called by debugfs
					 * checking for lock cycles, which
					 * invokes us on btree_transactions that
					 * aren't actually waiting on anything.
					 * Just bail out:
					 */
					lock_graph_pop_all(&g);
				}

				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				closure_get(&trans->ref);
				raw_spin_unlock(&b->lock.wait_lock);

				ret = lock_graph_descend(&g, trans, cycle);
				if (ret)
					goto out;
				goto next;

			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}
up:
	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_up(&g);
	goto next;
out:
	if (cycle)
		--cycle->atomic;
	rcu_read_unlock();
	return ret;
}

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}

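/*
 * Take a write lock on a node this transaction already has intent locked,
 * temporarily dropping our own read locks on it so the write lock can be
 * acquired:
 */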
int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
				       lock_may_not_fail, _RET_IP_);
	six_lock_readers_add(&b->lock, readers);

	if (ret)
		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);

	return ret;
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b)
{
	struct btree_path *linked;
	unsigned i, iter;
	int ret;

	/*
	 * XXX BIG FAT NOTICE
	 *
	 * Drop all read locks before taking a write lock:
	 *
	 * This is a hack, because bch2_btree_node_lock_write_nofail() is a
	 * hack - but by dropping read locks first, this should never fail, and
	 * we only use this in code paths where whatever read locks we've
	 * already taken are no longer needed:
	 */

	trans_for_each_path(trans, linked, iter) {
		if (!linked->nodes_locked)
			continue;

		for (i = 0; i < BTREE_MAX_DEPTH; i++)
			if (btree_node_read_locked(linked, i)) {
				btree_node_unlock(trans, linked, i);
				btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
			}
	}

	ret = __btree_node_lock_write(trans, path, b, true);
	BUG_ON(ret);
}

/* relock */

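/*
 * Relock (or, with @upgrade, upgrade) every node on @path from path->level up
 * to path->locks_want. On failure the path is fully unlocked and the level
 * that failed is recorded in @f, so the next traverse knows where it has to
 * walk back up to:
 */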
static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade,
					struct get_locks_fail *f)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l))) {
			fail_idx	= l;

			if (f) {
				f->l	= l;
				f->b	= path->l[l].b;
			}
		}

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	bch2_trans_verify_locks(trans);

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

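/*
 * Retake a lock we previously held on a node, either by revalidating our old
 * lock sequence number or by bumping the count on a lock we already hold via
 * another path:
 */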
bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace && !trans->notrace_relock_fail)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
	return false;
}

/* upgrade */

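/*
 * Upgrade to an intent lock at @level: try upgrading the lock directly
 * (dropping this transaction's other read locks on the node first), try
 * relocking with an intent lock, or piggyback on an intent lock already held
 * via another path:
 */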
bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;
	struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)) {
		bool ret;

		six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
		ret = six_lock_tryupgrade(&b->c.lock);
		six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

		if (ret)
			goto success;
	} else {
		if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
			goto success;
	}

	/*
	 * Do we already have an intent lock via another path? If so, just bump
	 * lock count:
	 */
	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
	return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
{
	struct get_locks_fail f;

	return btree_path_get_locks(trans, path, false, &f);
}

int __bch2_btree_path_relock(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want,
			       struct get_locks_fail *f)
{
	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	return btree_path_get_locks(trans, path, true, f);
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want,
			       struct get_locks_fail *f)
{
	if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all) {
		struct btree_path *linked;
		unsigned i;

		trans_for_each_path(trans, linked, i)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true, NULL);
			}
	}

	return false;
}

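/*
 * Lower @path->locks_want and drop the locks we no longer need: locked levels
 * at or above the new locks_want are unlocked if they're above the path's own
 * level, and an intent lock at the path's level is downgraded to a read lock:
 */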
void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l, old_locks_want = path->locks_want;

	if (trans->restarted)
		return;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);

	trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (trans->restarted)
		return;

	trans_for_each_path(trans, path, i)
		bch2_btree_path_downgrade(trans, path);
}

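/*
 * Retake the locks on every path marked should_be_locked - e.g. after the
 * transaction was unlocked in order to block. If any path can't be relocked,
 * emit a tracepoint describing the failure and restart the transaction:
 */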
int bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path, i) {
		struct get_locks_fail f;

		if (path->should_be_locked &&
		    !btree_path_get_locks(trans, path, false, &f)) {
			if (trace_trans_restart_relock_enabled()) {
				struct printbuf buf = PRINTBUF;

				bch2_bpos_to_text(&buf, path->pos);
				prt_printf(&buf, " l=%u seq=%u node seq=",
					   f.l, path->l[f.l].lock_seq);
				if (IS_ERR_OR_NULL(f.b)) {
					prt_str(&buf, bch2_err_str(PTR_ERR(f.b)));
				} else {
					prt_printf(&buf, "%u", f.b->c.lock.seq);

					struct six_lock_count c =
						bch2_btree_node_lock_counts(trans, NULL, &f.b->c, f.l);
					prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);

					c = six_lock_counts(&f.b->c.lock);
					prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
				}

				trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
				printbuf_exit(&buf);
			}

			count_event(trans->c, trans_restart_relock);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		}
	}

	return 0;
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path, i)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path)) {
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		}
	return 0;
}

void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_unlock(trans, path);
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_unlock(trans, path);
}

void bch2_trans_unlock_long(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	bch2_trans_srcu_unlock(trans);
}

bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (path->nodes_locked)
			return true;
	return false;
}

int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
	int ret = drop_locks_do(trans, (mutex_lock(lock), 0));

	if (ret)
		mutex_unlock(lock);
	return ret;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
	unsigned l;

	if (!path->nodes_locked) {
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
		       btree_path_node(path, path->level));
		return;
	}

	for (l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) &&
		       (want == BTREE_NODE_UNLOCKED ||
			have != BTREE_NODE_WRITE_LOCKED) &&
		       want != have);
	}
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		bch2_btree_path_verify_locks(path);
}

#endif