drivers/md/bcache/extents.c (v6.2)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */
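
/*
 * Illustrative sketch, not part of the original file: the "8 bit counter"
 * above is the bucket's generation number. Each pointer embeds the
 * generation it was created against (PTR_GEN()); once the bucket's
 * generation has moved past it, the pointer is stale and garbage collection
 * may drop it. Signed 8-bit subtraction keeps the comparison correct across
 * wraparound, in the spirit of bcache's gen_after() helper:
 */
#if 0	/* example only, assumed helper shape */
static inline bool example_gen_after(uint8_t a, uint8_t b)
{
	/* true if generation a is newer than b, modulo 256 */
	return (int8_t)(a - b) > 0;
}
#endif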

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "writeback.h"

static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}

static bool bch_key_sort_cmp(struct btree_iter_set l,
			     struct btree_iter_set r)
{
	int64_t c = bkey_cmp(l.k, r.k);

	return c ? c > 0 : l.k < r.k;
}

static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = c->cache;
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}
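
/*
 * Worked example (values are illustrative): with sb.bucket_size == 1024
 * sectors and a pointer landing 1000 sectors into its bucket (r == 1000),
 * a key with KEY_SIZE(k) == 32 gives 32 + 1000 > 1024, i.e. the extent
 * would spill past the bucket boundary, so __ptr_invalid() rejects it.
 */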

/* Common among btree and extent ptrs */

static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = c->cache;
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
				return "bad, length too big";
			if (bucket <  ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}

void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned int i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (i)
			p(", ");

		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
			p("check dev");
		else
			p("%llu:%llu gen %llu", PTR_DEV(k, i),
			  PTR_OFFSET(k, i), PTR_GEN(k, i));
	}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
}
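
/*
 * Example rendering (illustrative): a dirty key on inode 1 covering
 * sectors 2048..2064, with one pointer to device 0 at offset 1024, gen 3,
 * comes out as:
 *
 *	1:2048 len 16 -> [0:1024 gen 3] dirty
 */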

static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
	struct btree *b = container_of(keys, struct btree, keys);
	unsigned int j;
	char buf[80];

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_cont(" %s", buf);

	for (j = 0; j < KEY_PTRS(k); j++) {
		size_t n = PTR_BUCKET_NR(b->c, k, j);

		pr_cont(" bucket %zu", n);
		if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
			pr_cont(" prio %i",
				PTR_BUCKET(b->c, k, j)->prio);
	}

	pr_cont(" %s\n", bch_ptr_status(b->c, k));
}

/* Btree ptrs */

bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_btree_ptr_invalid(b->c, k);
}

static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
	unsigned int i;
	char buf[80];
	struct bucket *g;

	if (mutex_trylock(&b->c->bucket_lock)) {
		for (i = 0; i < KEY_PTRS(k); i++)
			if (ptr_available(b->c, k, i)) {
				g = PTR_BUCKET(b->c, k, i);

				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto err;
			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}

static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}

static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
				       struct bkey *insert,
				       struct btree_iter *iter,
				       struct bkey *replace_key)
{
	struct btree *b = container_of(bk, struct btree, keys);

	if (!KEY_OFFSET(insert))
		btree_current_write(b)->prio_blocked++;

	return false;
}

const struct btree_keys_ops bch_btree_keys_ops = {
	.sort_cmp	= bch_key_sort_cmp,
	.insert_fixup	= bch_btree_ptr_insert_fixup,
	.key_invalid	= bch_btree_ptr_invalid,
	.key_bad	= bch_btree_ptr_bad,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
};

/* Extents */

/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
static bool bch_extent_sort_cmp(struct btree_iter_set l,
				struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	return c ? c > 0 : l.k < r.k;
}

static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	while (iter->used > 1) {
		struct btree_iter_set *top = iter->data, *i = top + 1;

		if (iter->used > 2 &&
		    bch_extent_sort_cmp(i[0], i[1]))
			i++;

		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			heap_sift(iter, i - top, bch_extent_sort_cmp);
			continue;
		}

		if (top->k > i->k) {
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			heap_sift(iter, i - top, bch_extent_sort_cmp);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				heap_sift(iter, 0, bch_extent_sort_cmp);

				return tmp;
			} else {
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}
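
/*
 * Sketch of the overlap handling above (illustrative): top and i are the
 * two positions with the smallest start keys, and a higher key address
 * means a newer set. When the newer key overlaps the start of the older
 * one, the older key loses the overlapped range (bch_cut_front()) or is
 * skipped outright if fully covered (sort_key_next()). When the older key
 * surrounds the newer one, it is split: a copy cut back to where the newer
 * key starts is returned via tmp to be emitted first, and the original is
 * cut front to where the newer key ends.
 */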

static void bch_subtract_dirty(struct bkey *k,
			   struct cache_set *c,
			   uint64_t offset,
			   int sectors)
{
	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
					     offset, -sectors);
}

static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned int old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);

		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned int i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
				       KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}
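
/*
 * Worked example (illustrative): inserting a key covering sectors 8..16
 * over an existing k covering 0..24 hits the "overlapped in the middle"
 * case above. k ends up split in two: one key keeps 0..8 (cut back to the
 * start of the insert) and the other keeps 16..24 (cut front past the end
 * of the insert); which half is the newly inserted key depends on whether
 * k had already been written out.
 */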

bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_extent_invalid(b->c, k);
}

static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned int ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->c->gc_mark_valid &&
		    (!GC_MARK(g) ||
		     GC_MARK(g) == GC_MARK_METADATA ||
		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
			goto err;

		if (g->prio == BTREE_PRIO)
			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}

static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i, stale;
	char buf[80];

	if (!KEY_PTRS(k) ||
	    bch_extent_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i))
			return true;

	for (i = 0; i < KEY_PTRS(k); i++) {
		stale = ptr_stale(b->c, k, i);

		if (stale && KEY_DIRTY(k)) {
			bch_extent_to_text(buf, sizeof(buf), k);
			pr_info("stale dirty pointer, stale %u, key: %s\n",
				stale, buf);
		}

		btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    bch_extent_bad_expensive(b, k, i))
			return true;
	}

	return false;
}

static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}

static bool bch_extent_merge(struct btree_keys *bk,
			     struct bkey *l,
			     struct bkey *r)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i;

	if (key_merging_disabled(b->c))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}
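
/*
 * Worked example (illustrative): KEY_OFFSET() is the end of an extent, so
 * merging l covering 0..8 (offset 8, size 8) with an adjacent r covering
 * 8..16 (offset 16, size 8) sets l's offset to 8 + 8 = 16 and its size to
 * 8 + 8 = 16, one key covering 0..16 - provided the pointers are contiguous
 * within a single bucket and the merged size fits in KEY_SIZE().
 */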

const struct btree_keys_ops bch_extent_keys_ops = {
	.sort_cmp	= bch_extent_sort_cmp,
	.sort_fixup	= bch_extent_sort_fixup,
	.insert_fixup	= bch_extent_insert_fixup,
	.key_invalid	= bch_extent_invalid,
	.key_bad	= bch_extent_bad,
	.key_merge	= bch_extent_merge,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
	.is_extents	= true,
};
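
Between v6.2 and v6.13.7 the only substantive change in this file is the heap behind the btree iterator: the bespoke bcache heap (iter->used / iter->data, heap_sift()) was replaced by the kernel's generic min-heap (iter->heap.nr / iter->heap.data, min_heap_sift_down() with a struct min_heap_callbacks). The comparator convention flips with it, which is why the new comparators wrap the old expression in a negation. A minimal sketch of the two conventions, using the comparator bodies that appear in both listings:

/* v6.2: heap_sift() comparator; returning true sends r toward the top */
static bool old_style_cmp(struct btree_iter_set l, struct btree_iter_set r)
{
	int64_t c = bkey_cmp(l.k, r.k);

	return c ? c > 0 : l.k < r.k;
}

/* v6.13.7: min_heap less(); returning true sends l toward the top, so the
 * same ordering needs the negated expression
 */
static bool min_heap_style_less(const void *l, const void *r, void *args)
{
	const struct btree_iter_set *_l = l, *_r = r;
	int64_t c = bkey_cmp(_l->k, _r->k);

	return !(c ? c > 0 : _l->k < _r->k);
}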
drivers/md/bcache/extents.c (v6.13.7)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "writeback.h"

static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->heap.data[--iter->heap.nr];
}

static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args)
{
	struct btree_iter_set *_l = (struct btree_iter_set *)l;
	struct btree_iter_set *_r = (struct btree_iter_set *)r;
	int64_t c = bkey_cmp(_l->k, _r->k);

	return !(c ? c > 0 : _l->k < _r->k);
}

static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = c->cache;
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}

/* Common among btree and extent ptrs */

static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = c->cache;
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
				return "bad, length too big";
			if (bucket <  ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}

void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned int i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (i)
			p(", ");

		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
			p("check dev");
		else
			p("%llu:%llu gen %llu", PTR_DEV(k, i),
			  PTR_OFFSET(k, i), PTR_GEN(k, i));
	}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
}

static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
	struct btree *b = container_of(keys, struct btree, keys);
	unsigned int j;
	char buf[80];

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_cont(" %s", buf);

	for (j = 0; j < KEY_PTRS(k); j++) {
		size_t n = PTR_BUCKET_NR(b->c, k, j);

		pr_cont(" bucket %zu", n);
		if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
			pr_cont(" prio %i",
				PTR_BUCKET(b->c, k, j)->prio);
	}

	pr_cont(" %s\n", bch_ptr_status(b->c, k));
}

/* Btree ptrs */

bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_btree_ptr_invalid(b->c, k);
}

static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
	unsigned int i;
	char buf[80];
	struct bucket *g;

	if (mutex_trylock(&b->c->bucket_lock)) {
		for (i = 0; i < KEY_PTRS(k); i++)
			if (ptr_available(b->c, k, i)) {
				g = PTR_BUCKET(b->c, k, i);

				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto err;
			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}

static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}

static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
				       struct bkey *insert,
				       struct btree_iter *iter,
				       struct bkey *replace_key)
{
	struct btree *b = container_of(bk, struct btree, keys);

	if (!KEY_OFFSET(insert))
		btree_current_write(b)->prio_blocked++;

	return false;
}

const struct btree_keys_ops bch_btree_keys_ops = {
	.sort_cmp	= new_bch_key_sort_cmp,
	.insert_fixup	= bch_btree_ptr_insert_fixup,
	.key_invalid	= bch_btree_ptr_invalid,
	.key_bad	= bch_btree_ptr_bad,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
};

/* Extents */

/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */

static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_unused *args)
{
	struct btree_iter_set *_l = (struct btree_iter_set *)l;
	struct btree_iter_set *_r = (struct btree_iter_set *)r;
	int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k));

	return !(c ? c > 0 : _l->k < _r->k);
}

static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	const struct min_heap_callbacks callbacks = {
		.less = new_bch_extent_sort_cmp,
		.swp = NULL,
	};
	while (iter->heap.nr > 1) {
		struct btree_iter_set *top = iter->heap.data, *i = top + 1;

		if (iter->heap.nr > 2 &&
		    !new_bch_extent_sort_cmp(&i[0], &i[1], NULL))
			i++;

		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
			continue;
		}

		if (top->k > i->k) {
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);

				return tmp;
			} else {
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}

static void bch_subtract_dirty(struct bkey *k,
			   struct cache_set *c,
			   uint64_t offset,
			   int sectors)
{
	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
					     offset, -sectors);
}

static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned int old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);

		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned int i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
				       KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}

bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_extent_invalid(b->c, k);
}

static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned int ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->c->gc_mark_valid &&
		    (!GC_MARK(g) ||
		     GC_MARK(g) == GC_MARK_METADATA ||
		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
			goto err;

		if (g->prio == BTREE_PRIO)
			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}

static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i, stale;
	char buf[80];

	if (!KEY_PTRS(k) ||
	    bch_extent_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i))
			return true;

	for (i = 0; i < KEY_PTRS(k); i++) {
		stale = ptr_stale(b->c, k, i);

		if (stale && KEY_DIRTY(k)) {
			bch_extent_to_text(buf, sizeof(buf), k);
			pr_info("stale dirty pointer, stale %u, key: %s\n",
				stale, buf);
		}

		btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    bch_extent_bad_expensive(b, k, i))
			return true;
	}

	return false;
}

static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}

static bool bch_extent_merge(struct btree_keys *bk,
			     struct bkey *l,
			     struct bkey *r)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i;

	if (key_merging_disabled(b->c))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}

const struct btree_keys_ops bch_extent_keys_ops = {
	.sort_cmp	= new_bch_extent_sort_cmp,
	.sort_fixup	= bch_extent_sort_fixup,
	.insert_fixup	= bch_extent_insert_fixup,
	.key_invalid	= bch_extent_invalid,
	.key_bad	= bch_extent_bad,
	.key_merge	= bch_extent_merge,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
	.is_extents	= true,
};
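
Both ops tables plug into the btree_keys vtable; callers reach these functions through thin wrappers rather than calling them directly, and which table a node uses depends on its level (extents at the leaves, btree pointers above). A minimal sketch of the dispatch, assuming wrappers shaped like those in drivers/md/bcache/bset.h:

static inline bool sketch_ptr_bad(struct btree_keys *b, const struct bkey *k)
{
	/* bch_extent_bad() for leaf nodes, bch_btree_ptr_bad() above them */
	return b->ops->key_bad(b, k);
}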