/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices is intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There's also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things to
 * it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is the
 * unit of allocation; they're typically around 1 mb - anywhere from 128k to 2M+
 * works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree; insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, and for background writeback, and for
 * the moving garbage collector.
 *
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1Mb of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of amount of metadata writes,
 * and it puts more strain on the various btree resorting/compacting code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */
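
/*
 * A worked example of the key layout described above (a sketch, not part
 * of the upstream header): with the end-offset convention, an extent
 * covering sectors 0..7 of inode 5 is built as KEY(5, 8, 8) - the KEY()
 * macro lives in bcache_ondisk.h - and a lookup for sector s roughly
 * scans for the first key whose KEY_OFFSET() is greater than s, which is
 * why storing the ending offset makes lookups slightly more convenient.
 */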

#define pr_fmt(fmt) "bcache: %s() " fmt, __func__

#include <linux/bio.h>
#include <linux/closure.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include "bcache_ondisk.h"
#include "bset.h"
#include "util.h"

struct bucket {
	atomic_t	pin;
	uint16_t	prio;
	uint8_t		gen;
	uint8_t		last_gc; /* Most out of date gen in the btree */
	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	1
#define GC_MARK_DIRTY		2
#define GC_MARK_METADATA	3
#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
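
/*
 * BITMASK() (from bcache_ondisk.h) generates inline accessors for a bit
 * range of the named field: each BITMASK(NAME, ...) above defines NAME(b)
 * to read the bits and SET_NAME(b, v) to write them. An illustrative use,
 * a sketch rather than code copied from the driver:
 *
 *	SET_GC_MARK(b, GC_MARK_DIRTY);
 *	if (GC_MARK(b) == GC_MARK_DIRTY)
 *		SET_GC_SECTORS_USED(b, min_t(unsigned int,
 *					     sectors, MAX_GC_SECTORS_USED));
 */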

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
	struct rb_node		node;
	BKEY_PADDED(key);
	void			*private;
};

struct keybuf {
	struct bkey		last_scanned;
	spinlock_t		lock;

	/*
	 * Beginning and end of range in rb tree - so that we can skip taking
	 * lock and checking the rb tree when we need to check for overlapping
	 * keys.
	 */
	struct bkey		start;
	struct bkey		end;

	struct rb_root		keys;

#define KEYBUF_NR		500
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};

struct bcache_device {
	struct closure		cl;

	struct kobject		kobj;

	struct cache_set	*c;
	unsigned int		id;
#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

	struct gendisk		*disk;

	unsigned long		flags;
#define BCACHE_DEV_CLOSING		0
#define BCACHE_DEV_DETACHING		1
#define BCACHE_DEV_UNLINK_DONE		2
#define BCACHE_DEV_WB_RUNNING		3
#define BCACHE_DEV_RATE_DW_RUNNING	4
	int			nr_stripes;
#define BCH_MIN_STRIPE_SZ		((4 << 20) >> SECTOR_SHIFT)
	unsigned int		stripe_size;
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

	struct bio_set		bio_split;

	unsigned int		data_csum:1;

	int (*cache_miss)(struct btree *b, struct search *s,
			  struct bio *bio, unsigned int sectors);
	int (*ioctl)(struct bcache_device *d, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg);
};

struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		jiffies;
	unsigned int		sequential;
	sector_t		last;
};

enum stop_on_failure {
	BCH_CACHED_DEV_STOP_AUTO = 0,
	BCH_CACHED_DEV_STOP_ALWAYS,
	BCH_CACHED_DEV_STOP_MODE_MAX,
};

struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;
	struct block_device	*bdev;
	struct bdev_handle	*bdev_handle;

	struct cache_sb		sb;
	struct cache_sb_disk	*sb_disk;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	/* Refcount on the cache set. Always nonzero when we're caching. */
	refcount_t		count;
	struct work_struct	detach;

	/*
	 * Device might not be running if it's dirty and the cache set hasn't
	 * shown up yet.
	 */
	atomic_t		running;

	/*
	 * Writes take a shared lock from start to finish; scanning for dirty
	 * data to refill the rb tree requires an exclusive lock.
	 */
	struct rw_semaphore	writeback_lock;

	/*
	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
	 * data in the cache. Protected by writeback_lock; must have a
	 * shared lock to set and exclusive lock to clear.
	 */
	atomic_t		has_dirty;

#define BCH_CACHE_READA_ALL		0
#define BCH_CACHE_READA_META_ONLY	1
	unsigned int		cache_readahead_policy;
	struct bch_ratelimit	writeback_rate;
	struct delayed_work	writeback_rate_update;

	/* Limit number of writeback bios in flight */
	struct semaphore	in_flight;
	struct task_struct	*writeback_thread;
	struct workqueue_struct	*writeback_write_wq;

	struct keybuf		writeback_keys;

	struct task_struct	*status_update_thread;
	/*
	 * Order the write-half of writeback operations strongly in dispatch
	 * order.  (Maintain LBA order; don't allow reads completing out of
	 * order to re-order the writes...)
	 */
	struct closure_waitlist writeback_ordering_wait;
	atomic_t		writeback_sequence_next;

	/* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting	accounting;

	/* The rest of this all shows up in sysfs */
	unsigned int		sequential_cutoff;

	unsigned int		io_disable:1;
	unsigned int		verify:1;
	unsigned int		bypass_torture_test:1;

	unsigned int		partial_stripes_expensive:1;
	unsigned int		writeback_metadata:1;
	unsigned int		writeback_running:1;
	unsigned int		writeback_consider_fragment:1;
	unsigned char		writeback_percent;
	unsigned int		writeback_delay;

	uint64_t		writeback_rate_target;
	int64_t			writeback_rate_proportional;
	int64_t			writeback_rate_integral;
	int64_t			writeback_rate_integral_scaled;
	int32_t			writeback_rate_change;

	unsigned int		writeback_rate_update_seconds;
	unsigned int		writeback_rate_i_term_inverse;
	unsigned int		writeback_rate_p_term_inverse;
	unsigned int		writeback_rate_fp_term_low;
	unsigned int		writeback_rate_fp_term_mid;
	unsigned int		writeback_rate_fp_term_high;
	unsigned int		writeback_rate_minimum;

	enum stop_on_failure	stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
	atomic_t		io_errors;
	unsigned int		error_limit;
	unsigned int		offline_seconds;

	/*
	 * Retry to update writeback_rate if contention happens for
	 * down_read(dc->writeback_lock) in update_writeback_rate()
	 */
#define BCH_WBRATE_UPDATE_MAX_SKIPS	15
	unsigned int		rate_update_retry;
};

enum alloc_reserve {
	RESERVE_BTREE,
	RESERVE_PRIO,
	RESERVE_MOVINGGC,
	RESERVE_NONE,
	RESERVE_NR,
};

struct cache {
	struct cache_set	*set;
	struct cache_sb		sb;
	struct cache_sb_disk	*sb_disk;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];

	struct kobject		kobj;
	struct block_device	*bdev;
	struct bdev_handle	*bdev_handle;

	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;

	/*
	 * When allocating new buckets, prio_write() gets first dibs - since we
	 * may not be able to allocate at all without writing priorities and
	 * gens. prio_last_buckets[] contains the last buckets we wrote
	 * priorities to (so gc can mark them as metadata), prio_buckets[]
	 * contains the buckets allocated for the next prio write.
	 */
	uint64_t		*prio_buckets;
	uint64_t		*prio_last_buckets;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly discarded
	 * in the process)
	 */
	DECLARE_FIFO(long, free)[RESERVE_NR];
	DECLARE_FIFO(long, free_inc);

	size_t			fifo_last_bucket;

	/* Allocation stuff: */
	struct bucket		*buckets;

	DECLARE_HEAP(struct bucket *, heap);

	/*
	 * If nonzero, we know we aren't going to find any buckets to invalidate
	 * until a gc finishes - otherwise we could pointlessly burn a ton of
	 * cpu
	 */
	unsigned int		invalidate_needs_gc;

	bool			discard; /* Get rid of? */

	struct journal_device	journal;

	/* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT		20
	atomic_t		io_errors;
	atomic_t		io_count;

	atomic_long_t		meta_sectors_written;
	atomic_long_t		btree_sectors_written;
	atomic_long_t		sectors_written;
};

struct gc_stat {
	size_t			nodes;
	size_t			nodes_pre;
	size_t			key_bytes;

	size_t			nkeys;
	uint64_t		data;	/* sectors */
	unsigned int		in_use; /* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
 * flushing dirty data).
 *
 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 * replay is complete.
 *
 * CACHE_SET_IO_DISABLE is set when bcache is stopping the whole cache set; all
 * external and internal I/O should be denied when this flag is set.
 */
#define CACHE_SET_UNREGISTERING		0
#define	CACHE_SET_STOPPING		1
#define	CACHE_SET_RUNNING		2
#define CACHE_SET_IO_DISABLE		3

struct cache_set {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct dentry		*debug;
	struct cache_accounting accounting;

	unsigned long		flags;
	atomic_t		idle_counter;
	atomic_t		at_max_writeback_rate;

	struct cache		*cache;

	struct bcache_device	**devices;
	unsigned int		devices_max_used;
	atomic_t		attached_dev_nr;
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
	atomic_long_t		flash_dev_dirty_sectors;
	struct closure		caching;

	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	mempool_t		search;
	mempool_t		bio_meta;
	struct bio_set		bio_split;

	/* For the btree cache */
	struct shrinker		*shrink;

	/* For the btree cache and anything allocation related */
	struct mutex		bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short		bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short		block_bits;

	/*
	 * Default number of pages for a new btree node - may be less than a
	 * full bucket
	 */
	unsigned int		btree_pages;

	/*
	 * Lists of struct btrees; lru is the list for structs that have memory
	 * allocated for actual btree node, freed is for structs that do not.
	 *
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct list_head	btree_cache;
	struct list_head	btree_cache_freeable;
	struct list_head	btree_cache_freed;

	/* Number of elements in btree_cache + btree_cache_freeable lists */
	unsigned int		btree_cache_used;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	wait_queue_head_t	btree_cache_wait;
	struct task_struct	*btree_cache_alloc_lock;
	spinlock_t		btree_cannibalize_lock;

	/*
	 * When we free a btree node, we increment the gen of the bucket the
	 * node is in - but we can't rewrite the prios and gens until we
	 * finished whatever it is we were doing, otherwise after a crash the
	 * btree node would be freed but for say a split, we might not have the
	 * pointers to the new nodes inserted into the btree yet.
	 *
	 * This is a refcount that blocks prio_write() until the new keys are
	 * written.
	 */
	atomic_t		prio_blocked;
	wait_queue_head_t	bucket_wait;

	/*
	 * For any bio we don't skip we subtract the number of sectors from
	 * rescale; when it hits 0 we rescale all the bucket priorities.
	 */
	atomic_t		rescale;
	/*
	 * Used by GC to identify whether any front-end I/O is in flight.
	 */
	atomic_t		search_inflight;
	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	uint16_t		min_prio;

	/*
	 * max(gen - last_gc) for all buckets. When it gets too big we have to
	 * gc to keep gens from wrapping around.
	 */
	uint8_t			need_gc;
	struct gc_stat		gc_stats;
	size_t			nbuckets;
	size_t			avail_nbuckets;

	struct task_struct	*gc_thread;
	/* Where in the btree gc currently is */
	struct bkey		gc_done;

	/*
	 * For automatic garbage collection after writeback completes, this
	 * variable is used as bit fields:
	 * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
	 * - 0000 0010b (BCH_DO_AUTO_GC):     do gc after writeback
	 * This is an optimization for write requests that follow right after
	 * writeback finishes, when the read hit rate would otherwise drop
	 * because clean data in the cache has been discarded. Unless the user
	 * explicitly sets it via sysfs, it won't be enabled.
	 */
#define BCH_ENABLE_AUTO_GC	1
#define BCH_DO_AUTO_GC		2
	uint8_t			gc_after_writeback;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress. Protected by bucket_lock.
	 */
	int			gc_mark_valid;

	/* Counts how many sectors bio_insert has added to the cache */
	atomic_t		sectors_to_gc;
	wait_queue_head_t	gc_wait;

	struct keybuf		moving_gc_keys;
	/* Number of moving GC bios in flight */
	struct semaphore	moving_in_flight;

	struct workqueue_struct	*moving_gc_wq;

	struct btree		*root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree		*verify_data;
	struct bset		*verify_ondisk;
	struct mutex		verify_lock;
#endif

	uint8_t			set_uuid[16];
	unsigned int		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure		uuid_write;
	struct semaphore	uuid_write_mutex;

	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - we have to dynamically allocate them.
	 * bch_cache_set_alloc() will make sure the pool can allocate iterators
	 * with enough room to host
	 *     (sb.bucket_size / sb.block_size)
	 * btree_iter_sets, which is more than the static MAX_BSETS.
	 */
	mempool_t		fill_iter;

	struct bset_sort_state	sort;

	/* List of buckets we're currently writing data to */
	struct list_head	data_buckets;
	spinlock_t		data_bucket_lock;

	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned int		congested_last_us;
	atomic_t		congested;

	/* The rest of this all shows up in sysfs */
	unsigned int		congested_read_threshold_us;
	unsigned int		congested_write_threshold_us;

	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
	struct time_stats	btree_read_time;

	atomic_long_t		cache_read_races;
	atomic_long_t		writeback_keys_done;
	atomic_long_t		writeback_keys_failed;

	atomic_long_t		reclaim;
	atomic_long_t		reclaimed_journal_buckets;
	atomic_long_t		flush_write;

	enum			{
		ON_ERROR_UNREGISTER,
		ON_ERROR_PANIC,
	}			on_error;
#define DEFAULT_IO_ERROR_LIMIT 8
	unsigned int		error_limit;
	unsigned int		error_decay;

	unsigned short		journal_delay_ms;
	bool			expensive_debug_checks;
	unsigned int		verify:1;
	unsigned int		key_merging_disabled:1;
	unsigned int		gc_always_rewrite:1;
	unsigned int		shrinker_disabled:1;
	unsigned int		copy_gc_enabled:1;
	unsigned int		idle_max_writeback_rate_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
};

struct bbio {
	unsigned int		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
		/*
		 * We only need pad = 3 here because we only ever carry around a
		 * single pointer - i.e. the pointer we're doing io to/from.
		 */
	};
	struct bio		bio;
};

#define BTREE_PRIO		USHRT_MAX
#define INITIAL_PRIO		32768U

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)							\
	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)						\
	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_bytes(ca)	((ca)->sb.bucket_size << 9)
#define block_bytes(ca)		((ca)->sb.block_size << 9)

static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
{
	unsigned int n, max_pages;

	max_pages = min_t(unsigned int,
			  __rounddown_pow_of_two(USHRT_MAX) / PAGE_SECTORS,
			  MAX_ORDER_NR_PAGES);

	n = sb->bucket_size / PAGE_SECTORS;
	if (n > max_pages)
		n = max_pages;

	return n;
}

static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
{
	return meta_bucket_pages(sb) << PAGE_SHIFT;
}
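
/*
 * Worked example (illustrative, not part of the upstream header): with
 * 4 KiB pages (PAGE_SECTORS == 8) and sb->bucket_size == 1024 sectors
 * (a 512 KiB bucket), meta_bucket_pages() returns 1024 / 8 = 128 pages,
 * well under the cap derived from USHRT_MAX and MAX_ORDER_NR_PAGES, so
 * meta_bucket_bytes() is 128 << PAGE_SHIFT = 512 KiB.
 */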

#define prios_per_bucket(ca)						\
	((meta_bucket_bytes(&(ca)->sb) - sizeof(struct prio_set)) /	\
	 sizeof(struct bucket_disk))

#define prio_buckets(ca)						\
	DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->cache->sb.bucket_size - 1);
}
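
/*
 * Worked example (illustrative, not part of the upstream header): with
 * bucket_bits == 10 (1024-sector, i.e. 512 KiB, buckets), sector 123456
 * lands in bucket sector_to_bucket() = 123456 >> 10 = 120, that bucket
 * starts at bucket_to_sector() = 120 << 10 = 122880, and
 * bucket_remainder() = 123456 & 1023 = 576 sectors into the bucket.
 * The shift and mask only work because bucket_size is a power of two.
 */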

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned int ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned int ptr)
{
	return c->cache->buckets + PTR_BUCKET_NR(c, k, ptr);
}

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;

	return r > 128U ? 0 : r;
}
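
/*
 * gen_after() does modulo-256 arithmetic so that generation comparisons
 * survive the 8 bit gen wrapping around. Worked example (illustrative):
 * gen_after(2, 250) computes (uint8_t)(2 - 250) = 8, i.e. gen 2 is 8
 * generations newer than gen 250 despite the wrap; gen_after(250, 2)
 * computes 248, which is > 128U and therefore treated as "not after"
 * and clamped to 0.
 */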

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned int i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned int i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && c->cache;
}

/* Btree key macros */

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: The checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i)							\
	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
		  ((void *) bset_bkey_last(i)) -			\
		  (((void *) (i)) + sizeof(uint64_t)))
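
/*
 * Typical use (a sketch, not code from the upstream header): since the
 * checksum is the leading u64 of these structs, a writer fills in the
 * rest of the struct and then stores the checksum over everything that
 * follows it, e.g.
 *
 *	i->csum = csum_set(i);
 *
 * and a reader recomputes csum_set(i) and compares it against i->csum to
 * detect torn or corrupt metadata writes.
 */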

/* Error handling macros */

#define btree_bug(b, ...)						\
do {									\
	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define cache_bug(c, ...)						\
do {									\
	if (bch_cache_set_error(c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define btree_bug_on(cond, b, ...)					\
do {									\
	if (cond)							\
		btree_bug(b, __VA_ARGS__);				\
} while (0)

#define cache_bug_on(cond, c, ...)					\
do {									\
	if (cond)							\
		cache_bug(c, __VA_ARGS__);				\
} while (0)

#define cache_set_err_on(cond, c, ...)					\
do {									\
	if (cond)							\
		bch_cache_set_error(c, __VA_ARGS__);			\
} while (0)

/* Looping macros */

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (refcount_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!refcount_inc_not_zero(&dc->count))
		return false;

	/* Paired with the mb in cached_dev_attach */
	smp_mb__after_atomic();
	return true;
}
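
/*
 * Usage sketch (illustrative, not code from the upstream header): callers
 * that want to do IO against an attached backing device take a reference
 * first, and the final put schedules the detach work:
 *
 *	if (cached_dev_get(dc)) {
 *		... issue IO using dc ...
 *		cached_dev_put(dc);
 *	}
 */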

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

#define BUCKET_GC_GEN_MAX	96U
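
/*
 * Note (an assumption about the allocator, not stated in this header):
 * BUCKET_GC_GEN_MAX bounds how far a bucket's gen may run ahead of
 * last_gc before the allocator refuses to invalidate the bucket and
 * waits for a gc instead, keeping the 8 bit gen from wrapping past
 * stale pointers still recorded in the btree.
 */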

#define kobj_attribute_write(n, fn)					\
	static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)

#define kobj_attribute_rw(n, show, store)				\
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, 0600, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca = c->cache;

	wake_up_process(ca->alloc_thread);
}

static inline void closure_bio_submit(struct cache_set *c,
				      struct bio *bio,
				      struct closure *cl)
{
	closure_get(cl);
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}
	submit_bio_noacct(bio);
}
/*
 * Prevent a kthread from exiting directly, and make sure it is still alive
 * when kthread_stop() is called to stop it. If a kthread might be stopped
 * by the CACHE_SET_IO_DISABLE bit being set, wait_for_kthread_stop() is
 * necessary before the kthread returns.
 */
static inline void wait_for_kthread_stop(void)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/* Forward declarations */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
void bch_count_io_errors(struct cache *ca, blk_status_t error,
			 int is_read, const char *m);
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m);
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m);
void bch_bbio_free(struct bio *bio, struct cache_set *c);
struct bio *bch_bbio_alloc(struct cache_set *c);

void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr);

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
void bch_rescale_priorities(struct cache_set *c, int sectors);

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);

void __bch_bucket_free(struct cache *ca, struct bucket *b);
void bch_bucket_free(struct cache_set *c, struct bkey *k);

long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, bool wait);
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, bool wait);
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
		       unsigned int sectors, unsigned int write_point,
		       unsigned int write_prio, bool wait);
bool bch_cached_dev_error(struct cached_dev *dc);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);

int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);

extern struct workqueue_struct *bcache_wq;
extern struct workqueue_struct *bch_journal_wq;
extern struct workqueue_struct *bch_flush_wq;
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern const struct kobj_type bch_cached_dev_ktype;
extern const struct kobj_type bch_flash_dev_ktype;
extern const struct kobj_type bch_cache_set_ktype;
extern const struct kobj_type bch_cache_set_internal_ktype;
extern const struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *kobj);
void bch_flash_dev_release(struct kobject *kobj);
void bch_cache_set_release(struct kobject *kobj);
void bch_cache_release(struct kobject *kobj);

int bch_uuid_write(struct cache_set *c);
void bcache_write_super(struct cache_set *c);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid);
void bch_cached_dev_detach(struct cached_dev *dc);
int bch_cached_dev_run(struct cached_dev *dc);
void bcache_device_stop(struct bcache_device *d);

void bch_cache_set_unregister(struct cache_set *c);
void bch_cache_set_stop(struct cache_set *c);

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
void bch_btree_cache_free(struct cache_set *c);
int bch_btree_cache_alloc(struct cache_set *c);
void bch_moving_init_cache_set(struct cache_set *c);
int bch_open_buckets_alloc(struct cache_set *c);
void bch_open_buckets_free(struct cache_set *c);

int bch_cache_allocator_start(struct cache *ca);

void bch_debug_exit(void);
void bch_debug_init(void);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
int bch_btree_init(void);

#endif /* _BCACHE_H */
v6.2
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _BCACHE_H
   3#define _BCACHE_H
   4
   5/*
   6 * SOME HIGH LEVEL CODE DOCUMENTATION:
   7 *
   8 * Bcache mostly works with cache sets, cache devices, and backing devices.
   9 *
  10 * Support for multiple cache devices hasn't quite been finished off yet, but
  11 * it's about 95% plumbed through. A cache set and its cache devices is sort of
  12 * like a md raid array and its component devices. Most of the code doesn't care
  13 * about individual cache devices, the main abstraction is the cache set.
  14 *
  15 * Multiple cache devices is intended to give us the ability to mirror dirty
  16 * cached data and metadata, without mirroring clean cached data.
  17 *
  18 * Backing devices are different, in that they have a lifetime independent of a
  19 * cache set. When you register a newly formatted backing device it'll come up
  20 * in passthrough mode, and then you can attach and detach a backing device from
  21 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
  22 * invalidates any cached data for that backing device.
  23 *
  24 * A cache set can have multiple (many) backing devices attached to it.
  25 *
  26 * There's also flash only volumes - this is the reason for the distinction
  27 * between struct cached_dev and struct bcache_device. A flash only volume
  28 * works much like a bcache device that has a backing device, except the
  29 * "cached" data is always dirty. The end result is that we get thin
  30 * provisioning with very little additional code.
  31 *
  32 * Flash only volumes work but they're not production ready because the moving
  33 * garbage collector needs more work. More on that later.
  34 *
  35 * BUCKETS/ALLOCATION:
  36 *
  37 * Bcache is primarily designed for caching, which means that in normal
  38 * operation all of our available space will be allocated. Thus, we need an
  39 * efficient way of deleting things from the cache so we can write new things to
  40 * it.
  41 *
  42 * To do this, we first divide the cache device up into buckets. A bucket is the
  43 * unit of allocation; they're typically around 1 mb - anywhere from 128k to 2M+
  44 * works efficiently.
  45 *
  46 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
  47 * it. The gens and priorities for all the buckets are stored contiguously and
  48 * packed on disk (in a linked list of buckets - aside from the superblock, all
  49 * of bcache's metadata is stored in buckets).
  50 *
  51 * The priority is used to implement an LRU. We reset a bucket's priority when
  52 * we allocate it or on cache it, and every so often we decrement the priority
  53 * of each bucket. It could be used to implement something more sophisticated,
  54 * if anyone ever gets around to it.
  55 *
  56 * The generation is used for invalidating buckets. Each pointer also has an 8
  57 * bit generation embedded in it; for a pointer to be considered valid, its gen
  58 * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
  59 * we have to do is increment its gen (and write its new gen to disk; we batch
  60 * this up).
  61 *
  62 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
  63 * contain metadata (including btree nodes).
  64 *
  65 * THE BTREE:
  66 *
  67 * Bcache is in large part design around the btree.
  68 *
  69 * At a high level, the btree is just an index of key -> ptr tuples.
  70 *
  71 * Keys represent extents, and thus have a size field. Keys also have a variable
  72 * number of pointers attached to them (potentially zero, which is handy for
  73 * invalidating the cache).
  74 *
  75 * The key itself is an inode:offset pair. The inode number corresponds to a
  76 * backing device or a flash only volume. The offset is the ending offset of the
  77 * extent within the inode - not the starting offset; this makes lookups
  78 * slightly more convenient.
  79 *
  80 * Pointers contain the cache device id, the offset on that device, and an 8 bit
  81 * generation number. More on the gen later.
  82 *
  83 * Index lookups are not fully abstracted - cache lookups in particular are
  84 * still somewhat mixed in with the btree code, but things are headed in that
  85 * direction.
  86 *
  87 * Updates are fairly well abstracted, though. There are two different ways of
  88 * updating the btree; insert and replace.
  89 *
  90 * BTREE_INSERT will just take a list of keys and insert them into the btree -
  91 * overwriting (possibly only partially) any extents they overlap with. This is
  92 * used to update the index after a write.
  93 *
  94 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
  95 * overwriting a key that matches another given key. This is used for inserting
  96 * data into the cache after a cache miss, and for background writeback, and for
  97 * the moving garbage collector.
  98 *
  99 * There is no "delete" operation; deleting things from the index is
 100 * accomplished by either by invalidating pointers (by incrementing a bucket's
 101 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 102 * previously present at that location in the index.
 103 *
 104 * This means that there are always stale/invalid keys in the btree. They're
 105 * filtered out by the code that iterates through a btree node, and removed when
 106 * a btree node is rewritten.
 107 *
 108 * BTREE NODES:
 109 *
 110 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 111 * free smaller than a bucket - so, that's how big our btree nodes are.
 112 *
 113 * (If buckets are really big we'll only use part of the bucket for a btree node
 114 * - no less than 1/4th - but a bucket still contains no more than a single
 115 * btree node. I'd actually like to change this, but for now we rely on the
 116 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 117 *
 118 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 119 * btree implementation.
 120 *
 121 * The way this is solved is that btree nodes are internally log structured; we
 122 * can append new keys to an existing btree node without rewriting it. This
 123 * means each set of keys we write is sorted, but the node is not.
 124 *
 125 * We maintain this log structure in memory - keeping 1Mb of keys sorted would
 126 * be expensive, and we have to distinguish between the keys we have written and
 127 * the keys we haven't. So to do a lookup in a btree node, we have to search
 128 * each sorted set. But we do merge written sets together lazily, so the cost of
 129 * these extra searches is quite low (normally most of the keys in a btree node
 130 * will be in one big set, and then there'll be one or two sets that are much
 131 * smaller).
 132 *
 133 * This log structure makes bcache's btree more of a hybrid between a
 134 * conventional btree and a compacting data structure, with some of the
 135 * advantages of both.
 136 *
 137 * GARBAGE COLLECTION:
 138 *
 139 * We can't just invalidate any bucket - it might contain dirty data or
 140 * metadata. If it once contained dirty data, other writes might overwrite it
 141 * later, leaving no valid pointers into that bucket in the index.
 142 *
 143 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 144 * It also counts how much valid data it each bucket currently contains, so that
 145 * allocation can reuse buckets sooner when they've been mostly overwritten.
 146 *
 147 * It also does some things that are really internal to the btree
 148 * implementation. If a btree node contains pointers that are stale by more than
 149 * some threshold, it rewrites the btree node to avoid the bucket's generation
 150 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 151 *
 152 * THE JOURNAL:
 153 *
 154 * Bcache's journal is not necessary for consistency; we always strictly
 155 * order metadata writes so that the btree and everything else is consistent on
 156 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 157 * caching (with recovery from unclean shutdown) before journalling was
 158 * implemented.
 159 *
 160 * Rather, the journal is purely a performance optimization; we can't complete a
 161 * write until we've updated the index on disk, otherwise the cache would be
 162 * inconsistent in the event of an unclean shutdown. This means that without the
 163 * journal, on random write workloads we constantly have to update all the leaf
 164 * nodes in the btree, and those writes will be mostly empty (appending at most
 165 * a few keys each) - highly inefficient in terms of amount of metadata writes,
 166 * and it puts more strain on the various btree resorting/compacting code.
 167 *
 168 * The journal is just a log of keys we've inserted; on startup we just reinsert
 169 * all the keys in the open journal entries. That means that when we're updating
 170 * a node in the btree, we can wait until a 4k block of keys fills up before
 171 * writing them out.
 172 *
 173 * For simplicity, we only journal updates to leaf nodes; updates to parent
 174 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 175 * the complexity to deal with journalling them (in particular, journal replay)
 176 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 177 */
 178
 179#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
 180
 181#include <linux/bio.h>
 
 182#include <linux/kobject.h>
 183#include <linux/list.h>
 184#include <linux/mutex.h>
 185#include <linux/rbtree.h>
 186#include <linux/rwsem.h>
 187#include <linux/refcount.h>
 188#include <linux/types.h>
 189#include <linux/workqueue.h>
 190#include <linux/kthread.h>
 191
 192#include "bcache_ondisk.h"
 193#include "bset.h"
 194#include "util.h"
 195#include "closure.h"
 196
 197struct bucket {
 198	atomic_t	pin;
 199	uint16_t	prio;
 200	uint8_t		gen;
 201	uint8_t		last_gc; /* Most out of date gen in the btree */
 202	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
 203};
 204
 205/*
 206 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 207 * as multiple threads touch struct bucket without locking
 208 */
 209
 210BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
 211#define GC_MARK_RECLAIMABLE	1
 212#define GC_MARK_DIRTY		2
 213#define GC_MARK_METADATA	3
 214#define GC_SECTORS_USED_SIZE	13
 215#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
 216BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
 217BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 218
 219#include "journal.h"
 220#include "stats.h"
 221struct search;
 222struct btree;
 223struct keybuf;
 224
 225struct keybuf_key {
 226	struct rb_node		node;
 227	BKEY_PADDED(key);
 228	void			*private;
 229};
 230
 231struct keybuf {
 232	struct bkey		last_scanned;
 233	spinlock_t		lock;
 234
 235	/*
 236	 * Beginning and end of range in rb tree - so that we can skip taking
 237	 * lock and checking the rb tree when we need to check for overlapping
 238	 * keys.
 239	 */
 240	struct bkey		start;
 241	struct bkey		end;
 242
 243	struct rb_root		keys;
 244
 245#define KEYBUF_NR		500
 246	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
 247};
 248
 249struct bcache_device {
 250	struct closure		cl;
 251
 252	struct kobject		kobj;
 253
 254	struct cache_set	*c;
 255	unsigned int		id;
 256#define BCACHEDEVNAME_SIZE	12
 257	char			name[BCACHEDEVNAME_SIZE];
 258
 259	struct gendisk		*disk;
 260
 261	unsigned long		flags;
 262#define BCACHE_DEV_CLOSING		0
 263#define BCACHE_DEV_DETACHING		1
 264#define BCACHE_DEV_UNLINK_DONE		2
 265#define BCACHE_DEV_WB_RUNNING		3
 266#define BCACHE_DEV_RATE_DW_RUNNING	4
 267	int			nr_stripes;
 
 268	unsigned int		stripe_size;
 269	atomic_t		*stripe_sectors_dirty;
 270	unsigned long		*full_dirty_stripes;
 271
 272	struct bio_set		bio_split;
 273
 274	unsigned int		data_csum:1;
 275
 276	int (*cache_miss)(struct btree *b, struct search *s,
 277			  struct bio *bio, unsigned int sectors);
 278	int (*ioctl)(struct bcache_device *d, fmode_t mode,
 279		     unsigned int cmd, unsigned long arg);
 280};
 281
 282struct io {
 283	/* Used to track sequential IO so it can be skipped */
 284	struct hlist_node	hash;
 285	struct list_head	lru;
 286
 287	unsigned long		jiffies;
 288	unsigned int		sequential;
 289	sector_t		last;
 290};
 291
 292enum stop_on_failure {
 293	BCH_CACHED_DEV_STOP_AUTO = 0,
 294	BCH_CACHED_DEV_STOP_ALWAYS,
 295	BCH_CACHED_DEV_STOP_MODE_MAX,
 296};
 297
 298struct cached_dev {
 299	struct list_head	list;
 300	struct bcache_device	disk;
 301	struct block_device	*bdev;
 
 302
 303	struct cache_sb		sb;
 304	struct cache_sb_disk	*sb_disk;
 305	struct bio		sb_bio;
 306	struct bio_vec		sb_bv[1];
 307	struct closure		sb_write;
 308	struct semaphore	sb_write_mutex;
 309
 310	/* Refcount on the cache set. Always nonzero when we're caching. */
 311	refcount_t		count;
 312	struct work_struct	detach;
 313
 314	/*
 315	 * Device might not be running if it's dirty and the cache set hasn't
 316	 * showed up yet.
 317	 */
 318	atomic_t		running;
 319
 320	/*
 321	 * Writes take a shared lock from start to finish; scanning for dirty
 322	 * data to refill the rb tree requires an exclusive lock.
 323	 */
 324	struct rw_semaphore	writeback_lock;
 325
 326	/*
 327	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
 328	 * data in the cache. Protected by writeback_lock; must have an
 329	 * shared lock to set and exclusive lock to clear.
 330	 */
 331	atomic_t		has_dirty;
 332
 333#define BCH_CACHE_READA_ALL		0
 334#define BCH_CACHE_READA_META_ONLY	1
 335	unsigned int		cache_readahead_policy;
 336	struct bch_ratelimit	writeback_rate;
 337	struct delayed_work	writeback_rate_update;
 338
 339	/* Limit number of writeback bios in flight */
 340	struct semaphore	in_flight;
 341	struct task_struct	*writeback_thread;
 342	struct workqueue_struct	*writeback_write_wq;
 343
 344	struct keybuf		writeback_keys;
 345
 346	struct task_struct	*status_update_thread;
 347	/*
 348	 * Order the write-half of writeback operations strongly in dispatch
 349	 * order.  (Maintain LBA order; don't allow reads completing out of
 350	 * order to re-order the writes...)
 351	 */
 352	struct closure_waitlist writeback_ordering_wait;
 353	atomic_t		writeback_sequence_next;
 354
 355	/* For tracking sequential IO */
 356#define RECENT_IO_BITS	7
 357#define RECENT_IO	(1 << RECENT_IO_BITS)
 358	struct io		io[RECENT_IO];
 359	struct hlist_head	io_hash[RECENT_IO + 1];
 360	struct list_head	io_lru;
 361	spinlock_t		io_lock;
 362
 363	struct cache_accounting	accounting;
 364
 365	/* The rest of this all shows up in sysfs */
 366	unsigned int		sequential_cutoff;
 367
 368	unsigned int		io_disable:1;
 369	unsigned int		verify:1;
 370	unsigned int		bypass_torture_test:1;
 371
 372	unsigned int		partial_stripes_expensive:1;
 373	unsigned int		writeback_metadata:1;
 374	unsigned int		writeback_running:1;
 375	unsigned int		writeback_consider_fragment:1;
 376	unsigned char		writeback_percent;
 377	unsigned int		writeback_delay;
 378
 379	uint64_t		writeback_rate_target;
 380	int64_t			writeback_rate_proportional;
 381	int64_t			writeback_rate_integral;
 382	int64_t			writeback_rate_integral_scaled;
 383	int32_t			writeback_rate_change;
 384
 385	unsigned int		writeback_rate_update_seconds;
 386	unsigned int		writeback_rate_i_term_inverse;
 387	unsigned int		writeback_rate_p_term_inverse;
 388	unsigned int		writeback_rate_fp_term_low;
 389	unsigned int		writeback_rate_fp_term_mid;
 390	unsigned int		writeback_rate_fp_term_high;
 391	unsigned int		writeback_rate_minimum;
 392
 393	enum stop_on_failure	stop_when_cache_set_failed;
 394#define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
 395	atomic_t		io_errors;
 396	unsigned int		error_limit;
 397	unsigned int		offline_seconds;
 398
 399	/*
 400	 * Retry to update writeback_rate if contention happens for
 401	 * down_read(dc->writeback_lock) in update_writeback_rate()
 402	 */
 403#define BCH_WBRATE_UPDATE_MAX_SKIPS	15
 404	unsigned int		rate_update_retry;
 405};
 406
 407enum alloc_reserve {
 408	RESERVE_BTREE,
 409	RESERVE_PRIO,
 410	RESERVE_MOVINGGC,
 411	RESERVE_NONE,
 412	RESERVE_NR,
 413};
 414
 415struct cache {
 416	struct cache_set	*set;
 417	struct cache_sb		sb;
 418	struct cache_sb_disk	*sb_disk;
 419	struct bio		sb_bio;
 420	struct bio_vec		sb_bv[1];
 421
 422	struct kobject		kobj;
 423	struct block_device	*bdev;
 
 424
 425	struct task_struct	*alloc_thread;
 426
 427	struct closure		prio;
 428	struct prio_set		*disk_buckets;
 429
 430	/*
 431	 * When allocating new buckets, prio_write() gets first dibs - since we
 432	 * may not be allocate at all without writing priorities and gens.
 433	 * prio_last_buckets[] contains the last buckets we wrote priorities to
 434	 * (so gc can mark them as metadata), prio_buckets[] contains the
 435	 * buckets allocated for the next prio write.
 436	 */
 437	uint64_t		*prio_buckets;
 438	uint64_t		*prio_last_buckets;
 439
 440	/*
 441	 * free: Buckets that are ready to be used
 442	 *
 443	 * free_inc: Incoming buckets - these are buckets that currently have
 444	 * cached data in them, and we can't reuse them until after we write
 445	 * their new gen to disk. After prio_write() finishes writing the new
 446	 * gens/prios, they'll be moved to the free list (and possibly discarded
 447	 * in the process)
 448	 */
 449	DECLARE_FIFO(long, free)[RESERVE_NR];
 450	DECLARE_FIFO(long, free_inc);
 451
 452	size_t			fifo_last_bucket;
 453
 454	/* Allocation stuff: */
 455	struct bucket		*buckets;
 456
 457	DECLARE_HEAP(struct bucket *, heap);
 458
 459	/*
 460	 * If nonzero, we know we aren't going to find any buckets to invalidate
 461	 * until a gc finishes - otherwise we could pointlessly burn a ton of
 462	 * cpu
 463	 */
 464	unsigned int		invalidate_needs_gc;
 465
 466	bool			discard; /* Get rid of? */
 467
 468	struct journal_device	journal;
 469
 470	/* The rest of this all shows up in sysfs */
 471#define IO_ERROR_SHIFT		20
 472	atomic_t		io_errors;
 473	atomic_t		io_count;
 474
 475	atomic_long_t		meta_sectors_written;
 476	atomic_long_t		btree_sectors_written;
 477	atomic_long_t		sectors_written;
 478};
 479
 480struct gc_stat {
 481	size_t			nodes;
 482	size_t			nodes_pre;
 483	size_t			key_bytes;
 484
 485	size_t			nkeys;
 486	uint64_t		data;	/* sectors */
 487	unsigned int		in_use; /* percent */
 488};
 489
 490/*
 491 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 492 *
 493 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 494 * all the backing devices first (their cached data gets invalidated, and they
 495 * won't automatically reattach).
 496 *
 497 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 498 * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
 499 * flushing dirty data).
 500 *
 501 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 502 * replay is complete.
 503 *
 504 * CACHE_SET_IO_DISABLE is set when bcache is stopping the whold cache set, all
 505 * external and internal I/O should be denied when this flag is set.
 506 *
 507 */
 508#define CACHE_SET_UNREGISTERING		0
 509#define	CACHE_SET_STOPPING		1
 510#define	CACHE_SET_RUNNING		2
 511#define CACHE_SET_IO_DISABLE		3
 512
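/*
 * These flags live in cache_set->flags and are manipulated with the standard
 * atomic bitops; closure_bio_submit() near the end of this file checks
 * CACHE_SET_IO_DISABLE exactly this way. Minimal sketch:
 */
#if 0	/* example only, never compiled */
static bool example_io_allowed(struct cache_set *c)
{
	return !test_bit(CACHE_SET_IO_DISABLE, &c->flags);
}
#endif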
 513struct cache_set {
 514	struct closure		cl;
 515
 516	struct list_head	list;
 517	struct kobject		kobj;
 518	struct kobject		internal;
 519	struct dentry		*debug;
 520	struct cache_accounting accounting;
 521
 522	unsigned long		flags;
 523	atomic_t		idle_counter;
 524	atomic_t		at_max_writeback_rate;
 525
 526	struct cache		*cache;
 527
 528	struct bcache_device	**devices;
 529	unsigned int		devices_max_used;
 530	atomic_t		attached_dev_nr;
 531	struct list_head	cached_devs;
 532	uint64_t		cached_dev_sectors;
 533	atomic_long_t		flash_dev_dirty_sectors;
 534	struct closure		caching;
 535
 536	struct closure		sb_write;
 537	struct semaphore	sb_write_mutex;
 538
 539	mempool_t		search;
 540	mempool_t		bio_meta;
 541	struct bio_set		bio_split;
 542
 543	/* For the btree cache */
 544	struct shrinker		shrink;
 545
 546	/* For the btree cache and anything allocation related */
 547	struct mutex		bucket_lock;
 548
 549	/* log2(bucket_size), in sectors */
 550	unsigned short		bucket_bits;
 551
 552	/* log2(block_size), in sectors */
 553	unsigned short		block_bits;
 554
 555	/*
 556	 * Default number of pages for a new btree node - may be less than a
 557	 * full bucket
 558	 */
 559	unsigned int		btree_pages;
 560
 561	/*
 562	 * Lists of struct btrees; lru is the list for structs that have memory
  563	 * allocated for an actual btree node, freed is for structs that do not.
 564	 *
 565	 * We never free a struct btree, except on shutdown - we just put it on
 566	 * the btree_cache_freed list and reuse it later. This simplifies the
 567	 * code, and it doesn't cost us much memory as the memory usage is
 568	 * dominated by buffers that hold the actual btree node data and those
 569	 * can be freed - and the number of struct btrees allocated is
 570	 * effectively bounded.
 571	 *
 572	 * btree_cache_freeable effectively is a small cache - we use it because
 573	 * high order page allocations can be rather expensive, and it's quite
 574	 * common to delete and allocate btree nodes in quick succession. It
 575	 * should never grow past ~2-3 nodes in practice.
 576	 */
 577	struct list_head	btree_cache;
 578	struct list_head	btree_cache_freeable;
 579	struct list_head	btree_cache_freed;
 580
 581	/* Number of elements in btree_cache + btree_cache_freeable lists */
 582	unsigned int		btree_cache_used;
 583
 584	/*
 585	 * If we need to allocate memory for a new btree node and that
 586	 * allocation fails, we can cannibalize another node in the btree cache
 587	 * to satisfy the allocation - lock to guarantee only one thread does
 588	 * this at a time:
 589	 */
 590	wait_queue_head_t	btree_cache_wait;
 591	struct task_struct	*btree_cache_alloc_lock;
 592	spinlock_t		btree_cannibalize_lock;
 593
 594	/*
 595	 * When we free a btree node, we increment the gen of the bucket the
  596	 * node is in - but we can't rewrite the prios and gens until we've
  597	 * finished whatever it is we were doing, otherwise after a crash the
  598	 * btree node would be freed but - for, say, a split - we might not yet
  599	 * have inserted the pointers to the new nodes into the btree.
 600	 *
 601	 * This is a refcount that blocks prio_write() until the new keys are
 602	 * written.
 603	 */
 604	atomic_t		prio_blocked;
 605	wait_queue_head_t	bucket_wait;
 606
 607	/*
 608	 * For any bio we don't skip we subtract the number of sectors from
 609	 * rescale; when it hits 0 we rescale all the bucket priorities.
 610	 */
 611	atomic_t		rescale;
 612	/*
  613	 * Used by GC to identify whether any front-side I/O is in flight
 614	 */
 615	atomic_t		search_inflight;
 616	/*
 617	 * When we invalidate buckets, we use both the priority and the amount
 618	 * of good data to determine which buckets to reuse first - to weight
 619	 * those together consistently we keep track of the smallest nonzero
 620	 * priority of any bucket.
 621	 */
 622	uint16_t		min_prio;
 623
 624	/*
 625	 * max(gen - last_gc) for all buckets. When it gets too big we have to
 626	 * gc to keep gens from wrapping around.
 627	 */
 628	uint8_t			need_gc;
 629	struct gc_stat		gc_stats;
 630	size_t			nbuckets;
 631	size_t			avail_nbuckets;
 632
 633	struct task_struct	*gc_thread;
 634	/* Where in the btree gc currently is */
 635	struct bkey		gc_done;
 636
 637	/*
  638	 * For automatic garbage collection after writeback completes, this
  639	 * variable is used as a bit field:
  640	 * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
  641	 * - 0000 0010b (BCH_DO_AUTO_GC):     do gc after writeback
  642	 * This optimizes for write requests that follow a finished writeback,
  643	 * at the cost of a lower read hit rate, since gc discards clean data
  644	 * from the cache. It stays disabled unless the user explicitly enables
  645	 * it via sysfs; a sketch of the handshake follows this struct.
 646	 */
 647#define BCH_ENABLE_AUTO_GC	1
 648#define BCH_DO_AUTO_GC		2
 649	uint8_t			gc_after_writeback;
 650
 651	/*
 652	 * The allocation code needs gc_mark in struct bucket to be correct, but
 653	 * it's not while a gc is in progress. Protected by bucket_lock.
 654	 */
 655	int			gc_mark_valid;
 656
 657	/* Counts how many sectors bio_insert has added to the cache */
 658	atomic_t		sectors_to_gc;
 659	wait_queue_head_t	gc_wait;
 660
 661	struct keybuf		moving_gc_keys;
 662	/* Number of moving GC bios in flight */
 663	struct semaphore	moving_in_flight;
 664
 665	struct workqueue_struct	*moving_gc_wq;
 666
 667	struct btree		*root;
 668
 669#ifdef CONFIG_BCACHE_DEBUG
 670	struct btree		*verify_data;
 671	struct bset		*verify_ondisk;
 672	struct mutex		verify_lock;
 673#endif
 674
 675	uint8_t			set_uuid[16];
 676	unsigned int		nr_uuids;
 677	struct uuid_entry	*uuids;
 678	BKEY_PADDED(uuid_bucket);
 679	struct closure		uuid_write;
 680	struct semaphore	uuid_write_mutex;
 681
 682	/*
  683	 * A btree node on disk could have too many bsets for an iterator to fit
  684	 * on the stack - we have to allocate iterators dynamically.
  685	 * bch_cache_set_alloc() makes sure the pool allocates iterators with
  686	 * enough room to hold
  687	 *     (sb.bucket_size / sb.block_size)
  688	 * btree_iter_sets, which is more than the static MAX_BSETS.
 689	 */
 690	mempool_t		fill_iter;
 691
 692	struct bset_sort_state	sort;
 693
 694	/* List of buckets we're currently writing data to */
 695	struct list_head	data_buckets;
 696	spinlock_t		data_bucket_lock;
 697
 698	struct journal		journal;
 699
 700#define CONGESTED_MAX		1024
 701	unsigned int		congested_last_us;
 702	atomic_t		congested;
 703
 704	/* The rest of this all shows up in sysfs */
 705	unsigned int		congested_read_threshold_us;
 706	unsigned int		congested_write_threshold_us;
 707
 708	struct time_stats	btree_gc_time;
 709	struct time_stats	btree_split_time;
 710	struct time_stats	btree_read_time;
 711
 712	atomic_long_t		cache_read_races;
 713	atomic_long_t		writeback_keys_done;
 714	atomic_long_t		writeback_keys_failed;
 715
 716	atomic_long_t		reclaim;
 717	atomic_long_t		reclaimed_journal_buckets;
 718	atomic_long_t		flush_write;
 719
 720	enum			{
 721		ON_ERROR_UNREGISTER,
 722		ON_ERROR_PANIC,
 723	}			on_error;
 724#define DEFAULT_IO_ERROR_LIMIT 8
 725	unsigned int		error_limit;
 726	unsigned int		error_decay;
 727
 728	unsigned short		journal_delay_ms;
 729	bool			expensive_debug_checks;
 730	unsigned int		verify:1;
 731	unsigned int		key_merging_disabled:1;
 732	unsigned int		gc_always_rewrite:1;
 733	unsigned int		shrinker_disabled:1;
 734	unsigned int		copy_gc_enabled:1;
 735	unsigned int		idle_max_writeback_rate_enabled:1;
 736
 737#define BUCKET_HASH_BITS	12
 738	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
 739};
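/*
 * A hedged sketch of the gc_after_writeback handshake documented above:
 * sysfs turns on BCH_ENABLE_AUTO_GC, writeback sets BCH_DO_AUTO_GC, and when
 * writeback drains the "do" bit is consumed and gc is kicked. The helper
 * below is hypothetical; the real logic lives in writeback.c, with the gc
 * wakeup helpers in btree.h.
 */
#if 0	/* example only, never compiled */
static void example_writeback_drained(struct cache_set *c)
{
	if (c->gc_after_writeback == (BCH_ENABLE_AUTO_GC | BCH_DO_AUTO_GC)) {
		c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
		force_wake_up_gc(c);	/* assumed wakeup, from btree.h */
	}
}
#endif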
 740
 741struct bbio {
 742	unsigned int		submit_time_us;
 743	union {
 744		struct bkey	key;
 745		uint64_t	_pad[3];
 746		/*
 747		 * We only need pad = 3 here because we only ever carry around a
 748		 * single pointer - i.e. the pointer we're doing io to/from.
 749		 */
 750	};
 751	struct bio		bio;
 752};
 753
 754#define BTREE_PRIO		USHRT_MAX
 755#define INITIAL_PRIO		32768U
 756
 757#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
 758#define btree_blocks(b)							\
 759	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
 760
 761#define btree_default_blocks(c)						\
 762	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
 763
 764#define bucket_bytes(ca)	((ca)->sb.bucket_size << 9)
 765#define block_bytes(ca)		((ca)->sb.block_size << 9)
 766
 767static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
 768{
 769	unsigned int n, max_pages;
 770
 771	max_pages = min_t(unsigned int,
 772			  __rounddown_pow_of_two(USHRT_MAX) / PAGE_SECTORS,
 773			  MAX_ORDER_NR_PAGES);
 774
 775	n = sb->bucket_size / PAGE_SECTORS;
 776	if (n > max_pages)
 777		n = max_pages;
 778
 779	return n;
 780}
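/*
 * Worked example, assuming 4 KiB pages (PAGE_SECTORS == 8):
 * __rounddown_pow_of_two(USHRT_MAX) is 32768, so the first clamp allows up
 * to 32768 / 8 = 4096 pages (further limited by the arch-dependent
 * MAX_ORDER_NR_PAGES). A bucket_size of 2048 sectors then yields
 * n = 2048 / 8 = 256 pages, i.e. a 1 MiB meta bucket.
 */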
 781
 782static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
 783{
 784	return meta_bucket_pages(sb) << PAGE_SHIFT;
 785}
 786
 787#define prios_per_bucket(ca)						\
 788	((meta_bucket_bytes(&(ca)->sb) - sizeof(struct prio_set)) /	\
 789	 sizeof(struct bucket_disk))
 790
 791#define prio_buckets(ca)						\
 792	DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))
 793
 794static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
 795{
 796	return s >> c->bucket_bits;
 797}
 798
 799static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
 800{
 801	return ((sector_t) b) << c->bucket_bits;
 802}
 803
 804static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
 805{
 806	return s & (c->cache->sb.bucket_size - 1);
 807}
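/*
 * Worked example: with bucket_bits == 10 (1024-sector, i.e. 512 KiB
 * buckets), sector 3000 falls in bucket 3000 >> 10 = 2, which starts at
 * bucket_to_sector(c, 2) = 2048; bucket_remainder(c, 3000) = 3000 & 1023
 * = 952 sectors into that bucket.
 */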
 808
 809static inline size_t PTR_BUCKET_NR(struct cache_set *c,
 810				   const struct bkey *k,
 811				   unsigned int ptr)
 812{
 813	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
 814}
 815
 816static inline struct bucket *PTR_BUCKET(struct cache_set *c,
 817					const struct bkey *k,
 818					unsigned int ptr)
 819{
 820	return c->cache->buckets + PTR_BUCKET_NR(c, k, ptr);
 821}
 822
 823static inline uint8_t gen_after(uint8_t a, uint8_t b)
 824{
 825	uint8_t r = a - b;
 826
 827	return r > 128U ? 0 : r;
 828}
 829
 830static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
 831				unsigned int i)
 832{
 833	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 834}
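/*
 * gen_after() does wraparound-safe arithmetic modulo 256. Worked example:
 * gen_after(2, 250) = (uint8_t)(2 - 250) = 8, i.e. gen 2 is 8 generations
 * past gen 250; gen_after(250, 2) = 248, which is > 128 and therefore
 * treated as "not after" and clamped to 0. ptr_stale() thus returns how
 * many gens the bucket has advanced past the pointer - nonzero means the
 * pointer is stale.
 */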
 835
 836static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
 837				 unsigned int i)
 838{
 839	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && c->cache;
 840}
 841
 842/* Btree key macros */
 843
 844/*
 845 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 846 * jset: The checksum is _always_ the first 8 bytes of these structs
 847 */
 848#define csum_set(i)							\
 849	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
 850		  ((void *) bset_bkey_last(i)) -			\
 851		  (((void *) (i)) + sizeof(uint64_t)))
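/*
 * Typical verification when one of these structs is read back, sketched
 * for a bset (the real checks live in the journal and bset code):
 */
#if 0	/* example only, never compiled */
static bool example_bset_csum_ok(struct bset *i)
{
	return i->csum == csum_set(i);
}
#endif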
 852
 853/* Error handling macros */
 854
 855#define btree_bug(b, ...)						\
 856do {									\
 857	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
 858		dump_stack();						\
 859} while (0)
 860
 861#define cache_bug(c, ...)						\
 862do {									\
 863	if (bch_cache_set_error(c, __VA_ARGS__))			\
 864		dump_stack();						\
 865} while (0)
 866
 867#define btree_bug_on(cond, b, ...)					\
 868do {									\
 869	if (cond)							\
 870		btree_bug(b, __VA_ARGS__);				\
 871} while (0)
 872
 873#define cache_bug_on(cond, c, ...)					\
 874do {									\
 875	if (cond)							\
 876		cache_bug(c, __VA_ARGS__);				\
 877} while (0)
 878
 879#define cache_set_err_on(cond, c, ...)					\
 880do {									\
 881	if (cond)							\
 882		bch_cache_set_error(c, __VA_ARGS__);			\
 883} while (0)
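/*
 * Usage sketch (condition and message are hypothetical): the _on variants
 * fold the common "if (cond) complain" pattern, reporting through
 * bch_cache_set_error() and, for the bug variants, dumping a stack trace
 * when the condition fires:
 *
 *	cache_bug_on(stale > BUCKET_GC_GEN_MAX, c,
 *		     "stale pointer survived gc");
 */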
 884
 885/* Looping macros */
 886
 887#define for_each_bucket(b, ca)						\
 888	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
 889	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
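/*
 * Usage sketch (the body is hypothetical; real walkers in alloc.c and
 * btree.c iterate like this under bucket_lock):
 */
#if 0	/* example only, never compiled */
static void example_reset_last_gc(struct cache *ca)
{
	struct bucket *b;

	for_each_bucket(b, ca)
		b->last_gc = b->gen;
}
#endif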
 890
 891static inline void cached_dev_put(struct cached_dev *dc)
 892{
 893	if (refcount_dec_and_test(&dc->count))
 894		schedule_work(&dc->detach);
 895}
 896
 897static inline bool cached_dev_get(struct cached_dev *dc)
 898{
 899	if (!refcount_inc_not_zero(&dc->count))
 900		return false;
 901
 902	/* Paired with the mb in cached_dev_attach */
 903	smp_mb__after_atomic();
 904	return true;
 905}
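/*
 * The usual refcount pattern, sketched: take a reference before touching
 * the backing device, drop it afterwards; the final put schedules the
 * detach worker.
 */
#if 0	/* example only, never compiled */
static void example_use_cached_dev(struct cached_dev *dc)
{
	if (cached_dev_get(dc)) {
		/* ... dc is guaranteed alive here ... */
		cached_dev_put(dc);
	}
}
#endif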
 906
 907/*
 908 * bucket_gc_gen() returns the difference between the bucket's current gen and
 909 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 910 */
 911
 912static inline uint8_t bucket_gc_gen(struct bucket *b)
 913{
 914	return b->gen - b->last_gc;
 915}
 916
 917#define BUCKET_GC_GEN_MAX	96U
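/*
 * Worked example: a bucket at gen 200 whose oldest btree pointer dates from
 * gen 120 has bucket_gc_gen() == 80, still under BUCKET_GC_GEN_MAX; once
 * the difference would exceed the limit, the allocator stops invalidating
 * the bucket until gc refreshes last_gc.
 */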
 918
 919#define kobj_attribute_write(n, fn)					\
 920	static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
 921
 922#define kobj_attribute_rw(n, show, store)				\
 923	static struct kobj_attribute ksysfs_##n =			\
 924		__ATTR(n, 0600, show, store)
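/*
 * Usage sketch: super.c declares its top-level sysfs files this way, along
 * the lines of (handler name assumed):
 *
 *	kobj_attribute_write(register, register_bcache);
 */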
 925
 926static inline void wake_up_allocators(struct cache_set *c)
 927{
 928	struct cache *ca = c->cache;
 929
 930	wake_up_process(ca->alloc_thread);
 931}
 932
 933static inline void closure_bio_submit(struct cache_set *c,
 934				      struct bio *bio,
 935				      struct closure *cl)
 936{
 937	closure_get(cl);
 938	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
 939		bio->bi_status = BLK_STS_IOERR;
 940		bio_endio(bio);
 941		return;
 942	}
 943	submit_bio_noacct(bio);
 944}
 945
 946/*
  947 * Prevent a kthread from exiting directly, so that it is still alive
  948 * when kthread_stop() is called to stop it. If a kthread might exit
  949 * early because the CACHE_SET_IO_DISABLE bit is set, it must call
  950 * wait_for_kthread_stop() before returning.
 951 */
 952static inline void wait_for_kthread_stop(void)
 953{
 954	while (!kthread_should_stop()) {
 955		set_current_state(TASK_INTERRUPTIBLE);
 956		schedule();
 957	}
 958}
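/*
 * Sketch of the intended calling pattern in a bcache kthread (the loop body
 * is hypothetical): leave the main loop on CACHE_SET_IO_DISABLE, then park
 * in wait_for_kthread_stop() so the eventual kthread_stop() still finds a
 * live thread.
 */
#if 0	/* example only, never compiled */
static int example_thread(void *arg)
{
	struct cache_set *c = arg;

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		/* ... do one unit of work ... */
	}

	wait_for_kthread_stop();
	return 0;
}
#endif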
 959
 960/* Forward declarations */
 961
 962void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
 963void bch_count_io_errors(struct cache *ca, blk_status_t error,
 964			 int is_read, const char *m);
 965void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 966			      blk_status_t error, const char *m);
 967void bch_bbio_endio(struct cache_set *c, struct bio *bio,
 968		    blk_status_t error, const char *m);
 969void bch_bbio_free(struct bio *bio, struct cache_set *c);
 970struct bio *bch_bbio_alloc(struct cache_set *c);
 971
 972void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
 973void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 974		     struct bkey *k, unsigned int ptr);
 975
 976uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
 977void bch_rescale_priorities(struct cache_set *c, int sectors);
 978
 979bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
 980void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
 981
 982void __bch_bucket_free(struct cache *ca, struct bucket *b);
 983void bch_bucket_free(struct cache_set *c, struct bkey *k);
 984
 985long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
 986int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 987			   struct bkey *k, bool wait);
 988int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 989			 struct bkey *k, bool wait);
 990bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
 991		       unsigned int sectors, unsigned int write_point,
 992		       unsigned int write_prio, bool wait);
 993bool bch_cached_dev_error(struct cached_dev *dc);
 994
 995__printf(2, 3)
 996bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
 997
 998int bch_prio_write(struct cache *ca, bool wait);
 999void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
1000
1001extern struct workqueue_struct *bcache_wq;
1002extern struct workqueue_struct *bch_journal_wq;
1003extern struct workqueue_struct *bch_flush_wq;
1004extern struct mutex bch_register_lock;
1005extern struct list_head bch_cache_sets;
1006
1007extern struct kobj_type bch_cached_dev_ktype;
1008extern struct kobj_type bch_flash_dev_ktype;
1009extern struct kobj_type bch_cache_set_ktype;
1010extern struct kobj_type bch_cache_set_internal_ktype;
1011extern struct kobj_type bch_cache_ktype;
1012
1013void bch_cached_dev_release(struct kobject *kobj);
1014void bch_flash_dev_release(struct kobject *kobj);
1015void bch_cache_set_release(struct kobject *kobj);
1016void bch_cache_release(struct kobject *kobj);
1017
1018int bch_uuid_write(struct cache_set *c);
1019void bcache_write_super(struct cache_set *c);
1020
1021int bch_flash_dev_create(struct cache_set *c, uint64_t size);
1022
1023int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
1024			  uint8_t *set_uuid);
1025void bch_cached_dev_detach(struct cached_dev *dc);
1026int bch_cached_dev_run(struct cached_dev *dc);
1027void bcache_device_stop(struct bcache_device *d);
1028
1029void bch_cache_set_unregister(struct cache_set *c);
1030void bch_cache_set_stop(struct cache_set *c);
1031
1032struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
1033void bch_btree_cache_free(struct cache_set *c);
1034int bch_btree_cache_alloc(struct cache_set *c);
1035void bch_moving_init_cache_set(struct cache_set *c);
1036int bch_open_buckets_alloc(struct cache_set *c);
1037void bch_open_buckets_free(struct cache_set *c);
1038
1039int bch_cache_allocator_start(struct cache *ca);
1040
1041void bch_debug_exit(void);
1042void bch_debug_init(void);
1043void bch_request_exit(void);
1044int bch_request_init(void);
1045void bch_btree_exit(void);
1046int bch_btree_init(void);
1047
1048#endif /* _BCACHE_H */