   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _BCACHE_H
   3#define _BCACHE_H
   4
   5/*
   6 * SOME HIGH LEVEL CODE DOCUMENTATION:
   7 *
   8 * Bcache mostly works with cache sets, cache devices, and backing devices.
   9 *
  10 * Support for multiple cache devices hasn't quite been finished off yet, but
  11 * it's about 95% plumbed through. A cache set and its cache devices are sort of
  12 * like an md raid array and its component devices. Most of the code doesn't care
  13 * about individual cache devices; the main abstraction is the cache set.
  14 *
  15 * Multiple cache devices are intended to give us the ability to mirror dirty
  16 * cached data and metadata, without mirroring clean cached data.
  17 *
  18 * Backing devices are different, in that they have a lifetime independent of a
  19 * cache set. When you register a newly formatted backing device it'll come up
  20 * in passthrough mode, and then you can attach and detach a backing device from
  21 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
  22 * invalidates any cached data for that backing device.
  23 *
  24 * A cache set can have multiple (many) backing devices attached to it.
  25 *
  26 * There are also flash only volumes - this is the reason for the distinction
  27 * between struct cached_dev and struct bcache_device. A flash only volume
  28 * works much like a bcache device that has a backing device, except the
  29 * "cached" data is always dirty. The end result is that we get thin
  30 * provisioning with very little additional code.
  31 *
  32 * Flash only volumes work but they're not production ready because the moving
  33 * garbage collector needs more work. More on that later.
  34 *
  35 * BUCKETS/ALLOCATION:
  36 *
  37 * Bcache is primarily designed for caching, which means that in normal
  38 * operation all of our available space will be allocated. Thus, we need an
  39 * efficient way of deleting things from the cache so we can write new things to
  40 * it.
  41 *
  42 * To do this, we first divide the cache device up into buckets. A bucket is the
  43 * unit of allocation; they're typically around 1 mb - anywhere from 128k to 2M+
  44 * works efficiently.
  45 *
  46 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
  47 * it. The gens and priorities for all the buckets are stored contiguously and
  48 * packed on disk (in a linked list of buckets - aside from the superblock, all
  49 * of bcache's metadata is stored in buckets).
  50 *
  51 * The priority is used to implement an LRU. We reset a bucket's priority when
  52 * we allocate it or on a cache hit, and every so often we decrement the priority
  53 * of each bucket. It could be used to implement something more sophisticated,
  54 * if anyone ever gets around to it.
  55 *
  56 * The generation is used for invalidating buckets. Each pointer also has an 8
  57 * bit generation embedded in it; for a pointer to be considered valid, its gen
  58 * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
  59 * we have to do is increment its gen (and write its new gen to disk; we batch
  60 * this up).
  61 *
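 * (Illustration only - a rough sketch of that validity rule; the real helpers
 * are gen_after()/ptr_stale() further down in this file:
 *
 *	valid = (PTR_GEN(k, i) == PTR_BUCKET(c, k, i)->gen);
 *	to reuse a bucket: bucket->gen++;  // every old pointer into it goes stale
 * )
 *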
  62 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
  63 * contain metadata (including btree nodes).
  64 *
  65 * THE BTREE:
  66 *
  67 * Bcache is in large part designed around the btree.
  68 *
  69 * At a high level, the btree is just an index of key -> ptr tuples.
  70 *
  71 * Keys represent extents, and thus have a size field. Keys also have a variable
  72 * number of pointers attached to them (potentially zero, which is handy for
  73 * invalidating the cache).
  74 *
  75 * The key itself is an inode:offset pair. The inode number corresponds to a
  76 * backing device or a flash only volume. The offset is the ending offset of the
  77 * extent within the inode - not the starting offset; this makes lookups
  78 * slightly more convenient.
  79 *
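 * (Why the end offset helps, as a rough illustration: keys sort by
 * (inode, offset), so a lookup for an extent starting at sector s in inode i
 * can search for the first key at or after KEY(i, s, 0) - everything before
 * that point ends at or before s and therefore can't overlap the query.)
 *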
  80 * Pointers contain the cache device id, the offset on that device, and an 8 bit
  81 * generation number. More on the gen later.
  82 *
  83 * Index lookups are not fully abstracted - cache lookups in particular are
  84 * still somewhat mixed in with the btree code, but things are headed in that
  85 * direction.
  86 *
  87 * Updates are fairly well abstracted, though. There are two different ways of
  88 * updating the btree; insert and replace.
  89 *
  90 * BTREE_INSERT will just take a list of keys and insert them into the btree -
  91 * overwriting (possibly only partially) any extents they overlap with. This is
  92 * used to update the index after a write.
  93 *
  94 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
  95 * overwriting a key that matches another given key. This is used for inserting
  96 * data into the cache after a cache miss, and for background writeback, and for
  97 * the moving garbage collector.
  98 *
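 * (Rough sketch of the replace semantics, for illustration - not the literal
 * interface:
 *
 *	if (the index still holds old_key for this range)
 *		insert new_key;     // e.g. cache miss fill, writeback, moving gc
 *	else
 *		drop the insert;    // we raced with a foreground write
 * )
 *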
  99 * There is no "delete" operation; deleting things from the index is
 100 * accomplished either by invalidating pointers (by incrementing a bucket's
 101 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 102 * previously present at that location in the index.
 103 *
 104 * This means that there are always stale/invalid keys in the btree. They're
 105 * filtered out by the code that iterates through a btree node, and removed when
 106 * a btree node is rewritten.
 107 *
 108 * BTREE NODES:
 109 *
 110 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 111 * free smaller than a bucket - so, that's how big our btree nodes are.
 112 *
 113 * (If buckets are really big we'll only use part of the bucket for a btree node
 114 * - no less than 1/4th - but a bucket still contains no more than a single
 115 * btree node. I'd actually like to change this, but for now we rely on the
 116 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 117 *
 118 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 119 * btree implementation.
 120 *
 121 * The way this is solved is that btree nodes are internally log structured; we
 122 * can append new keys to an existing btree node without rewriting it. This
 123 * means each set of keys we write is sorted, but the node is not.
 124 *
 125 * We maintain this log structure in memory - keeping 1Mb of keys sorted would
 126 * be expensive, and we have to distinguish between the keys we have written and
 127 * the keys we haven't. So to do a lookup in a btree node, we have to search
 128 * each sorted set. But we do merge written sets together lazily, so the cost of
 129 * these extra searches is quite low (normally most of the keys in a btree node
 130 * will be in one big set, and then there'll be one or two sets that are much
 131 * smaller).
 132 *
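 * (Rough sketch of a lookup within one node, for illustration:
 *
 *	for (each sorted set in the node, newest to oldest)
 *		binary search that set, keeping the best match seen so far;
 * )
 *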
 133 * This log structure makes bcache's btree more of a hybrid between a
 134 * conventional btree and a compacting data structure, with some of the
 135 * advantages of both.
 136 *
 137 * GARBAGE COLLECTION:
 138 *
 139 * We can't just invalidate any bucket - it might contain dirty data or
 140 * metadata. If it once contained dirty data, other writes might overwrite it
 141 * later, leaving no valid pointers into that bucket in the index.
 142 *
 143 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 144 * It also counts how much valid data each bucket currently contains, so that
 145 * allocation can reuse buckets sooner when they've been mostly overwritten.
 146 *
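 * (Roughly what that accounting looks like, for illustration - the real gc
 * code tracks it in the GC_SECTORS_USED() bitfield defined below:
 *
 *	for (each key in the btree)
 *		for (each valid pointer in the key)
 *			that pointer's bucket's used sector count += KEY_SIZE(key);
 * )
 *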
 147 * It also does some things that are really internal to the btree
 148 * implementation. If a btree node contains pointers that are stale by more than
 149 * some threshold, it rewrites the btree node to avoid the bucket's generation
 150 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 151 *
 152 * THE JOURNAL:
 153 *
 154 * Bcache's journal is not necessary for consistency; we always strictly
 155 * order metadata writes so that the btree and everything else is consistent on
 156 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 157 * caching (with recovery from unclean shutdown) before journalling was
 158 * implemented.
 159 *
 160 * Rather, the journal is purely a performance optimization; we can't complete a
 161 * write until we've updated the index on disk, otherwise the cache would be
 162 * inconsistent in the event of an unclean shutdown. This means that without the
 163 * journal, on random write workloads we constantly have to update all the leaf
 164 * nodes in the btree, and those writes will be mostly empty (appending at most
 165 * a few keys each) - highly inefficient in terms of amount of metadata writes,
 166 * and it puts more strain on the various btree resorting/compacting code.
 167 *
 168 * The journal is just a log of keys we've inserted; on startup we just reinsert
 169 * all the keys in the open journal entries. That means that when we're updating
 170 * a node in the btree, we can wait until a 4k block of keys fills up before
 171 * writing them out.
 172 *
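 * (So journal replay is conceptually just, as a rough sketch:
 *
 *	for (each open journal entry, oldest to newest)
 *		for (each key in the entry)
 *			insert the key into the btree;
 * )
 *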
 173 * For simplicity, we only journal updates to leaf nodes; updates to parent
 174 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 175 * the complexity to deal with journalling them (in particular, journal replay)
 176 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 177 */
 178
 179#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
 180
 181#include <linux/bcache.h>
 182#include <linux/bio.h>
 183#include <linux/kobject.h>
 184#include <linux/list.h>
 185#include <linux/mutex.h>
 186#include <linux/rbtree.h>
 187#include <linux/rwsem.h>
 188#include <linux/refcount.h>
 189#include <linux/types.h>
 190#include <linux/workqueue.h>
 191#include <linux/kthread.h>
 192
 193#include "bset.h"
 194#include "util.h"
 195#include "closure.h"
 196
 197struct bucket {
 198	atomic_t	pin;
 199	uint16_t	prio;
 200	uint8_t		gen;
 201	uint8_t		last_gc; /* Most out of date gen in the btree */
 202	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
 203};
 204
 205/*
 206 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 207 * as multiple threads touch struct bucket without locking
 208 */
 209
 210BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
 211#define GC_MARK_RECLAIMABLE	1
 212#define GC_MARK_DIRTY		2
 213#define GC_MARK_METADATA	3
 214#define GC_SECTORS_USED_SIZE	13
 215#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
 216BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
 217BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 218
 219#include "journal.h"
 220#include "stats.h"
 221struct search;
 222struct btree;
 223struct keybuf;
 224
 225struct keybuf_key {
 226	struct rb_node		node;
 227	BKEY_PADDED(key);
 228	void			*private;
 229};
 230
 231struct keybuf {
 232	struct bkey		last_scanned;
 233	spinlock_t		lock;
 234
 235	/*
 236	 * Beginning and end of range in rb tree - so that we can skip taking
 237	 * lock and checking the rb tree when we need to check for overlapping
 238	 * keys.
 239	 */
 240	struct bkey		start;
 241	struct bkey		end;
 242
 243	struct rb_root		keys;
 244
 245#define KEYBUF_NR		500
 246	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
 247};
 248
 249struct bcache_device {
 250	struct closure		cl;
 251
 252	struct kobject		kobj;
 253
 254	struct cache_set	*c;
 255	unsigned		id;
 256#define BCACHEDEVNAME_SIZE	12
 257	char			name[BCACHEDEVNAME_SIZE];
 258
 259	struct gendisk		*disk;
 260
 261	unsigned long		flags;
 262#define BCACHE_DEV_CLOSING		0
 263#define BCACHE_DEV_DETACHING		1
 264#define BCACHE_DEV_UNLINK_DONE		2
 265#define BCACHE_DEV_WB_RUNNING		3
 266#define BCACHE_DEV_RATE_DW_RUNNING	4
 267	unsigned		nr_stripes;
 268	unsigned		stripe_size;
 269	atomic_t		*stripe_sectors_dirty;
 270	unsigned long		*full_dirty_stripes;
 271
 272	struct bio_set		*bio_split;
 273
 274	unsigned		data_csum:1;
 275
 276	int (*cache_miss)(struct btree *, struct search *,
 277			  struct bio *, unsigned);
 278	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
 279};
 280
 281struct io {
 282	/* Used to track sequential IO so it can be skipped */
 283	struct hlist_node	hash;
 284	struct list_head	lru;
 285
 286	unsigned long		jiffies;
 287	unsigned		sequential;
 288	sector_t		last;
 289};
 290
 291enum stop_on_failure {
 292	BCH_CACHED_DEV_STOP_AUTO = 0,
 293	BCH_CACHED_DEV_STOP_ALWAYS,
 294	BCH_CACHED_DEV_STOP_MODE_MAX,
 295};
 296
 297struct cached_dev {
 298	struct list_head	list;
 299	struct bcache_device	disk;
 300	struct block_device	*bdev;
 301
 302	struct cache_sb		sb;
 303	struct bio		sb_bio;
 304	struct bio_vec		sb_bv[1];
 305	struct closure		sb_write;
 306	struct semaphore	sb_write_mutex;
 307
 308	/* Refcount on the cache set. Always nonzero when we're caching. */
 309	refcount_t		count;
 310	struct work_struct	detach;
 311
 312	/*
 313	 * Device might not be running if it's dirty and the cache set hasn't
 314	 * shown up yet.
 315	 */
 316	atomic_t		running;
 317
 318	/*
 319	 * Writes take a shared lock from start to finish; scanning for dirty
 320	 * data to refill the rb tree requires an exclusive lock.
 321	 */
 322	struct rw_semaphore	writeback_lock;
 323
 324	/*
 325	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
 326	 * data in the cache. Protected by writeback_lock; must have an
 327	 * shared lock to set and exclusive lock to clear.
 328	 */
 329	atomic_t		has_dirty;
 330
 331	/*
 332	 * Set to zero by things that touch the backing volume-- except
 333	 * writeback.  Incremented by writeback.  Used to determine when to
 334	 * accelerate idle writeback.
 335	 */
 336	atomic_t		backing_idle;
 337
 338	struct bch_ratelimit	writeback_rate;
 339	struct delayed_work	writeback_rate_update;
 340
 341	/* Limit number of writeback bios in flight */
 342	struct semaphore	in_flight;
 343	struct task_struct	*writeback_thread;
 344	struct workqueue_struct	*writeback_write_wq;
 345
 346	struct keybuf		writeback_keys;
 347
 348	/*
 349	 * Order the write-half of writeback operations strongly in dispatch
 350	 * order.  (Maintain LBA order; don't allow reads completing out of
 351	 * order to re-order the writes...)
 352	 */
 353	struct closure_waitlist writeback_ordering_wait;
 354	atomic_t		writeback_sequence_next;
 355
 356	/* For tracking sequential IO */
 357#define RECENT_IO_BITS	7
 358#define RECENT_IO	(1 << RECENT_IO_BITS)
 359	struct io		io[RECENT_IO];
 360	struct hlist_head	io_hash[RECENT_IO + 1];
 361	struct list_head	io_lru;
 362	spinlock_t		io_lock;
 363
 364	struct cache_accounting	accounting;
 365
 366	/* The rest of this all shows up in sysfs */
 367	unsigned		sequential_cutoff;
 368	unsigned		readahead;
 369
 370	unsigned		io_disable:1;
 371	unsigned		verify:1;
 372	unsigned		bypass_torture_test:1;
 373
 374	unsigned		partial_stripes_expensive:1;
 375	unsigned		writeback_metadata:1;
 376	unsigned		writeback_running:1;
 377	unsigned char		writeback_percent;
 378	unsigned		writeback_delay;
 379
 380	uint64_t		writeback_rate_target;
 381	int64_t			writeback_rate_proportional;
 382	int64_t			writeback_rate_integral;
 383	int64_t			writeback_rate_integral_scaled;
 384	int32_t			writeback_rate_change;
 385
 386	unsigned		writeback_rate_update_seconds;
 387	unsigned		writeback_rate_i_term_inverse;
 388	unsigned		writeback_rate_p_term_inverse;
 389	unsigned		writeback_rate_minimum;
 390
 391	enum stop_on_failure	stop_when_cache_set_failed;
 392#define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
 393	atomic_t		io_errors;
 394	unsigned		error_limit;
 395
 396	char			backing_dev_name[BDEVNAME_SIZE];
 397};
 398
 399enum alloc_reserve {
 400	RESERVE_BTREE,
 401	RESERVE_PRIO,
 402	RESERVE_MOVINGGC,
 403	RESERVE_NONE,
 404	RESERVE_NR,
 405};
 406
 407struct cache {
 408	struct cache_set	*set;
 409	struct cache_sb		sb;
 410	struct bio		sb_bio;
 411	struct bio_vec		sb_bv[1];
 412
 413	struct kobject		kobj;
 414	struct block_device	*bdev;
 415
 416	struct task_struct	*alloc_thread;
 417
 418	struct closure		prio;
 419	struct prio_set		*disk_buckets;
 420
 421	/*
 422	 * When allocating new buckets, prio_write() gets first dibs - since we
 423	 * may not be able to allocate at all without writing priorities and gens.
 424	 * prio_last_buckets[] contains the last buckets we wrote priorities to
 425	 * (so gc can mark them as metadata), prio_buckets[] contains the
 426	 * buckets allocated for the next prio write.
 427	 */
 428	uint64_t		*prio_buckets;
 429	uint64_t		*prio_last_buckets;
 430
 431	/*
 432	 * free: Buckets that are ready to be used
 433	 *
 434	 * free_inc: Incoming buckets - these are buckets that currently have
 435	 * cached data in them, and we can't reuse them until after we write
 436	 * their new gen to disk. After prio_write() finishes writing the new
 437	 * gens/prios, they'll be moved to the free list (and possibly discarded
 438	 * in the process)
 439	 */
 440	DECLARE_FIFO(long, free)[RESERVE_NR];
 441	DECLARE_FIFO(long, free_inc);
 442
 443	size_t			fifo_last_bucket;
 444
 445	/* Allocation stuff: */
 446	struct bucket		*buckets;
 447
 448	DECLARE_HEAP(struct bucket *, heap);
 449
 450	/*
 451	 * If nonzero, we know we aren't going to find any buckets to invalidate
 452	 * until a gc finishes - otherwise we could pointlessly burn a ton of
 453	 * cpu
 454	 */
 455	unsigned		invalidate_needs_gc;
 456
 457	bool			discard; /* Get rid of? */
 458
 459	struct journal_device	journal;
 460
 461	/* The rest of this all shows up in sysfs */
 462#define IO_ERROR_SHIFT		20
 463	atomic_t		io_errors;
 464	atomic_t		io_count;
 465
 466	atomic_long_t		meta_sectors_written;
 467	atomic_long_t		btree_sectors_written;
 468	atomic_long_t		sectors_written;
 469
 470	char			cache_dev_name[BDEVNAME_SIZE];
 471};
 472
 473struct gc_stat {
 474	size_t			nodes;
 475	size_t			key_bytes;
 476
 477	size_t			nkeys;
 478	uint64_t		data;	/* sectors */
 479	unsigned		in_use; /* percent */
 480};
 481
 482/*
 483 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 484 *
 485 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 486 * all the backing devices first (their cached data gets invalidated, and they
 487 * won't automatically reattach).
 488 *
 489 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 490 * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
 491 * flushing dirty data).
 492 *
 493 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 494 * replay is complete.
 495 *
 496 * CACHE_SET_IO_DISABLE is set when bcache is stopping the whole cache set; all
 497 * external and internal I/O should be denied when this flag is set.
 498 *
 499 */
 500#define CACHE_SET_UNREGISTERING		0
 501#define	CACHE_SET_STOPPING		1
 502#define	CACHE_SET_RUNNING		2
 503#define CACHE_SET_IO_DISABLE		3
 504
 505struct cache_set {
 506	struct closure		cl;
 507
 508	struct list_head	list;
 509	struct kobject		kobj;
 510	struct kobject		internal;
 511	struct dentry		*debug;
 512	struct cache_accounting accounting;
 513
 514	unsigned long		flags;
 515
 516	struct cache_sb		sb;
 517
 518	struct cache		*cache[MAX_CACHES_PER_SET];
 519	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
 520	int			caches_loaded;
 521
 522	struct bcache_device	**devices;
 523	unsigned		devices_max_used;
 524	struct list_head	cached_devs;
 525	uint64_t		cached_dev_sectors;
 526	struct closure		caching;
 527
 528	struct closure		sb_write;
 529	struct semaphore	sb_write_mutex;
 530
 531	mempool_t		*search;
 532	mempool_t		*bio_meta;
 533	struct bio_set		*bio_split;
 534
 535	/* For the btree cache */
 536	struct shrinker		shrink;
 537
 538	/* For the btree cache and anything allocation related */
 539	struct mutex		bucket_lock;
 540
 541	/* log2(bucket_size), in sectors */
 542	unsigned short		bucket_bits;
 543
 544	/* log2(block_size), in sectors */
 545	unsigned short		block_bits;
 546
 547	/*
 548	 * Default number of pages for a new btree node - may be less than a
 549	 * full bucket
 550	 */
 551	unsigned		btree_pages;
 552
 553	/*
 554	 * Lists of struct btrees; lru is the list for structs that have memory
 555	 * allocated for an actual btree node; freed is for structs that do not.
 556	 *
 557	 * We never free a struct btree, except on shutdown - we just put it on
 558	 * the btree_cache_freed list and reuse it later. This simplifies the
 559	 * code, and it doesn't cost us much memory as the memory usage is
 560	 * dominated by buffers that hold the actual btree node data and those
 561	 * can be freed - and the number of struct btrees allocated is
 562	 * effectively bounded.
 563	 *
 564	 * btree_cache_freeable effectively is a small cache - we use it because
 565	 * high order page allocations can be rather expensive, and it's quite
 566	 * common to delete and allocate btree nodes in quick succession. It
 567	 * should never grow past ~2-3 nodes in practice.
 568	 */
 569	struct list_head	btree_cache;
 570	struct list_head	btree_cache_freeable;
 571	struct list_head	btree_cache_freed;
 572
 573	/* Number of elements in btree_cache + btree_cache_freeable lists */
 574	unsigned		btree_cache_used;
 575
 576	/*
 577	 * If we need to allocate memory for a new btree node and that
 578	 * allocation fails, we can cannibalize another node in the btree cache
 579	 * to satisfy the allocation - lock to guarantee only one thread does
 580	 * this at a time:
 581	 */
 582	wait_queue_head_t	btree_cache_wait;
 583	struct task_struct	*btree_cache_alloc_lock;
 584
 585	/*
 586	 * When we free a btree node, we increment the gen of the bucket the
 587	 * node is in - but we can't rewrite the prios and gens until we've
 588	 * finished whatever it is we were doing; otherwise, after a crash the
 589	 * btree node would be freed but, for say a split, we might not have the
 590	 * pointers to the new nodes inserted into the btree yet.
 591	 *
 592	 * This is a refcount that blocks prio_write() until the new keys are
 593	 * written.
 594	 */
 595	atomic_t		prio_blocked;
 596	wait_queue_head_t	bucket_wait;
 597
 598	/*
 599	 * For any bio we don't skip we subtract the number of sectors from
 600	 * rescale; when it hits 0 we rescale all the bucket priorities.
 601	 */
 602	atomic_t		rescale;
 603	/*
 604	 * When we invalidate buckets, we use both the priority and the amount
 605	 * of good data to determine which buckets to reuse first - to weight
 606	 * those together consistently we keep track of the smallest nonzero
 607	 * priority of any bucket.
 608	 */
 609	uint16_t		min_prio;
 610
 611	/*
 612	 * max(gen - last_gc) for all buckets. When it gets too big we have to gc
 613	 * to keep gens from wrapping around.
 614	 */
 615	uint8_t			need_gc;
 616	struct gc_stat		gc_stats;
 617	size_t			nbuckets;
 618	size_t			avail_nbuckets;
 619
 620	struct task_struct	*gc_thread;
 621	/* Where in the btree gc currently is */
 622	struct bkey		gc_done;
 623
 624	/*
 625	 * The allocation code needs gc_mark in struct bucket to be correct, but
 626	 * it's not while a gc is in progress. Protected by bucket_lock.
 627	 */
 628	int			gc_mark_valid;
 629
 630	/* Counts how many sectors bio_insert has added to the cache */
 631	atomic_t		sectors_to_gc;
 632	wait_queue_head_t	gc_wait;
 633
 634	struct keybuf		moving_gc_keys;
 635	/* Number of moving GC bios in flight */
 636	struct semaphore	moving_in_flight;
 637
 638	struct workqueue_struct	*moving_gc_wq;
 639
 640	struct btree		*root;
 641
 642#ifdef CONFIG_BCACHE_DEBUG
 643	struct btree		*verify_data;
 644	struct bset		*verify_ondisk;
 645	struct mutex		verify_lock;
 646#endif
 647
 648	unsigned		nr_uuids;
 649	struct uuid_entry	*uuids;
 650	BKEY_PADDED(uuid_bucket);
 651	struct closure		uuid_write;
 652	struct semaphore	uuid_write_mutex;
 653
 654	/*
 655	 * A btree node on disk could have too many bsets for an iterator to fit
 656	 * on the stack - so we have to dynamically allocate them
 657	 */
 658	mempool_t		*fill_iter;
 659
 660	struct bset_sort_state	sort;
 661
 662	/* List of buckets we're currently writing data to */
 663	struct list_head	data_buckets;
 664	spinlock_t		data_bucket_lock;
 665
 666	struct journal		journal;
 667
 668#define CONGESTED_MAX		1024
 669	unsigned		congested_last_us;
 670	atomic_t		congested;
 671
 672	/* The rest of this all shows up in sysfs */
 673	unsigned		congested_read_threshold_us;
 674	unsigned		congested_write_threshold_us;
 675
 676	struct time_stats	btree_gc_time;
 677	struct time_stats	btree_split_time;
 678	struct time_stats	btree_read_time;
 679
 680	atomic_long_t		cache_read_races;
 681	atomic_long_t		writeback_keys_done;
 682	atomic_long_t		writeback_keys_failed;
 683
 684	atomic_long_t		reclaim;
 685	atomic_long_t		flush_write;
 686	atomic_long_t		retry_flush_write;
 687
 688	enum			{
 689		ON_ERROR_UNREGISTER,
 690		ON_ERROR_PANIC,
 691	}			on_error;
 692#define DEFAULT_IO_ERROR_LIMIT 8
 693	unsigned		error_limit;
 694	unsigned		error_decay;
 695
 696	unsigned short		journal_delay_ms;
 697	bool			expensive_debug_checks;
 698	unsigned		verify:1;
 699	unsigned		key_merging_disabled:1;
 700	unsigned		gc_always_rewrite:1;
 701	unsigned		shrinker_disabled:1;
 702	unsigned		copy_gc_enabled:1;
 703
 704#define BUCKET_HASH_BITS	12
 705	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
 706
 707	DECLARE_HEAP(struct btree *, flush_btree);
 708};
 709
 710struct bbio {
 711	unsigned		submit_time_us;
 712	union {
 713		struct bkey	key;
 714		uint64_t	_pad[3];
 715		/*
 716		 * We only need pad = 3 here because we only ever carry around a
 717		 * single pointer - i.e. the pointer we're doing io to/from.
 718		 */
 719	};
 720	struct bio		bio;
 721};
 722
 723#define BTREE_PRIO		USHRT_MAX
 724#define INITIAL_PRIO		32768U
 725
 726#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
 727#define btree_blocks(b)							\
 728	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
 729
 730#define btree_default_blocks(c)						\
 731	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
 732
 733#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
 734#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
 735#define block_bytes(c)		((c)->sb.block_size << 9)
 736
 737#define prios_per_bucket(c)				\
 738	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
 739	 sizeof(struct bucket_disk))
 740#define prio_buckets(c)					\
 741	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
 742
 743static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
 744{
 745	return s >> c->bucket_bits;
 746}
 747
 748static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
 749{
 750	return ((sector_t) b) << c->bucket_bits;
 751}
 752
 753static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
 754{
 755	return s & (c->sb.bucket_size - 1);
 756}
 757
 758static inline struct cache *PTR_CACHE(struct cache_set *c,
 759				      const struct bkey *k,
 760				      unsigned ptr)
 761{
 762	return c->cache[PTR_DEV(k, ptr)];
 763}
 764
 765static inline size_t PTR_BUCKET_NR(struct cache_set *c,
 766				   const struct bkey *k,
 767				   unsigned ptr)
 768{
 769	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
 770}
 771
 772static inline struct bucket *PTR_BUCKET(struct cache_set *c,
 773					const struct bkey *k,
 774					unsigned ptr)
 775{
 776	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
 777}
 778
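/*
 * Worked example (illustrative): gens are 8 bit and wrap around, so with
 * a = 2 and b = 250, r = (uint8_t)(2 - 250) = 8, i.e. a is 8 generations
 * newer than b. Differences over 128 are assumed to mean b is really the
 * newer value, and gen_after() returns 0 instead.
 */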
 779static inline uint8_t gen_after(uint8_t a, uint8_t b)
 780{
 781	uint8_t r = a - b;
 782	return r > 128U ? 0 : r;
 783}
 784
 785static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
 786				unsigned i)
 787{
 788	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 789}
 790
 791static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
 792				 unsigned i)
 793{
 794	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
 795}
 796
 797/* Btree key macros */
 798
 799/*
 800 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 801 * jset: The checksum is _always_ the first 8 bytes of these structs
 802 */
 803#define csum_set(i)							\
 804	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
 805		  ((void *) bset_bkey_last(i)) -			\
 806		  (((void *) (i)) + sizeof(uint64_t)))
 807
 808/* Error handling macros */
 809
 810#define btree_bug(b, ...)						\
 811do {									\
 812	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
 813		dump_stack();						\
 814} while (0)
 815
 816#define cache_bug(c, ...)						\
 817do {									\
 818	if (bch_cache_set_error(c, __VA_ARGS__))			\
 819		dump_stack();						\
 820} while (0)
 821
 822#define btree_bug_on(cond, b, ...)					\
 823do {									\
 824	if (cond)							\
 825		btree_bug(b, __VA_ARGS__);				\
 826} while (0)
 827
 828#define cache_bug_on(cond, c, ...)					\
 829do {									\
 830	if (cond)							\
 831		cache_bug(c, __VA_ARGS__);				\
 832} while (0)
 833
 834#define cache_set_err_on(cond, c, ...)					\
 835do {									\
 836	if (cond)							\
 837		bch_cache_set_error(c, __VA_ARGS__);			\
 838} while (0)
 839
 840/* Looping macros */
 841
 842#define for_each_cache(ca, cs, iter)					\
 843	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
 844
 845#define for_each_bucket(b, ca)						\
 846	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
 847	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
 848
 849static inline void cached_dev_put(struct cached_dev *dc)
 850{
 851	if (refcount_dec_and_test(&dc->count))
 852		schedule_work(&dc->detach);
 853}
 854
 855static inline bool cached_dev_get(struct cached_dev *dc)
 856{
 857	if (!refcount_inc_not_zero(&dc->count))
 858		return false;
 859
 860	/* Paired with the mb in cached_dev_attach */
 861	smp_mb__after_atomic();
 862	return true;
 863}
 864
 865/*
 866 * bucket_gc_gen() returns the difference between the bucket's current gen and
 867 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 868 */
 869
 870static inline uint8_t bucket_gc_gen(struct bucket *b)
 871{
 872	return b->gen - b->last_gc;
 873}
 874
 875#define BUCKET_GC_GEN_MAX	96U
 876
 877#define kobj_attribute_write(n, fn)					\
 878	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
 879
 880#define kobj_attribute_rw(n, show, store)				\
 881	static struct kobj_attribute ksysfs_##n =			\
 882		__ATTR(n, S_IWUSR|S_IRUSR, show, store)
 883
 884static inline void wake_up_allocators(struct cache_set *c)
 885{
 886	struct cache *ca;
 887	unsigned i;
 888
 889	for_each_cache(ca, c, i)
 890		wake_up_process(ca->alloc_thread);
 891}
 892
 893static inline void closure_bio_submit(struct cache_set *c,
 894				      struct bio *bio,
 895				      struct closure *cl)
 896{
 897	closure_get(cl);
 898	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
 899		bio->bi_status = BLK_STS_IOERR;
 900		bio_endio(bio);
 901		return;
 902	}
 903	generic_make_request(bio);
 904}
 905
 906/*
 907 * Prevent the kthread from exiting directly, and make sure that when
 908 * kthread_stop() is called to stop a kthread, it is still alive. If a kthread
 909 * might be stopped because the CACHE_SET_IO_DISABLE bit is set,
 910 * wait_for_kthread_stop() is necessary before the kthread returns.
 911 */
 912static inline void wait_for_kthread_stop(void)
 913{
 914	while (!kthread_should_stop()) {
 915		set_current_state(TASK_INTERRUPTIBLE);
 916		schedule();
 917	}
 918}
 919
 920/* Forward declarations */
 921
 922void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
 923void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
 924void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
 925			      blk_status_t, const char *);
 926void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
 927		const char *);
 928void bch_bbio_free(struct bio *, struct cache_set *);
 929struct bio *bch_bbio_alloc(struct cache_set *);
 930
 931void __bch_submit_bbio(struct bio *, struct cache_set *);
 932void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
 933
 934uint8_t bch_inc_gen(struct cache *, struct bucket *);
 935void bch_rescale_priorities(struct cache_set *, int);
 936
 937bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
 938void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
 939
 940void __bch_bucket_free(struct cache *, struct bucket *);
 941void bch_bucket_free(struct cache_set *, struct bkey *);
 942
 943long bch_bucket_alloc(struct cache *, unsigned, bool);
 944int __bch_bucket_alloc_set(struct cache_set *, unsigned,
 945			   struct bkey *, int, bool);
 946int bch_bucket_alloc_set(struct cache_set *, unsigned,
 947			 struct bkey *, int, bool);
 948bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
 949		       unsigned, unsigned, bool);
 950bool bch_cached_dev_error(struct cached_dev *dc);
 951
 952__printf(2, 3)
 953bool bch_cache_set_error(struct cache_set *, const char *, ...);
 954
 955void bch_prio_write(struct cache *);
 956void bch_write_bdev_super(struct cached_dev *, struct closure *);
 957
 958extern struct workqueue_struct *bcache_wq;
 959extern const char * const bch_cache_modes[];
 960extern const char * const bch_stop_on_failure_modes[];
 961extern struct mutex bch_register_lock;
 962extern struct list_head bch_cache_sets;
 963
 964extern struct kobj_type bch_cached_dev_ktype;
 965extern struct kobj_type bch_flash_dev_ktype;
 966extern struct kobj_type bch_cache_set_ktype;
 967extern struct kobj_type bch_cache_set_internal_ktype;
 968extern struct kobj_type bch_cache_ktype;
 969
 970void bch_cached_dev_release(struct kobject *);
 971void bch_flash_dev_release(struct kobject *);
 972void bch_cache_set_release(struct kobject *);
 973void bch_cache_release(struct kobject *);
 974
 975int bch_uuid_write(struct cache_set *);
 976void bcache_write_super(struct cache_set *);
 977
 978int bch_flash_dev_create(struct cache_set *c, uint64_t size);
 979
 980int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
 981void bch_cached_dev_detach(struct cached_dev *);
 982void bch_cached_dev_run(struct cached_dev *);
 983void bcache_device_stop(struct bcache_device *);
 984
 985void bch_cache_set_unregister(struct cache_set *);
 986void bch_cache_set_stop(struct cache_set *);
 987
 988struct cache_set *bch_cache_set_alloc(struct cache_sb *);
 989void bch_btree_cache_free(struct cache_set *);
 990int bch_btree_cache_alloc(struct cache_set *);
 991void bch_moving_init_cache_set(struct cache_set *);
 992int bch_open_buckets_alloc(struct cache_set *);
 993void bch_open_buckets_free(struct cache_set *);
 994
 995int bch_cache_allocator_start(struct cache *ca);
 996
 997void bch_debug_exit(void);
 998int bch_debug_init(struct kobject *);
 999void bch_request_exit(void);
1000int bch_request_init(void);
1001
1002#endif /* _BCACHE_H */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _BCACHE_H
   3#define _BCACHE_H
   4
   5/*
   6 * SOME HIGH LEVEL CODE DOCUMENTATION:
   7 *
   8 * Bcache mostly works with cache sets, cache devices, and backing devices.
   9 *
  10 * Support for multiple cache devices hasn't quite been finished off yet, but
  11 * it's about 95% plumbed through. A cache set and its cache devices is sort of
  12 * like a md raid array and its component devices. Most of the code doesn't care
  13 * about individual cache devices, the main abstraction is the cache set.
  14 *
  15 * Multiple cache devices is intended to give us the ability to mirror dirty
  16 * cached data and metadata, without mirroring clean cached data.
  17 *
  18 * Backing devices are different, in that they have a lifetime independent of a
  19 * cache set. When you register a newly formatted backing device it'll come up
  20 * in passthrough mode, and then you can attach and detach a backing device from
  21 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
  22 * invalidates any cached data for that backing device.
  23 *
  24 * A cache set can have multiple (many) backing devices attached to it.
  25 *
  26 * There's also flash only volumes - this is the reason for the distinction
  27 * between struct cached_dev and struct bcache_device. A flash only volume
  28 * works much like a bcache device that has a backing device, except the
  29 * "cached" data is always dirty. The end result is that we get thin
  30 * provisioning with very little additional code.
  31 *
  32 * Flash only volumes work but they're not production ready because the moving
  33 * garbage collector needs more work. More on that later.
  34 *
  35 * BUCKETS/ALLOCATION:
  36 *
  37 * Bcache is primarily designed for caching, which means that in normal
  38 * operation all of our available space will be allocated. Thus, we need an
  39 * efficient way of deleting things from the cache so we can write new things to
  40 * it.
  41 *
  42 * To do this, we first divide the cache device up into buckets. A bucket is the
  43 * unit of allocation; they're typically around 1 mb - anywhere from 128k to 2M+
  44 * works efficiently.
  45 *
  46 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
  47 * it. The gens and priorities for all the buckets are stored contiguously and
  48 * packed on disk (in a linked list of buckets - aside from the superblock, all
  49 * of bcache's metadata is stored in buckets).
  50 *
  51 * The priority is used to implement an LRU. We reset a bucket's priority when
  52 * we allocate it or on cache it, and every so often we decrement the priority
  53 * of each bucket. It could be used to implement something more sophisticated,
  54 * if anyone ever gets around to it.
  55 *
  56 * The generation is used for invalidating buckets. Each pointer also has an 8
  57 * bit generation embedded in it; for a pointer to be considered valid, its gen
  58 * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
  59 * we have to do is increment its gen (and write its new gen to disk; we batch
  60 * this up).
  61 *
  62 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
  63 * contain metadata (including btree nodes).
  64 *
  65 * THE BTREE:
  66 *
  67 * Bcache is in large part design around the btree.
  68 *
  69 * At a high level, the btree is just an index of key -> ptr tuples.
  70 *
  71 * Keys represent extents, and thus have a size field. Keys also have a variable
  72 * number of pointers attached to them (potentially zero, which is handy for
  73 * invalidating the cache).
  74 *
  75 * The key itself is an inode:offset pair. The inode number corresponds to a
  76 * backing device or a flash only volume. The offset is the ending offset of the
  77 * extent within the inode - not the starting offset; this makes lookups
  78 * slightly more convenient.
  79 *
  80 * Pointers contain the cache device id, the offset on that device, and an 8 bit
  81 * generation number. More on the gen later.
  82 *
  83 * Index lookups are not fully abstracted - cache lookups in particular are
  84 * still somewhat mixed in with the btree code, but things are headed in that
  85 * direction.
  86 *
  87 * Updates are fairly well abstracted, though. There are two different ways of
  88 * updating the btree; insert and replace.
  89 *
  90 * BTREE_INSERT will just take a list of keys and insert them into the btree -
  91 * overwriting (possibly only partially) any extents they overlap with. This is
  92 * used to update the index after a write.
  93 *
  94 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
  95 * overwriting a key that matches another given key. This is used for inserting
  96 * data into the cache after a cache miss, and for background writeback, and for
  97 * the moving garbage collector.
  98 *
  99 * There is no "delete" operation; deleting things from the index is
 100 * accomplished by either by invalidating pointers (by incrementing a bucket's
 101 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 102 * previously present at that location in the index.
 103 *
 104 * This means that there are always stale/invalid keys in the btree. They're
 105 * filtered out by the code that iterates through a btree node, and removed when
 106 * a btree node is rewritten.
 107 *
 108 * BTREE NODES:
 109 *
 110 * Our unit of allocation is a bucket, and we we can't arbitrarily allocate and
 111 * free smaller than a bucket - so, that's how big our btree nodes are.
 112 *
 113 * (If buckets are really big we'll only use part of the bucket for a btree node
 114 * - no less than 1/4th - but a bucket still contains no more than a single
 115 * btree node. I'd actually like to change this, but for now we rely on the
 116 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 117 *
 118 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 119 * btree implementation.
 120 *
 121 * The way this is solved is that btree nodes are internally log structured; we
 122 * can append new keys to an existing btree node without rewriting it. This
 123 * means each set of keys we write is sorted, but the node is not.
 124 *
 125 * We maintain this log structure in memory - keeping 1Mb of keys sorted would
 126 * be expensive, and we have to distinguish between the keys we have written and
 127 * the keys we haven't. So to do a lookup in a btree node, we have to search
 128 * each sorted set. But we do merge written sets together lazily, so the cost of
 129 * these extra searches is quite low (normally most of the keys in a btree node
 130 * will be in one big set, and then there'll be one or two sets that are much
 131 * smaller).
 132 *
 133 * This log structure makes bcache's btree more of a hybrid between a
 134 * conventional btree and a compacting data structure, with some of the
 135 * advantages of both.
 136 *
 137 * GARBAGE COLLECTION:
 138 *
 139 * We can't just invalidate any bucket - it might contain dirty data or
 140 * metadata. If it once contained dirty data, other writes might overwrite it
 141 * later, leaving no valid pointers into that bucket in the index.
 142 *
 143 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 144 * It also counts how much valid data it each bucket currently contains, so that
 145 * allocation can reuse buckets sooner when they've been mostly overwritten.
 146 *
 147 * It also does some things that are really internal to the btree
 148 * implementation. If a btree node contains pointers that are stale by more than
 149 * some threshold, it rewrites the btree node to avoid the bucket's generation
 150 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 151 *
 152 * THE JOURNAL:
 153 *
 154 * Bcache's journal is not necessary for consistency; we always strictly
 155 * order metadata writes so that the btree and everything else is consistent on
 156 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 157 * caching (with recovery from unclean shutdown) before journalling was
 158 * implemented.
 159 *
 160 * Rather, the journal is purely a performance optimization; we can't complete a
 161 * write until we've updated the index on disk, otherwise the cache would be
 162 * inconsistent in the event of an unclean shutdown. This means that without the
 163 * journal, on random write workloads we constantly have to update all the leaf
 164 * nodes in the btree, and those writes will be mostly empty (appending at most
 165 * a few keys each) - highly inefficient in terms of amount of metadata writes,
 166 * and it puts more strain on the various btree resorting/compacting code.
 167 *
 168 * The journal is just a log of keys we've inserted; on startup we just reinsert
 169 * all the keys in the open journal entries. That means that when we're updating
 170 * a node in the btree, we can wait until a 4k block of keys fills up before
 171 * writing them out.
 172 *
 173 * For simplicity, we only journal updates to leaf nodes; updates to parent
 174 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 175 * the complexity to deal with journalling them (in particular, journal replay)
 176 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 177 */
 178
 179#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
 180
 181#include <linux/bcache.h>
 182#include <linux/bio.h>
 183#include <linux/kobject.h>
 184#include <linux/list.h>
 185#include <linux/mutex.h>
 186#include <linux/rbtree.h>
 187#include <linux/rwsem.h>
 188#include <linux/refcount.h>
 189#include <linux/types.h>
 190#include <linux/workqueue.h>
 191#include <linux/kthread.h>
 192
 193#include "bset.h"
 194#include "util.h"
 195#include "closure.h"
 196
 197struct bucket {
 198	atomic_t	pin;
 199	uint16_t	prio;
 200	uint8_t		gen;
 201	uint8_t		last_gc; /* Most out of date gen in the btree */
 202	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
 203};
 204
 205/*
 206 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 207 * as multiple threads touch struct bucket without locking
 208 */
 209
 210BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
 211#define GC_MARK_RECLAIMABLE	1
 212#define GC_MARK_DIRTY		2
 213#define GC_MARK_METADATA	3
 214#define GC_SECTORS_USED_SIZE	13
 215#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
 216BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
 217BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 218
 219#include "journal.h"
 220#include "stats.h"
 221struct search;
 222struct btree;
 223struct keybuf;
 224
 225struct keybuf_key {
 226	struct rb_node		node;
 227	BKEY_PADDED(key);
 228	void			*private;
 229};
 230
 231struct keybuf {
 232	struct bkey		last_scanned;
 233	spinlock_t		lock;
 234
 235	/*
 236	 * Beginning and end of range in rb tree - so that we can skip taking
 237	 * lock and checking the rb tree when we need to check for overlapping
 238	 * keys.
 239	 */
 240	struct bkey		start;
 241	struct bkey		end;
 242
 243	struct rb_root		keys;
 244
 245#define KEYBUF_NR		500
 246	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
 247};
 248
 249struct bcache_device {
 250	struct closure		cl;
 251
 252	struct kobject		kobj;
 253
 254	struct cache_set	*c;
 255	unsigned int		id;
 256#define BCACHEDEVNAME_SIZE	12
 257	char			name[BCACHEDEVNAME_SIZE];
 258
 259	struct gendisk		*disk;
 260
 261	unsigned long		flags;
 262#define BCACHE_DEV_CLOSING		0
 263#define BCACHE_DEV_DETACHING		1
 264#define BCACHE_DEV_UNLINK_DONE		2
 265#define BCACHE_DEV_WB_RUNNING		3
 266#define BCACHE_DEV_RATE_DW_RUNNING	4
 267	int			nr_stripes;
 268	unsigned int		stripe_size;
 269	atomic_t		*stripe_sectors_dirty;
 270	unsigned long		*full_dirty_stripes;
 271
 272	struct bio_set		bio_split;
 273
 274	unsigned int		data_csum:1;
 275
 276	int (*cache_miss)(struct btree *b, struct search *s,
 277			  struct bio *bio, unsigned int sectors);
 278	int (*ioctl)(struct bcache_device *d, fmode_t mode,
 279		     unsigned int cmd, unsigned long arg);
 280};
 281
 282struct io {
 283	/* Used to track sequential IO so it can be skipped */
 284	struct hlist_node	hash;
 285	struct list_head	lru;
 286
 287	unsigned long		jiffies;
 288	unsigned int		sequential;
 289	sector_t		last;
 290};
 291
 292enum stop_on_failure {
 293	BCH_CACHED_DEV_STOP_AUTO = 0,
 294	BCH_CACHED_DEV_STOP_ALWAYS,
 295	BCH_CACHED_DEV_STOP_MODE_MAX,
 296};
 297
 298struct cached_dev {
 299	struct list_head	list;
 300	struct bcache_device	disk;
 301	struct block_device	*bdev;
 302
 303	struct cache_sb		sb;
 304	struct cache_sb_disk	*sb_disk;
 305	struct bio		sb_bio;
 306	struct bio_vec		sb_bv[1];
 307	struct closure		sb_write;
 308	struct semaphore	sb_write_mutex;
 309
 310	/* Refcount on the cache set. Always nonzero when we're caching. */
 311	refcount_t		count;
 312	struct work_struct	detach;
 313
 314	/*
 315	 * Device might not be running if it's dirty and the cache set hasn't
 316	 * showed up yet.
 317	 */
 318	atomic_t		running;
 319
 320	/*
 321	 * Writes take a shared lock from start to finish; scanning for dirty
 322	 * data to refill the rb tree requires an exclusive lock.
 323	 */
 324	struct rw_semaphore	writeback_lock;
 325
 326	/*
 327	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
 328	 * data in the cache. Protected by writeback_lock; must have an
 329	 * shared lock to set and exclusive lock to clear.
 330	 */
 331	atomic_t		has_dirty;
 332
 333#define BCH_CACHE_READA_ALL		0
 334#define BCH_CACHE_READA_META_ONLY	1
 335	unsigned int		cache_readahead_policy;
 
 
 
 
 336	struct bch_ratelimit	writeback_rate;
 337	struct delayed_work	writeback_rate_update;
 338
 339	/* Limit number of writeback bios in flight */
 340	struct semaphore	in_flight;
 341	struct task_struct	*writeback_thread;
 342	struct workqueue_struct	*writeback_write_wq;
 343
 344	struct keybuf		writeback_keys;
 345
 346	struct task_struct	*status_update_thread;
 347	/*
 348	 * Order the write-half of writeback operations strongly in dispatch
 349	 * order.  (Maintain LBA order; don't allow reads completing out of
 350	 * order to re-order the writes...)
 351	 */
 352	struct closure_waitlist writeback_ordering_wait;
 353	atomic_t		writeback_sequence_next;
 354
 355	/* For tracking sequential IO */
 356#define RECENT_IO_BITS	7
 357#define RECENT_IO	(1 << RECENT_IO_BITS)
 358	struct io		io[RECENT_IO];
 359	struct hlist_head	io_hash[RECENT_IO + 1];
 360	struct list_head	io_lru;
 361	spinlock_t		io_lock;
 362
 363	struct cache_accounting	accounting;
 364
 365	/* The rest of this all shows up in sysfs */
 366	unsigned int		sequential_cutoff;
 
 367
 368	unsigned int		io_disable:1;
 369	unsigned int		verify:1;
 370	unsigned int		bypass_torture_test:1;
 371
 372	unsigned int		partial_stripes_expensive:1;
 373	unsigned int		writeback_metadata:1;
 374	unsigned int		writeback_running:1;
 375	unsigned int		writeback_consider_fragment:1;
 376	unsigned char		writeback_percent;
 377	unsigned int		writeback_delay;
 378
 379	uint64_t		writeback_rate_target;
 380	int64_t			writeback_rate_proportional;
 381	int64_t			writeback_rate_integral;
 382	int64_t			writeback_rate_integral_scaled;
 383	int32_t			writeback_rate_change;
 384
 385	unsigned int		writeback_rate_update_seconds;
 386	unsigned int		writeback_rate_i_term_inverse;
 387	unsigned int		writeback_rate_p_term_inverse;
 388	unsigned int		writeback_rate_fp_term_low;
 389	unsigned int		writeback_rate_fp_term_mid;
 390	unsigned int		writeback_rate_fp_term_high;
 391	unsigned int		writeback_rate_minimum;
 392
 393	enum stop_on_failure	stop_when_cache_set_failed;
 394#define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
 395	atomic_t		io_errors;
 396	unsigned int		error_limit;
 397	unsigned int		offline_seconds;
 398
 399	char			backing_dev_name[BDEVNAME_SIZE];
 400};
 401
 402enum alloc_reserve {
 403	RESERVE_BTREE,
 404	RESERVE_PRIO,
 405	RESERVE_MOVINGGC,
 406	RESERVE_NONE,
 407	RESERVE_NR,
 408};
 409
 410struct cache {
 411	struct cache_set	*set;
 412	struct cache_sb		sb;
 413	struct cache_sb_disk	*sb_disk;
 414	struct bio		sb_bio;
 415	struct bio_vec		sb_bv[1];
 416
 417	struct kobject		kobj;
 418	struct block_device	*bdev;
 419
 420	struct task_struct	*alloc_thread;
 421
 422	struct closure		prio;
 423	struct prio_set		*disk_buckets;
 424
 425	/*
 426	 * When allocating new buckets, prio_write() gets first dibs - since we
 427	 * may not be allocate at all without writing priorities and gens.
 428	 * prio_last_buckets[] contains the last buckets we wrote priorities to
 429	 * (so gc can mark them as metadata), prio_buckets[] contains the
 430	 * buckets allocated for the next prio write.
 431	 */
 432	uint64_t		*prio_buckets;
 433	uint64_t		*prio_last_buckets;
 434
 435	/*
 436	 * free: Buckets that are ready to be used
 437	 *
 438	 * free_inc: Incoming buckets - these are buckets that currently have
 439	 * cached data in them, and we can't reuse them until after we write
 440	 * their new gen to disk. After prio_write() finishes writing the new
 441	 * gens/prios, they'll be moved to the free list (and possibly discarded
 442	 * in the process)
 443	 */
 444	DECLARE_FIFO(long, free)[RESERVE_NR];
 445	DECLARE_FIFO(long, free_inc);
 446
 447	size_t			fifo_last_bucket;
 448
 449	/* Allocation stuff: */
 450	struct bucket		*buckets;
 451
 452	DECLARE_HEAP(struct bucket *, heap);
 453
 454	/*
 455	 * If nonzero, we know we aren't going to find any buckets to invalidate
 456	 * until a gc finishes - otherwise we could pointlessly burn a ton of
 457	 * cpu
 458	 */
 459	unsigned int		invalidate_needs_gc;
 460
 461	bool			discard; /* Get rid of? */
 462
 463	struct journal_device	journal;
 464
 465	/* The rest of this all shows up in sysfs */
 466#define IO_ERROR_SHIFT		20
 467	atomic_t		io_errors;
 468	atomic_t		io_count;
 469
 470	atomic_long_t		meta_sectors_written;
 471	atomic_long_t		btree_sectors_written;
 472	atomic_long_t		sectors_written;
 473
 474	char			cache_dev_name[BDEVNAME_SIZE];
 475};
 476
 477struct gc_stat {
 478	size_t			nodes;
 479	size_t			nodes_pre;
 480	size_t			key_bytes;
 481
 482	size_t			nkeys;
 483	uint64_t		data;	/* sectors */
 484	unsigned int		in_use; /* percent */
 485};
 486
 487/*
 488 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 489 *
 490 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 491 * all the backing devices first (their cached data gets invalidated, and they
 492 * won't automatically reattach).
 493 *
 494 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 495 * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
 496 * flushing dirty data).
 497 *
 498 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 499 * replay is complete.
 500 *
 501 * CACHE_SET_IO_DISABLE is set when bcache is stopping the whold cache set, all
 502 * external and internal I/O should be denied when this flag is set.
 503 *
 504 */
 505#define CACHE_SET_UNREGISTERING		0
 506#define	CACHE_SET_STOPPING		1
 507#define	CACHE_SET_RUNNING		2
 508#define CACHE_SET_IO_DISABLE		3
 509
 510struct cache_set {
 511	struct closure		cl;
 512
 513	struct list_head	list;
 514	struct kobject		kobj;
 515	struct kobject		internal;
 516	struct dentry		*debug;
 517	struct cache_accounting accounting;
 518
 519	unsigned long		flags;
 520	atomic_t		idle_counter;
 521	atomic_t		at_max_writeback_rate;
 522
 523	struct cache		*cache;
 
 
 
 
 524
 525	struct bcache_device	**devices;
 526	unsigned int		devices_max_used;
 527	atomic_t		attached_dev_nr;
 528	struct list_head	cached_devs;
 529	uint64_t		cached_dev_sectors;
 530	atomic_long_t		flash_dev_dirty_sectors;
 531	struct closure		caching;
 532
 533	struct closure		sb_write;
 534	struct semaphore	sb_write_mutex;
 535
 536	mempool_t		search;
 537	mempool_t		bio_meta;
 538	struct bio_set		bio_split;
 539
 540	/* For the btree cache */
 541	struct shrinker		shrink;
 542
 543	/* For the btree cache and anything allocation related */
 544	struct mutex		bucket_lock;
 545
 546	/* log2(bucket_size), in sectors */
 547	unsigned short		bucket_bits;
 548
 549	/* log2(block_size), in sectors */
 550	unsigned short		block_bits;
 551
 552	/*
 553	 * Default number of pages for a new btree node - may be less than a
 554	 * full bucket
 555	 */
 556	unsigned int		btree_pages;
 557
 558	/*
 559	 * Lists of struct btrees; lru is the list for structs that have memory
 560	 * allocated for actual btree node, freed is for structs that do not.
 561	 *
 562	 * We never free a struct btree, except on shutdown - we just put it on
 563	 * the btree_cache_freed list and reuse it later. This simplifies the
 564	 * code, and it doesn't cost us much memory as the memory usage is
 565	 * dominated by buffers that hold the actual btree node data and those
 566	 * can be freed - and the number of struct btrees allocated is
 567	 * effectively bounded.
 568	 *
 569	 * btree_cache_freeable effectively is a small cache - we use it because
 570	 * high order page allocations can be rather expensive, and it's quite
 571	 * common to delete and allocate btree nodes in quick succession. It
 572	 * should never grow past ~2-3 nodes in practice.
 573	 */
 574	struct list_head	btree_cache;
 575	struct list_head	btree_cache_freeable;
 576	struct list_head	btree_cache_freed;
 577
 578	/* Number of elements in btree_cache + btree_cache_freeable lists */
 579	unsigned int		btree_cache_used;
 580
 581	/*
 582	 * If we need to allocate memory for a new btree node and that
 583	 * allocation fails, we can cannibalize another node in the btree cache
 584	 * to satisfy the allocation - lock to guarantee only one thread does
 585	 * this at a time:
 586	 */
 587	wait_queue_head_t	btree_cache_wait;
 588	struct task_struct	*btree_cache_alloc_lock;
 589	spinlock_t		btree_cannibalize_lock;
 590
 591	/*
 592	 * When we free a btree node, we increment the gen of the bucket the
 593	 * node is in - but we can't rewrite the prios and gens until we've
 594	 * finished whatever it is we were doing; otherwise, after a crash the
 595	 * btree node would be freed but for say a split, we might not have the
 596	 * pointers to the new nodes inserted into the btree yet.
 597	 *
 598	 * This is a refcount that blocks prio_write() until the new keys are
 599	 * written.
 600	 */
 601	atomic_t		prio_blocked;
 602	wait_queue_head_t	bucket_wait;
 603
 604	/*
 605	 * For any bio we don't skip we subtract the number of sectors from
 606	 * rescale; when it hits 0 we rescale all the bucket priorities.
 607	 */
 608	atomic_t		rescale;
 609	/*
 610	 * Used by GC to identify whether any front side I/O is in flight
 611	 */
 612	atomic_t		search_inflight;
 613	/*
 614	 * When we invalidate buckets, we use both the priority and the amount
 615	 * of good data to determine which buckets to reuse first - to weight
 616	 * those together consistently we keep track of the smallest nonzero
 617	 * priority of any bucket.
 618	 */
 619	uint16_t		min_prio;
 620
 621	/*
 622	 * max(gen - last_gc) for all buckets. When it gets too big we have to
 623	 * gc to keep gens from wrapping around.
 624	 */
 625	uint8_t			need_gc;
 626	struct gc_stat		gc_stats;
 627	size_t			nbuckets;
 628	size_t			avail_nbuckets;
 629
 630	struct task_struct	*gc_thread;
 631	/* Where in the btree gc currently is */
 632	struct bkey		gc_done;
 633
 634	/*
 635	 * For automatic garbage collection after writeback completes, this
 636	 * variable is used as bit fields:
 637	 * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
 638	 * - 0000 0010b (BCH_DO_AUTO_GC):     do gc after writeback
 639	 * This is an optimization for write requests that follow writeback:
 640	 * once writeback finishes, the read hit rate drops because clean data
 641	 * on the cache has been discarded. Unless the user explicitly enables
 642	 * it via sysfs, it stays disabled.
 643	 */
 644#define BCH_ENABLE_AUTO_GC	1
 645#define BCH_DO_AUTO_GC		2
 646	uint8_t			gc_after_writeback;
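
	/*
	 * Illustrative flow only (a sketch inferred from the flag
	 * descriptions above, not a quote of the writeback code): sysfs
	 * sets BCH_ENABLE_AUTO_GC, and when writeback completes something
	 * like
	 *
	 *	if (c->gc_after_writeback & BCH_ENABLE_AUTO_GC)
	 *		c->gc_after_writeback |= BCH_DO_AUTO_GC;
	 *
	 * records that a gc should now be kicked off.
	 */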
 647
 648	/*
 649	 * The allocation code needs gc_mark in struct bucket to be correct, but
 650	 * it's not while a gc is in progress. Protected by bucket_lock.
 651	 */
 652	int			gc_mark_valid;
 653
 654	/* Counts how many sectors bio_insert has added to the cache */
 655	atomic_t		sectors_to_gc;
 656	wait_queue_head_t	gc_wait;
 657
 658	struct keybuf		moving_gc_keys;
 659	/* Number of moving GC bios in flight */
 660	struct semaphore	moving_in_flight;
 661
 662	struct workqueue_struct	*moving_gc_wq;
 663
 664	struct btree		*root;
 665
 666#ifdef CONFIG_BCACHE_DEBUG
 667	struct btree		*verify_data;
 668	struct bset		*verify_ondisk;
 669	struct mutex		verify_lock;
 670#endif
 671
 672	uint8_t			set_uuid[16];
 673	unsigned int		nr_uuids;
 674	struct uuid_entry	*uuids;
 675	BKEY_PADDED(uuid_bucket);
 676	struct closure		uuid_write;
 677	struct semaphore	uuid_write_mutex;
 678
 679	/*
 680	 * A btree node on disk could have too many bsets for an iterator to fit
 681	 * on the stack - have to dynamically allocate them.
 682	 * bch_cache_set_alloc() makes sure the pool allocates iterators with
 683	 * enough room to hold
 684	 *     (sb.bucket_size / sb.block_size)
 685	 * btree_iter_sets, which is more than the static MAX_BSETS.
 686	 */
 687	mempool_t		fill_iter;
 688
 689	struct bset_sort_state	sort;
 690
 691	/* List of buckets we're currently writing data to */
 692	struct list_head	data_buckets;
 693	spinlock_t		data_bucket_lock;
 694
 695	struct journal		journal;
 696
 697#define CONGESTED_MAX		1024
 698	unsigned int		congested_last_us;
 699	atomic_t		congested;
 700
 701	/* The rest of this all shows up in sysfs */
 702	unsigned int		congested_read_threshold_us;
 703	unsigned int		congested_write_threshold_us;
 704
 705	struct time_stats	btree_gc_time;
 706	struct time_stats	btree_split_time;
 707	struct time_stats	btree_read_time;
 708
 709	atomic_long_t		cache_read_races;
 710	atomic_long_t		writeback_keys_done;
 711	atomic_long_t		writeback_keys_failed;
 712
 713	atomic_long_t		reclaim;
 714	atomic_long_t		reclaimed_journal_buckets;
 715	atomic_long_t		flush_write;
 716
 717	enum			{
 718		ON_ERROR_UNREGISTER,
 719		ON_ERROR_PANIC,
 720	}			on_error;
 721#define DEFAULT_IO_ERROR_LIMIT 8
 722	unsigned int		error_limit;
 723	unsigned int		error_decay;
 724
 725	unsigned short		journal_delay_ms;
 726	bool			expensive_debug_checks;
 727	unsigned int		verify:1;
 728	unsigned int		key_merging_disabled:1;
 729	unsigned int		gc_always_rewrite:1;
 730	unsigned int		shrinker_disabled:1;
 731	unsigned int		copy_gc_enabled:1;
 732	unsigned int		idle_max_writeback_rate_enabled:1;
 733
 734#define BUCKET_HASH_BITS	12
 735	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
 736};
 737
 738struct bbio {
 739	unsigned int		submit_time_us;
 740	union {
 741		struct bkey	key;
 742		uint64_t	_pad[3];
 743		/*
 744		 * We only need pad = 3 here because we only ever carry around a
 745		 * single pointer - i.e. the pointer we're doing io to/from.
 746		 */
 747	};
 748	struct bio		bio;
 749};
 750
 751#define BTREE_PRIO		USHRT_MAX
 752#define INITIAL_PRIO		32768U
 753
 754#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
 755#define btree_blocks(b)							\
 756	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
 757
 758#define btree_default_blocks(c)						\
 759	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
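
/*
 * Worked example (a sketch; concrete numbers depend on the configuration):
 * with 4 KiB pages (PAGE_SECTORS == 8), btree_pages == 4 and 4 KiB blocks
 * (block_bits == 3):
 *
 *	btree_bytes(c)		== 4 * 4096 bytes	== 16 KiB
 *	btree_default_blocks(c)	== (8 * 4) >> 3		== 4 blocks
 *
 * btree_blocks(b) instead derives the block count from the node's own
 * key size.
 */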
 760
 761#define bucket_bytes(ca)	((ca)->sb.bucket_size << 9)
 762#define block_bytes(ca)		((ca)->sb.block_size << 9)
 763
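/*
 * meta_bucket_pages() caps how many pages a metadata bucket may use. A rough
 * example (assuming 4 KiB pages, so PAGE_SECTORS == 8): a 1 MiB bucket is
 * 2048 sectors, giving n == 2048 / 8 == 256 pages, well under the cap of
 * __rounddown_pow_of_two(USHRT_MAX) / 8 == 4096 pages (and under
 * MAX_ORDER_NR_PAGES on common configurations).
 */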
 764static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
 765{
 766	unsigned int n, max_pages;
 767
 768	max_pages = min_t(unsigned int,
 769			  __rounddown_pow_of_two(USHRT_MAX) / PAGE_SECTORS,
 770			  MAX_ORDER_NR_PAGES);
 771
 772	n = sb->bucket_size / PAGE_SECTORS;
 773	if (n > max_pages)
 774		n = max_pages;
 775
 776	return n;
 777}
 778
 779static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
 780{
 781	return meta_bucket_pages(sb) << PAGE_SHIFT;
 782}
 783
 784#define prios_per_bucket(ca)						\
 785	((meta_bucket_bytes(&(ca)->sb) - sizeof(struct prio_set)) /	\
 786	 sizeof(struct bucket_disk))
 787
 788#define prio_buckets(ca)						\
 789	DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))
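
/*
 * In other words: prios_per_bucket() is how many on-disk prio/gen entries fit
 * in one metadata bucket after the struct prio_set header, and prio_buckets()
 * is how many such buckets are needed to cover every bucket on the device.
 * With purely illustrative numbers (not the real struct sizes): 65536 usable
 * bytes per metadata bucket and 3 bytes per entry give 21845 entries, so a
 * device with 1,000,000 buckets would need DIV_ROUND_UP(1000000, 21845) == 46
 * prio buckets.
 */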
 790
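/*
 * Bucket sizes are powers of two (bucket_bits == log2(bucket size in
 * sectors)), so the sector <-> bucket conversions below are shifts and a
 * mask. Sketch with an assumed 1024-sector (512 KiB) bucket, i.e.
 * bucket_bits == 10: sector 5000 lies in bucket sector_to_bucket() ==
 * 5000 >> 10 == 4, which starts at bucket_to_sector() == 4 << 10 == 4096,
 * at offset bucket_remainder() == 5000 & 1023 == 904 within that bucket.
 */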
 791static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
 792{
 793	return s >> c->bucket_bits;
 794}
 795
 796static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
 797{
 798	return ((sector_t) b) << c->bucket_bits;
 799}
 800
 801static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
 802{
 803	return s & (c->cache->sb.bucket_size - 1);
 804}
 805
 806static inline size_t PTR_BUCKET_NR(struct cache_set *c,
 807				   const struct bkey *k,
 808				   unsigned int ptr)
 809{
 810	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
 811}
 812
 813static inline struct bucket *PTR_BUCKET(struct cache_set *c,
 814					const struct bkey *k,
 815					unsigned int ptr)
 816{
 817	return c->cache->buckets + PTR_BUCKET_NR(c, k, ptr);
 818}
 819
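/*
 * Generations are 8 bits and wrap: gen_after(a, b) is how far a is ahead of b
 * modulo 256, with any apparent lead greater than 128 treated as "not ahead
 * at all" (b is really the newer one). ptr_stale() is then how many
 * generations the bucket has advanced past the pointer; anything nonzero
 * means the pointer no longer refers to valid data. For example, a bucket gen
 * of 7 against a pointer gen of 5 gives ptr_stale() == 2.
 */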
 820static inline uint8_t gen_after(uint8_t a, uint8_t b)
 821{
 822	uint8_t r = a - b;
 823
 824	return r > 128U ? 0 : r;
 825}
 826
 827static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
 828				unsigned int i)
 829{
 830	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 831}
 832
 833static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
 834				 unsigned int i)
 835{
 836	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && c->cache;
 837}
 838
 839/* Btree key macros */
 840
 841/*
 842 * This is used for various on-disk data structures - cache_sb, prio_set, bset,
 843 * jset: The checksum is _always_ the first 8 bytes of these structs
 844 */
 845#define csum_set(i)							\
 846	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
 847		  ((void *) bset_bkey_last(i)) -			\
 848		  (((void *) (i)) + sizeof(uint64_t)))
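
/*
 * Hedged usage sketch (not code from this file): since the checksum always
 * occupies the leading 8 bytes, a struct that keeps it in a field named, say,
 * ->csum can be verified on read with
 *
 *	if (i->csum != csum_set(i))
 *		goto bad;
 *
 * and stamped before a write with i->csum = csum_set(i).
 */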
 849
 850/* Error handling macros */
 851
 852#define btree_bug(b, ...)						\
 853do {									\
 854	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
 855		dump_stack();						\
 856} while (0)
 857
 858#define cache_bug(c, ...)						\
 859do {									\
 860	if (bch_cache_set_error(c, __VA_ARGS__))			\
 861		dump_stack();						\
 862} while (0)
 863
 864#define btree_bug_on(cond, b, ...)					\
 865do {									\
 866	if (cond)							\
 867		btree_bug(b, __VA_ARGS__);				\
 868} while (0)
 869
 870#define cache_bug_on(cond, c, ...)					\
 871do {									\
 872	if (cond)							\
 873		cache_bug(c, __VA_ARGS__);				\
 874} while (0)
 875
 876#define cache_set_err_on(cond, c, ...)					\
 877do {									\
 878	if (cond)							\
 879		bch_cache_set_error(c, __VA_ARGS__);			\
 880} while (0)
 881
 882/* Looping macros */
 883
 884#define for_each_bucket(b, ca)						\
 885	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
 886	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
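
/*
 * Illustrative use (a sketch, not taken from the rest of the code): visit
 * every data bucket on a cache device, skipping the superblock buckets below
 * sb.first_bucket:
 *
 *	struct bucket *b;
 *
 *	for_each_bucket(b, ca)
 *		b->prio = INITIAL_PRIO;
 */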
 887
 888static inline void cached_dev_put(struct cached_dev *dc)
 889{
 890	if (refcount_dec_and_test(&dc->count))
 891		schedule_work(&dc->detach);
 892}
 893
 894static inline bool cached_dev_get(struct cached_dev *dc)
 895{
 896	if (!refcount_inc_not_zero(&dc->count))
 897		return false;
 898
 899	/* Paired with the mb in cached_dev_attach */
 900	smp_mb__after_atomic();
 901	return true;
 902}
 903
 904/*
 905 * bucket_gc_gen() returns the difference between the bucket's current gen and
 906 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 907 */
 908
 909static inline uint8_t bucket_gc_gen(struct bucket *b)
 910{
 911	return b->gen - b->last_gc;
 912}
 913
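/*
 * Cap on bucket_gc_gen(): kept comfortably below the 128 wrap threshold used
 * by gen_after(), so garbage collection refreshes last_gc before bucket
 * generations can drift far enough to make pointer gen comparisons ambiguous.
 */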
 914#define BUCKET_GC_GEN_MAX	96U
 915
 916#define kobj_attribute_write(n, fn)					\
 917	static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
 918
 919#define kobj_attribute_rw(n, show, store)				\
 920	static struct kobj_attribute ksysfs_##n =			\
 921		__ATTR(n, 0600, show, store)
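
/*
 * These expand to static struct kobj_attribute definitions for sysfs. For a
 * hypothetical attribute "foo", kobj_attribute_rw(foo, show_foo, store_foo)
 * defines ksysfs_foo with mode 0600 backed by show_foo()/store_foo(), while
 * kobj_attribute_write(foo, store_foo) defines a write-only (0200) variant.
 */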
 922
 923static inline void wake_up_allocators(struct cache_set *c)
 924{
 925	struct cache *ca = c->cache;
 926
 927	wake_up_process(ca->alloc_thread);
 928}
 929
 930static inline void closure_bio_submit(struct cache_set *c,
 931				      struct bio *bio,
 932				      struct closure *cl)
 933{
 934	closure_get(cl);
 935	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
 936		bio->bi_status = BLK_STS_IOERR;
 937		bio_endio(bio);
 938		return;
 939	}
 940	submit_bio_noacct(bio);
 941}
 942
 943/*
 944 * Prevent the kthread from exiting directly, and make sure it is still
 945 * alive when kthread_stop() is called to stop it. If a kthread might be
 946 * stopped because the CACHE_SET_IO_DISABLE bit is set, calling
 947 * wait_for_kthread_stop() is necessary before the kthread returns.
 948 */
 949static inline void wait_for_kthread_stop(void)
 950{
 951	while (!kthread_should_stop()) {
 952		set_current_state(TASK_INTERRUPTIBLE);
 953		schedule();
 954	}
 955}
 956
 957/* Forward declarations */
 958
 959void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
 960void bch_count_io_errors(struct cache *ca, blk_status_t error,
 961			 int is_read, const char *m);
 962void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 963			      blk_status_t error, const char *m);
 964void bch_bbio_endio(struct cache_set *c, struct bio *bio,
 965		    blk_status_t error, const char *m);
 966void bch_bbio_free(struct bio *bio, struct cache_set *c);
 967struct bio *bch_bbio_alloc(struct cache_set *c);
 968
 969void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
 970void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 971		     struct bkey *k, unsigned int ptr);
 972
 973uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
 974void bch_rescale_priorities(struct cache_set *c, int sectors);
 975
 976bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
 977void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
 978
 979void __bch_bucket_free(struct cache *ca, struct bucket *b);
 980void bch_bucket_free(struct cache_set *c, struct bkey *k);
 981
 982long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
 983int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 984			   struct bkey *k, bool wait);
 985int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 986			 struct bkey *k, bool wait);
 987bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
 988		       unsigned int sectors, unsigned int write_point,
 989		       unsigned int write_prio, bool wait);
 990bool bch_cached_dev_error(struct cached_dev *dc);
 991
 992__printf(2, 3)
 993bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
 994
 995int bch_prio_write(struct cache *ca, bool wait);
 996void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 997
 998extern struct workqueue_struct *bcache_wq;
 999extern struct workqueue_struct *bch_journal_wq;
1000extern struct workqueue_struct *bch_flush_wq;
1001extern struct mutex bch_register_lock;
1002extern struct list_head bch_cache_sets;
1003
1004extern struct kobj_type bch_cached_dev_ktype;
1005extern struct kobj_type bch_flash_dev_ktype;
1006extern struct kobj_type bch_cache_set_ktype;
1007extern struct kobj_type bch_cache_set_internal_ktype;
1008extern struct kobj_type bch_cache_ktype;
1009
1010void bch_cached_dev_release(struct kobject *kobj);
1011void bch_flash_dev_release(struct kobject *kobj);
1012void bch_cache_set_release(struct kobject *kobj);
1013void bch_cache_release(struct kobject *kobj);
1014
1015int bch_uuid_write(struct cache_set *c);
1016void bcache_write_super(struct cache_set *c);
1017
1018int bch_flash_dev_create(struct cache_set *c, uint64_t size);
1019
1020int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
1021			  uint8_t *set_uuid);
1022void bch_cached_dev_detach(struct cached_dev *dc);
1023int bch_cached_dev_run(struct cached_dev *dc);
1024void bcache_device_stop(struct bcache_device *d);
1025
1026void bch_cache_set_unregister(struct cache_set *c);
1027void bch_cache_set_stop(struct cache_set *c);
1028
1029struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
1030void bch_btree_cache_free(struct cache_set *c);
1031int bch_btree_cache_alloc(struct cache_set *c);
1032void bch_moving_init_cache_set(struct cache_set *c);
1033int bch_open_buckets_alloc(struct cache_set *c);
1034void bch_open_buckets_free(struct cache_set *c);
1035
1036int bch_cache_allocator_start(struct cache *ca);
1037
1038void bch_debug_exit(void);
1039void bch_debug_init(void);
1040void bch_request_exit(void);
1041int bch_request_init(void);
1042void bch_btree_exit(void);
1043int bch_btree_init(void);
1044
1045#endif /* _BCACHE_H */