   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2009-2011 Red Hat, Inc.
   4 *
   5 * Author: Mikulas Patocka <mpatocka@redhat.com>
   6 *
   7 * This file is released under the GPL.
   8 */
   9
  10#include <linux/dm-bufio.h>
  11
  12#include <linux/device-mapper.h>
  13#include <linux/dm-io.h>
  14#include <linux/slab.h>
  15#include <linux/sched/mm.h>
  16#include <linux/jiffies.h>
  17#include <linux/vmalloc.h>
  18#include <linux/shrinker.h>
  19#include <linux/module.h>
  20#include <linux/rbtree.h>
  21#include <linux/stacktrace.h>
  22#include <linux/jump_label.h>
  23
  24#include "dm.h"
  25
  26#define DM_MSG_PREFIX "bufio"
  27
  28/*
  29 * Memory management policy:
  30 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
  31 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
  32 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
   33 *	Start background writeback when dirty buffers outnumber clean
   34 *	ones by more than DM_BUFIO_WRITEBACK_RATIO.
  35 */
  36#define DM_BUFIO_MIN_BUFFERS		8
  37
  38#define DM_BUFIO_MEMORY_PERCENT		2
  39#define DM_BUFIO_VMALLOC_PERCENT	25
  40#define DM_BUFIO_WRITEBACK_RATIO	3
  41#define DM_BUFIO_LOW_WATERMARK_RATIO	16
  42
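/*
 * Illustrative arithmetic (an assumption for the example, not from the
 * source): on a machine with 8 GiB of main memory and 512 MiB of
 * vmalloc space, the default cache limit would be
 * min(2% of 8 GiB, 25% of 512 MiB) = min(~164 MiB, 128 MiB) = 128 MiB.
 */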
  43/*
  44 * Check buffer ages in this interval (seconds)
  45 */
  46#define DM_BUFIO_WORK_TIMER_SECS	30
  47
  48/*
  49 * Free buffers when they are older than this (seconds)
  50 */
  51#define DM_BUFIO_DEFAULT_AGE_SECS	300
  52
  53/*
   54 * The number of bytes of cached data to keep around.
  55 */
  56#define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
  57
  58/*
  59 * Align buffer writes to this boundary.
  60 * Tests show that SSDs have the highest IOPS when using 4k writes.
  61 */
  62#define DM_BUFIO_WRITE_ALIGN		4096
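/*
 * Worked example of the alignment arithmetic applied in submit_io()
 * (illustrative numbers): for a dirty byte range [100, 5000),
 * offset &= -4096 rounds the start down to 0 and
 * end = (5000 + 4095) & -4096 rounds the end up to 8192, so bytes
 * [0, 8192) of the buffer are written.
 */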
  63
  64/*
  65 * dm_buffer->list_mode
  66 */
  67#define LIST_CLEAN	0
  68#define LIST_DIRTY	1
  69#define LIST_SIZE	2
  70
  71/*--------------------------------------------------------------*/
  72
  73/*
  74 * Rather than use an LRU list, we use a clock algorithm where entries
  75 * are held in a circular list.  When an entry is 'hit' a reference bit
  76 * is set.  The least recently used entry is approximated by running a
  77 * cursor around the list selecting unreferenced entries. Referenced
  78 * entries have their reference bit cleared as the cursor passes them.
  79 */
  80struct lru_entry {
  81	struct list_head list;
  82	atomic_t referenced;
  83};
  84
  85struct lru_iter {
  86	struct lru *lru;
  87	struct list_head list;
  88	struct lru_entry *stop;
  89	struct lru_entry *e;
  90};
  91
  92struct lru {
  93	struct list_head *cursor;
  94	unsigned long count;
  95
  96	struct list_head iterators;
  97};
  98
  99/*--------------*/
 100
 101static void lru_init(struct lru *lru)
 102{
 103	lru->cursor = NULL;
 104	lru->count = 0;
 105	INIT_LIST_HEAD(&lru->iterators);
 106}
 107
 108static void lru_destroy(struct lru *lru)
 109{
 110	WARN_ON_ONCE(lru->cursor);
 111	WARN_ON_ONCE(!list_empty(&lru->iterators));
 112}
 113
 114/*
 115 * Insert a new entry into the lru.
 116 */
 117static void lru_insert(struct lru *lru, struct lru_entry *le)
 118{
 119	/*
  120	 * Don't be tempted to set this to 1; it makes the lru aspect
 121	 * perform poorly.
 122	 */
 123	atomic_set(&le->referenced, 0);
 124
 125	if (lru->cursor) {
 126		list_add_tail(&le->list, lru->cursor);
 127	} else {
 128		INIT_LIST_HEAD(&le->list);
 129		lru->cursor = &le->list;
 130	}
 131	lru->count++;
 132}
 133
 134/*--------------*/
 135
 136/*
 137 * Convert a list_head pointer to an lru_entry pointer.
 138 */
 139static inline struct lru_entry *to_le(struct list_head *l)
 140{
 141	return container_of(l, struct lru_entry, list);
 142}
 143
 144/*
 145 * Initialize an lru_iter and add it to the list of cursors in the lru.
 146 */
 147static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
 148{
 149	it->lru = lru;
 150	it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
 151	it->e = lru->cursor ? to_le(lru->cursor) : NULL;
 152	list_add(&it->list, &lru->iterators);
 153}
 154
 155/*
 156 * Remove an lru_iter from the list of cursors in the lru.
 157 */
 158static inline void lru_iter_end(struct lru_iter *it)
 159{
 160	list_del(&it->list);
 161}
 162
 163/* Predicate function type to be used with lru_iter_next */
 164typedef bool (*iter_predicate)(struct lru_entry *le, void *context);
 165
 166/*
 167 * Advance the cursor to the next entry that passes the
 168 * predicate, and return that entry.  Returns NULL if the
 169 * iteration is complete.
 170 */
 171static struct lru_entry *lru_iter_next(struct lru_iter *it,
 172				       iter_predicate pred, void *context)
 173{
 174	struct lru_entry *e;
 175
 176	while (it->e) {
 177		e = it->e;
 178
 179		/* advance the cursor */
 180		if (it->e == it->stop)
 181			it->e = NULL;
 182		else
 183			it->e = to_le(it->e->list.next);
 184
 185		if (pred(e, context))
 186			return e;
 187	}
 188
 189	return NULL;
 190}
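/*
 * A minimal illustrative use of the iterator API; match_all() and
 * process() are hypothetical, not part of this file:
 *
 *	static bool match_all(struct lru_entry *le, void *context)
 *	{
 *		return true;
 *	}
 *
 *	struct lru_iter it;
 *	struct lru_entry *e;
 *
 *	lru_iter_begin(&lru, &it);
 *	while ((e = lru_iter_next(&it, match_all, NULL)))
 *		process(e);
 *	lru_iter_end(&it);
 */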
 191
 192/*
 193 * Invalidate a specific lru_entry and update all cursors in
 194 * the lru accordingly.
 195 */
 196static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
 197{
 198	struct lru_iter *it;
 199
 200	list_for_each_entry(it, &lru->iterators, list) {
  201		/* Move it->e forwards if necessary. */
 202		if (it->e == e) {
 203			it->e = to_le(it->e->list.next);
 204			if (it->e == e)
 205				it->e = NULL;
 206		}
 207
  208		/* Move it->stop backwards if necessary. */
 209		if (it->stop == e) {
 210			it->stop = to_le(it->stop->list.prev);
 211			if (it->stop == e)
 212				it->stop = NULL;
 213		}
 214	}
 215}
 216
 217/*--------------*/
 218
 219/*
 220 * Remove a specific entry from the lru.
 221 */
 222static void lru_remove(struct lru *lru, struct lru_entry *le)
 223{
 224	lru_iter_invalidate(lru, le);
 225	if (lru->count == 1) {
 226		lru->cursor = NULL;
 227	} else {
 228		if (lru->cursor == &le->list)
 229			lru->cursor = lru->cursor->next;
 230		list_del(&le->list);
 231	}
 232	lru->count--;
 233}
 234
 235/*
 236 * Mark as referenced.
 237 */
 238static inline void lru_reference(struct lru_entry *le)
 239{
 240	atomic_set(&le->referenced, 1);
 241}
 242
 243/*--------------*/
 244
 245/*
  246 * Remove the (approximately) least recently used entry that passes the predicate.
 247 * Returns NULL on failure.
 248 */
 249enum evict_result {
 250	ER_EVICT,
 251	ER_DONT_EVICT,
 252	ER_STOP, /* stop looking for something to evict */
 253};
 254
 255typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
 256
 257static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
 258{
 259	unsigned long tested = 0;
 260	struct list_head *h = lru->cursor;
 261	struct lru_entry *le;
 262
 263	if (!h)
 264		return NULL;
 265	/*
 266	 * In the worst case we have to loop around twice. Once to clear
 267	 * the reference flags, and then again to discover the predicate
 268	 * fails for all entries.
 269	 */
 270	while (tested < lru->count) {
 271		le = container_of(h, struct lru_entry, list);
 272
 273		if (atomic_read(&le->referenced)) {
 274			atomic_set(&le->referenced, 0);
 275		} else {
 276			tested++;
 277			switch (pred(le, context)) {
 278			case ER_EVICT:
 279				/*
 280				 * Adjust the cursor, so we start the next
 281				 * search from here.
 282				 */
 283				lru->cursor = le->list.next;
 284				lru_remove(lru, le);
 285				return le;
 286
 287			case ER_DONT_EVICT:
 288				break;
 289
 290			case ER_STOP:
 291				lru->cursor = le->list.next;
 292				return NULL;
 293			}
 294		}
 295
 296		h = h->next;
 297
 298		if (!no_sleep)
 299			cond_resched();
 300	}
 301
 302	return NULL;
 303}
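/*
 * A minimal illustrative lifecycle for this clock-based lru; evict_any()
 * and the entries e1/e2 are hypothetical, not part of this file:
 *
 *	static enum evict_result evict_any(struct lru_entry *le, void *context)
 *	{
 *		return ER_EVICT;
 *	}
 *
 *	struct lru lru;
 *	struct lru_entry e1, e2, *le;
 *
 *	lru_init(&lru);
 *	lru_insert(&lru, &e1);
 *	lru_insert(&lru, &e2);
 *	lru_reference(&e1);
 *	le = lru_evict(&lru, evict_any, NULL, false);
 *	lru_remove(&lru, &e1);
 *	lru_destroy(&lru);
 *
 * Because e1's reference bit is set, the cursor clears it and passes on,
 * so lru_evict() returns &e2 here.
 */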
 304
 305/*--------------------------------------------------------------*/
 306
 307/*
 308 * Buffer state bits.
 309 */
 310#define B_READING	0
 311#define B_WRITING	1
 312#define B_DIRTY		2
 313
 314/*
 315 * Describes how the block was allocated:
 316 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 317 * See the comment at alloc_buffer_data.
 318 */
 319enum data_mode {
 320	DATA_MODE_SLAB = 0,
 321	DATA_MODE_GET_FREE_PAGES = 1,
 322	DATA_MODE_VMALLOC = 2,
 323	DATA_MODE_LIMIT = 3
 324};
 325
 326struct dm_buffer {
 327	/* protected by the locks in dm_buffer_cache */
 328	struct rb_node node;
 329
 330	/* immutable, so don't need protecting */
 331	sector_t block;
 332	void *data;
 333	unsigned char data_mode;		/* DATA_MODE_* */
 334
 335	/*
 336	 * These two fields are used in isolation, so do not need
 337	 * a surrounding lock.
 338	 */
 339	atomic_t hold_count;
 340	unsigned long last_accessed;
 341
 342	/*
 343	 * Everything else is protected by the mutex in
 344	 * dm_bufio_client
 345	 */
 346	unsigned long state;
 347	struct lru_entry lru;
 348	unsigned char list_mode;		/* LIST_* */
 349	blk_status_t read_error;
 350	blk_status_t write_error;
 351	unsigned int dirty_start;
 352	unsigned int dirty_end;
 353	unsigned int write_start;
 354	unsigned int write_end;
 355	struct list_head write_list;
 356	struct dm_bufio_client *c;
 357	void (*end_io)(struct dm_buffer *b, blk_status_t bs);
 358#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 359#define MAX_STACK 10
 360	unsigned int stack_len;
 361	unsigned long stack_entries[MAX_STACK];
 362#endif
 363};
 364
 365/*--------------------------------------------------------------*/
 366
 367/*
 368 * The buffer cache manages buffers, particularly:
 369 *  - inc/dec of holder count
 370 *  - setting the last_accessed field
 371 *  - maintains clean/dirty state along with lru
 372 *  - selecting buffers that match predicates
 373 *
 374 * It does *not* handle:
 375 *  - allocation/freeing of buffers.
 376 *  - IO
 377 *  - Eviction or cache sizing.
 378 *
  379 * cache_get() and cache_put() are threadsafe; you do not need to
 380 * protect these calls with a surrounding mutex.  All the other
 381 * methods are not threadsafe; they do use locking primitives, but
 382 * only enough to ensure get/put are threadsafe.
 383 */
 384
 385struct buffer_tree {
 386	union {
 387		struct rw_semaphore lock;
 388		rwlock_t spinlock;
 389	} u;
 390	struct rb_root root;
 391} ____cacheline_aligned_in_smp;
 392
 393struct dm_buffer_cache {
 394	struct lru lru[LIST_SIZE];
 395	/*
 396	 * We spread entries across multiple trees to reduce contention
 397	 * on the locks.
 398	 */
 399	unsigned int num_locks;
 400	bool no_sleep;
 401	struct buffer_tree trees[];
 402};
 403
 404static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
 405
 406static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
 407{
 408	return dm_hash_locks_index(block, num_locks);
 409}
 410
 411static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
 412{
 413	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 414		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 415	else
 416		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 417}
 418
 419static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
 420{
 421	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 422		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 423	else
 424		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 425}
 426
 427static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
 428{
 429	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 430		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 431	else
 432		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 433}
 434
 435static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
 436{
 437	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 438		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 439	else
 440		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 441}
 442
 443/*
 444 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
  445 * This struct helps avoid redundant drops and gets of the same lock.
 446 */
 447struct lock_history {
 448	struct dm_buffer_cache *cache;
 449	bool write;
 450	unsigned int previous;
 451	unsigned int no_previous;
 452};
 453
 454static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
 455{
 456	lh->cache = cache;
 457	lh->write = write;
 458	lh->no_previous = cache->num_locks;
 459	lh->previous = lh->no_previous;
 460}
 461
 462static void __lh_lock(struct lock_history *lh, unsigned int index)
 463{
 464	if (lh->write) {
 465		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 466			write_lock_bh(&lh->cache->trees[index].u.spinlock);
 467		else
 468			down_write(&lh->cache->trees[index].u.lock);
 469	} else {
 470		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 471			read_lock_bh(&lh->cache->trees[index].u.spinlock);
 472		else
 473			down_read(&lh->cache->trees[index].u.lock);
 474	}
 475}
 476
 477static void __lh_unlock(struct lock_history *lh, unsigned int index)
 478{
 479	if (lh->write) {
 480		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 481			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
 482		else
 483			up_write(&lh->cache->trees[index].u.lock);
 484	} else {
 485		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 486			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
 487		else
 488			up_read(&lh->cache->trees[index].u.lock);
 489	}
 490}
 491
 492/*
 493 * Make sure you call this since it will unlock the final lock.
 494 */
 495static void lh_exit(struct lock_history *lh)
 496{
 497	if (lh->previous != lh->no_previous) {
 498		__lh_unlock(lh, lh->previous);
 499		lh->previous = lh->no_previous;
 500	}
 501}
 502
 503/*
 504 * Named 'next' because there is no corresponding
 505 * 'up/unlock' call since it's done automatically.
 506 */
 507static void lh_next(struct lock_history *lh, sector_t b)
 508{
 509	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
 510
 511	if (lh->previous != lh->no_previous) {
 512		if (lh->previous != index) {
 513			__lh_unlock(lh, lh->previous);
 514			__lh_lock(lh, index);
 515			lh->previous = index;
 516		}
 517	} else {
 518		__lh_lock(lh, index);
 519		lh->previous = index;
 520	}
 521}
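/*
 * Illustrative pattern (a sketch, not code from this file): walking a
 * sorted run of blocks while holding only the lock covering the current
 * block.  lh_next() swaps locks only when the lock index of the block
 * changes, and lh_exit() drops the final held lock.
 *
 *	struct lock_history lh;
 *
 *	lh_init(&lh, &bc, false);
 *	for (i = 0; i < nr_blocks; i++) {
 *		lh_next(&lh, blocks[i]);
 *		... inspect the buffer for blocks[i] ...
 *	}
 *	lh_exit(&lh);
 */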
 522
 523static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
 524{
 525	return container_of(le, struct dm_buffer, lru);
 526}
 527
 528static struct dm_buffer *list_to_buffer(struct list_head *l)
 529{
 530	struct lru_entry *le = list_entry(l, struct lru_entry, list);
 531
 532	if (!le)
 533		return NULL;
 534
 535	return le_to_buffer(le);
 536}
 537
 538static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
 539{
 540	unsigned int i;
 541
 542	bc->num_locks = num_locks;
 543	bc->no_sleep = no_sleep;
 544
 545	for (i = 0; i < bc->num_locks; i++) {
 546		if (no_sleep)
 547			rwlock_init(&bc->trees[i].u.spinlock);
 548		else
 549			init_rwsem(&bc->trees[i].u.lock);
 550		bc->trees[i].root = RB_ROOT;
 551	}
 552
 553	lru_init(&bc->lru[LIST_CLEAN]);
 554	lru_init(&bc->lru[LIST_DIRTY]);
 555}
 556
 557static void cache_destroy(struct dm_buffer_cache *bc)
 558{
 559	unsigned int i;
 560
 561	for (i = 0; i < bc->num_locks; i++)
 562		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
 563
 564	lru_destroy(&bc->lru[LIST_CLEAN]);
 565	lru_destroy(&bc->lru[LIST_DIRTY]);
 566}
 567
 568/*--------------*/
 569
 570/*
  571 * Not threadsafe, or racy depending on how you look at it.
 572 */
 573static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
 574{
 575	return bc->lru[list_mode].count;
 576}
 577
 578static inline unsigned long cache_total(struct dm_buffer_cache *bc)
 579{
 580	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
 581}
 582
 583/*--------------*/
 584
 585/*
 586 * Gets a specific buffer, indexed by block.
 587 * If the buffer is found then its holder count will be incremented and
 588 * lru_reference will be called.
 589 *
 590 * threadsafe
 591 */
 592static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
 593{
 594	struct rb_node *n = root->rb_node;
 595	struct dm_buffer *b;
 596
 597	while (n) {
 598		b = container_of(n, struct dm_buffer, node);
 599
 600		if (b->block == block)
 601			return b;
 602
 603		n = block < b->block ? n->rb_left : n->rb_right;
 604	}
 605
 606	return NULL;
 607}
 608
 609static void __cache_inc_buffer(struct dm_buffer *b)
 610{
 611	atomic_inc(&b->hold_count);
 612	WRITE_ONCE(b->last_accessed, jiffies);
 613}
 614
 615static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
 616{
 617	struct dm_buffer *b;
 618
 619	cache_read_lock(bc, block);
 620	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
 621	if (b) {
 622		lru_reference(&b->lru);
 623		__cache_inc_buffer(b);
 624	}
 625	cache_read_unlock(bc, block);
 626
 627	return b;
 628}
 629
 630/*--------------*/
 631
 632/*
 633 * Returns true if the hold count hits zero.
 634 * threadsafe
 635 */
 636static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
 637{
 638	bool r;
 639
 640	cache_read_lock(bc, b->block);
 641	BUG_ON(!atomic_read(&b->hold_count));
 642	r = atomic_dec_and_test(&b->hold_count);
 643	cache_read_unlock(bc, b->block);
 644
 645	return r;
 646}
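/*
 * Typical get/put pairing (an illustrative sketch):
 *
 *	struct dm_buffer *b = cache_get(&bc, block);
 *
 *	if (b) {
 *		... read b->data ...
 *		cache_put(&bc, b);
 *	}
 */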
 647
 648/*--------------*/
 649
 650typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);
 651
 652/*
 653 * Evicts a buffer based on a predicate.  The oldest buffer that
  654 * matches the predicate will be selected.  In addition to matching
  655 * the predicate, the selected buffer will have a hold_count of zero.
 656 */
 657struct evict_wrapper {
 658	struct lock_history *lh;
 659	b_predicate pred;
 660	void *context;
 661};
 662
 663/*
 664 * Wraps the buffer predicate turning it into an lru predicate.  Adds
 665 * extra test for hold_count.
 666 */
 667static enum evict_result __evict_pred(struct lru_entry *le, void *context)
 668{
 669	struct evict_wrapper *w = context;
 670	struct dm_buffer *b = le_to_buffer(le);
 671
 672	lh_next(w->lh, b->block);
 673
 674	if (atomic_read(&b->hold_count))
 675		return ER_DONT_EVICT;
 676
 677	return w->pred(b, w->context);
 678}
 679
 680static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
 681				       b_predicate pred, void *context,
 682				       struct lock_history *lh)
 683{
 684	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
 685	struct lru_entry *le;
 686	struct dm_buffer *b;
 687
 688	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
 689	if (!le)
 690		return NULL;
 691
 692	b = le_to_buffer(le);
 693	/* __evict_pred will have locked the appropriate tree. */
 694	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
 695
 696	return b;
 697}
 698
 699static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
 700				     b_predicate pred, void *context)
 701{
 702	struct dm_buffer *b;
 703	struct lock_history lh;
 704
 705	lh_init(&lh, bc, true);
 706	b = __cache_evict(bc, list_mode, pred, context, &lh);
 707	lh_exit(&lh);
 708
 709	return b;
 710}
 711
 712/*--------------*/
 713
 714/*
 715 * Mark a buffer as clean or dirty. Not threadsafe.
 716 */
 717static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
 718{
 719	cache_write_lock(bc, b->block);
 720	if (list_mode != b->list_mode) {
 721		lru_remove(&bc->lru[b->list_mode], &b->lru);
 722		b->list_mode = list_mode;
 723		lru_insert(&bc->lru[b->list_mode], &b->lru);
 724	}
 725	cache_write_unlock(bc, b->block);
 726}
 727
 728/*--------------*/
 729
 730/*
  731 * Runs through the lru associated with 'old_mode'; entries that match
  732 * the predicate are moved to 'new_mode'.  Not threadsafe.
 733 */
 734static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
 735			      b_predicate pred, void *context, struct lock_history *lh)
 736{
 737	struct lru_entry *le;
 738	struct dm_buffer *b;
 739	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
 740
 741	while (true) {
 742		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
 743		if (!le)
 744			break;
 745
 746		b = le_to_buffer(le);
 747		b->list_mode = new_mode;
 748		lru_insert(&bc->lru[b->list_mode], &b->lru);
 749	}
 750}
 751
 752static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
 753			    b_predicate pred, void *context)
 754{
 755	struct lock_history lh;
 756
 757	lh_init(&lh, bc, true);
 758	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
 759	lh_exit(&lh);
 760}
 761
 762/*--------------*/
 763
 764/*
 765 * Iterates through all clean or dirty entries calling a function for each
 766 * entry.  The callback may terminate the iteration early.  Not threadsafe.
 767 */
 768
 769/*
 770 * Iterator functions should return one of these actions to indicate
 771 * how the iteration should proceed.
 772 */
 773enum it_action {
 774	IT_NEXT,
 775	IT_COMPLETE,
 776};
 777
 778typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);
 779
 780static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
 781			    iter_fn fn, void *context, struct lock_history *lh)
 782{
 783	struct lru *lru = &bc->lru[list_mode];
 784	struct lru_entry *le, *first;
 785
 786	if (!lru->cursor)
 787		return;
 788
 789	first = le = to_le(lru->cursor);
 790	do {
 791		struct dm_buffer *b = le_to_buffer(le);
 792
 793		lh_next(lh, b->block);
 794
 795		switch (fn(b, context)) {
 796		case IT_NEXT:
 797			break;
 798
 799		case IT_COMPLETE:
 800			return;
 801		}
 802		cond_resched();
 803
 804		le = to_le(le->list.next);
 805	} while (le != first);
 806}
 807
 808static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
 809			  iter_fn fn, void *context)
 810{
 811	struct lock_history lh;
 812
 813	lh_init(&lh, bc, false);
 814	__cache_iterate(bc, list_mode, fn, context, &lh);
 815	lh_exit(&lh);
 816}
 817
 818/*--------------*/
 819
 820/*
 821 * Passes ownership of the buffer to the cache. Returns false if the
  822 * buffer was already present (in which case ownership does not pass),
  823 * e.g. after a race with another thread.
 824 *
 825 * Holder count should be 1 on insertion.
 826 *
 827 * Not threadsafe.
 828 */
 829static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
 830{
 831	struct rb_node **new = &root->rb_node, *parent = NULL;
 832	struct dm_buffer *found;
 833
 834	while (*new) {
 835		found = container_of(*new, struct dm_buffer, node);
 836
 837		if (found->block == b->block)
 838			return false;
 839
 840		parent = *new;
 841		new = b->block < found->block ?
 842			&found->node.rb_left : &found->node.rb_right;
 843	}
 844
 845	rb_link_node(&b->node, parent, new);
 846	rb_insert_color(&b->node, root);
 847
 848	return true;
 849}
 850
 851static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
 852{
 853	bool r;
 854
 855	if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
 856		return false;
 857
 858	cache_write_lock(bc, b->block);
 859	BUG_ON(atomic_read(&b->hold_count) != 1);
 860	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
 861	if (r)
 862		lru_insert(&bc->lru[b->list_mode], &b->lru);
 863	cache_write_unlock(bc, b->block);
 864
 865	return r;
 866}
 867
 868/*--------------*/
 869
 870/*
 871 * Removes buffer from cache, ownership of the buffer passes back to the caller.
  872 * Fails if the hold_count is not one (i.e. succeeds only when the caller holds the sole reference).
 873 *
 874 * Not threadsafe.
 875 */
 876static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
 877{
 878	bool r;
 879
 880	cache_write_lock(bc, b->block);
 881
 882	if (atomic_read(&b->hold_count) != 1) {
 883		r = false;
 884	} else {
 885		r = true;
 886		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
 887		lru_remove(&bc->lru[b->list_mode], &b->lru);
 888	}
 889
 890	cache_write_unlock(bc, b->block);
 891
 892	return r;
 893}
 894
 895/*--------------*/
 896
 897typedef void (*b_release)(struct dm_buffer *);
 898
 899static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
 900{
 901	struct rb_node *n = root->rb_node;
 902	struct dm_buffer *b;
 903	struct dm_buffer *best = NULL;
 904
 905	while (n) {
 906		b = container_of(n, struct dm_buffer, node);
 907
 908		if (b->block == block)
 909			return b;
 910
 911		if (block <= b->block) {
 912			n = n->rb_left;
 913			best = b;
 914		} else {
 915			n = n->rb_right;
 916		}
 917	}
 918
 919	return best;
 920}
 921
 922static void __remove_range(struct dm_buffer_cache *bc,
 923			   struct rb_root *root,
 924			   sector_t begin, sector_t end,
 925			   b_predicate pred, b_release release)
 926{
 927	struct dm_buffer *b;
 928
 929	while (true) {
 930		cond_resched();
 931
 932		b = __find_next(root, begin);
 933		if (!b || (b->block >= end))
 934			break;
 935
 936		begin = b->block + 1;
 937
 938		if (atomic_read(&b->hold_count))
 939			continue;
 940
 941		if (pred(b, NULL) == ER_EVICT) {
 942			rb_erase(&b->node, root);
 943			lru_remove(&bc->lru[b->list_mode], &b->lru);
 944			release(b);
 945		}
 946	}
 947}
 948
 949static void cache_remove_range(struct dm_buffer_cache *bc,
 950			       sector_t begin, sector_t end,
 951			       b_predicate pred, b_release release)
 952{
 953	unsigned int i;
 954
 955	BUG_ON(bc->no_sleep);
 956	for (i = 0; i < bc->num_locks; i++) {
 957		down_write(&bc->trees[i].u.lock);
 958		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
 959		up_write(&bc->trees[i].u.lock);
 960	}
 961}
 962
 963/*----------------------------------------------------------------*/
 964
 965/*
 966 * Linking of buffers:
 967 *	All buffers are linked to buffer_cache with their node field.
 968 *
 969 *	Clean buffers that are not being written (B_WRITING not set)
  970 *	are linked to lru[LIST_CLEAN] with their lru field.
  971 *
  972 *	Dirty and clean buffers that are being written are linked to
  973 *	lru[LIST_DIRTY] with their lru field. When the write
  974 *	finishes, the buffer cannot be relinked immediately (because we
  975 *	are in an interrupt context and relinking requires process
  976 *	context), so some clean-not-writing buffers can be held on
  977 *	the dirty lru too.  They are later added to the clean lru in
  978 *	process context.
 979 */
 980struct dm_bufio_client {
 981	struct block_device *bdev;
 982	unsigned int block_size;
 983	s8 sectors_per_block_bits;
 984
 985	bool no_sleep;
 986	struct mutex lock;
 987	spinlock_t spinlock;
 988
 989	int async_write_error;
 990
 991	void (*alloc_callback)(struct dm_buffer *buf);
 992	void (*write_callback)(struct dm_buffer *buf);
 993	struct kmem_cache *slab_buffer;
 994	struct kmem_cache *slab_cache;
 995	struct dm_io_client *dm_io;
 996
 997	struct list_head reserved_buffers;
 998	unsigned int need_reserved_buffers;
 999
1000	unsigned int minimum_buffers;
1001
1002	sector_t start;
1003
1004	struct shrinker *shrinker;
1005	struct work_struct shrink_work;
1006	atomic_long_t need_shrink;
1007
1008	wait_queue_head_t free_buffer_wait;
1009
1010	struct list_head client_list;
1011
1012	/*
1013	 * Used by global_cleanup to sort the clients list.
1014	 */
1015	unsigned long oldest_buffer;
1016
1017	struct dm_buffer_cache cache; /* must be last member */
1018};
1019
1020/*----------------------------------------------------------------*/
1021
1022#define dm_bufio_in_request()	(!!current->bio_list)
1023
1024static void dm_bufio_lock(struct dm_bufio_client *c)
1025{
1026	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1027		spin_lock_bh(&c->spinlock);
1028	else
1029		mutex_lock_nested(&c->lock, dm_bufio_in_request());
1030}
1031
1032static void dm_bufio_unlock(struct dm_bufio_client *c)
1033{
1034	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1035		spin_unlock_bh(&c->spinlock);
1036	else
1037		mutex_unlock(&c->lock);
1038}
1039
1040/*----------------------------------------------------------------*/
1041
1042/*
1043 * Default cache size: available memory divided by the ratio.
1044 */
1045static unsigned long dm_bufio_default_cache_size;
1046
1047/*
1048 * Total cache size set by the user.
1049 */
1050static unsigned long dm_bufio_cache_size;
1051
1052/*
1053 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
1054 * at any time.  If it disagrees, the user has changed cache size.
1055 */
1056static unsigned long dm_bufio_cache_size_latch;
1057
1058static DEFINE_SPINLOCK(global_spinlock);
1059
1060/*
1061 * Buffers are freed after this timeout
1062 */
1063static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
1064static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
1065
1066static unsigned long dm_bufio_peak_allocated;
1067static unsigned long dm_bufio_allocated_kmem_cache;
1068static unsigned long dm_bufio_allocated_get_free_pages;
1069static unsigned long dm_bufio_allocated_vmalloc;
1070static unsigned long dm_bufio_current_allocated;
1071
1072/*----------------------------------------------------------------*/
1073
1074/*
1075 * The current number of clients.
1076 */
1077static int dm_bufio_client_count;
1078
1079/*
1080 * The list of all clients.
1081 */
1082static LIST_HEAD(dm_bufio_all_clients);
1083
1084/*
1085 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
1086 */
1087static DEFINE_MUTEX(dm_bufio_clients_lock);
1088
1089static struct workqueue_struct *dm_bufio_wq;
1090static struct delayed_work dm_bufio_cleanup_old_work;
1091static struct work_struct dm_bufio_replacement_work;
1092
1093
1094#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1095static void buffer_record_stack(struct dm_buffer *b)
1096{
1097	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
1098}
1099#endif
1100
1101/*----------------------------------------------------------------*/
1102
1103static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
1104{
1105	unsigned char data_mode;
1106	long diff;
1107
1108	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
1109		&dm_bufio_allocated_kmem_cache,
1110		&dm_bufio_allocated_get_free_pages,
1111		&dm_bufio_allocated_vmalloc,
1112	};
1113
1114	data_mode = b->data_mode;
1115	diff = (long)b->c->block_size;
1116	if (unlink)
1117		diff = -diff;
1118
1119	spin_lock(&global_spinlock);
1120
1121	*class_ptr[data_mode] += diff;
1122
1123	dm_bufio_current_allocated += diff;
1124
1125	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
1126		dm_bufio_peak_allocated = dm_bufio_current_allocated;
1127
1128	if (!unlink) {
1129		if (dm_bufio_current_allocated > dm_bufio_cache_size)
1130			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
1131	}
1132
1133	spin_unlock(&global_spinlock);
1134}
1135
1136/*
1137 * Change the number of clients and recalculate per-client limit.
1138 */
1139static void __cache_size_refresh(void)
1140{
1141	if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
1142		return;
1143	if (WARN_ON(dm_bufio_client_count < 0))
1144		return;
1145
1146	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
1147
1148	/*
1149	 * Use default if set to 0 and report the actual cache size used.
1150	 */
1151	if (!dm_bufio_cache_size_latch) {
1152		(void)cmpxchg(&dm_bufio_cache_size, 0,
1153			      dm_bufio_default_cache_size);
1154		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
1155	}
1156}
1157
1158/*
1159 * Allocating buffer data.
1160 *
1161 * Small buffers are allocated with kmem_cache, to use space optimally.
1162 *
1163 * For large buffers, we choose between get_free_pages and vmalloc.
1164 * Each has advantages and disadvantages.
1165 *
1166 * __get_free_pages can randomly fail if the memory is fragmented.
1167 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
1168 * as low as 128M) so using it for caching is not appropriate.
1169 *
1170 * If the allocation may fail we use __get_free_pages. Memory fragmentation
1171 * won't have a fatal effect here, but it just causes flushes of some other
1172 * buffers and more I/O will be performed. Don't use __get_free_pages if it
1173 * always fails (i.e. order > MAX_PAGE_ORDER).
1174 *
1175 * If the allocation shouldn't fail we use __vmalloc. This is only for the
1176 * initial reserve allocation, so there's no risk of wasting all vmalloc
1177 * space.
1178 */
1179static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1180			       unsigned char *data_mode)
1181{
1182	if (unlikely(c->slab_cache != NULL)) {
1183		*data_mode = DATA_MODE_SLAB;
1184		return kmem_cache_alloc(c->slab_cache, gfp_mask);
1185	}
1186
1187	if (c->block_size <= KMALLOC_MAX_SIZE &&
1188	    gfp_mask & __GFP_NORETRY) {
1189		*data_mode = DATA_MODE_GET_FREE_PAGES;
1190		return (void *)__get_free_pages(gfp_mask,
1191						c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1192	}
1193
1194	*data_mode = DATA_MODE_VMALLOC;
1195
1196	return __vmalloc(c->block_size, gfp_mask);
1197}
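/*
 * Illustrative call (a sketch): a failable allocation that records how
 * the memory was obtained, so that free_buffer_data() can release it
 * with the matching mechanism later.
 *
 *	unsigned char mode;
 *	void *data = alloc_buffer_data(c, GFP_NOIO | __GFP_NORETRY, &mode);
 *
 *	if (data)
 *		free_buffer_data(c, data, mode);
 */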
1198
1199/*
1200 * Free buffer's data.
1201 */
1202static void free_buffer_data(struct dm_bufio_client *c,
1203			     void *data, unsigned char data_mode)
1204{
1205	switch (data_mode) {
1206	case DATA_MODE_SLAB:
1207		kmem_cache_free(c->slab_cache, data);
1208		break;
1209
1210	case DATA_MODE_GET_FREE_PAGES:
1211		free_pages((unsigned long)data,
1212			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1213		break;
1214
1215	case DATA_MODE_VMALLOC:
1216		vfree(data);
1217		break;
1218
1219	default:
1220		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
1221		       data_mode);
1222		BUG();
1223	}
1224}
1225
1226/*
1227 * Allocate buffer and its data.
1228 */
1229static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
1230{
1231	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
1232
1233	if (!b)
1234		return NULL;
1235
1236	b->c = c;
1237
1238	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
1239	if (!b->data) {
1240		kmem_cache_free(c->slab_buffer, b);
1241		return NULL;
1242	}
1243	adjust_total_allocated(b, false);
1244
1245#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1246	b->stack_len = 0;
1247#endif
1248	return b;
1249}
1250
1251/*
1252 * Free buffer and its data.
1253 */
1254static void free_buffer(struct dm_buffer *b)
1255{
1256	struct dm_bufio_client *c = b->c;
1257
1258	adjust_total_allocated(b, true);
1259	free_buffer_data(c, b->data, b->data_mode);
1260	kmem_cache_free(c->slab_buffer, b);
1261}
1262
1263/*
1264 *--------------------------------------------------------------------------
1265 * Submit I/O on the buffer.
1266 *
1267 * Bio interface is faster but it has some problems:
1268 *	the vector list is limited (increasing this limit increases
 1269 *	memory consumption per buffer, so it is not viable);
1270 *
1271 *	the memory must be direct-mapped, not vmalloced;
1272 *
1273 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
1274 * it is not vmalloced, try using the bio interface.
1275 *
1276 * If the buffer is big, if it is vmalloced or if the underlying device
1277 * rejects the bio because it is too large, use dm-io layer to do the I/O.
1278 * The dm-io layer splits the I/O into multiple requests, avoiding the above
1279 * shortcomings.
1280 *--------------------------------------------------------------------------
1281 */
1282
1283/*
 1284 * dm-io completion routine. It just calls b->end_io, pretending that
 1285 * the request was handled directly with the bio interface.
1286 */
1287static void dmio_complete(unsigned long error, void *context)
1288{
1289	struct dm_buffer *b = context;
1290
1291	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
1292}
1293
1294static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
1295		     unsigned int n_sectors, unsigned int offset)
1296{
1297	int r;
1298	struct dm_io_request io_req = {
1299		.bi_opf = op,
1300		.notify.fn = dmio_complete,
1301		.notify.context = b,
1302		.client = b->c->dm_io,
1303	};
1304	struct dm_io_region region = {
1305		.bdev = b->c->bdev,
1306		.sector = sector,
1307		.count = n_sectors,
1308	};
1309
1310	if (b->data_mode != DATA_MODE_VMALLOC) {
1311		io_req.mem.type = DM_IO_KMEM;
1312		io_req.mem.ptr.addr = (char *)b->data + offset;
1313	} else {
1314		io_req.mem.type = DM_IO_VMA;
1315		io_req.mem.ptr.vma = (char *)b->data + offset;
1316	}
1317
1318	r = dm_io(&io_req, 1, &region, NULL);
1319	if (unlikely(r))
1320		b->end_io(b, errno_to_blk_status(r));
1321}
1322
1323static void bio_complete(struct bio *bio)
1324{
1325	struct dm_buffer *b = bio->bi_private;
1326	blk_status_t status = bio->bi_status;
1327
1328	bio_uninit(bio);
1329	kfree(bio);
1330	b->end_io(b, status);
1331}
1332
1333static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
1334		    unsigned int n_sectors, unsigned int offset)
1335{
1336	struct bio *bio;
1337	char *ptr;
1338	unsigned int len;
1339
1340	bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
1341	if (!bio) {
1342		use_dmio(b, op, sector, n_sectors, offset);
1343		return;
1344	}
1345	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
1346	bio->bi_iter.bi_sector = sector;
1347	bio->bi_end_io = bio_complete;
1348	bio->bi_private = b;
1349
1350	ptr = (char *)b->data + offset;
1351	len = n_sectors << SECTOR_SHIFT;
1352
1353	__bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));
1354
1355	submit_bio(bio);
1356}
1357
1358static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
1359{
1360	sector_t sector;
1361
1362	if (likely(c->sectors_per_block_bits >= 0))
1363		sector = block << c->sectors_per_block_bits;
1364	else
1365		sector = block * (c->block_size >> SECTOR_SHIFT);
1366	sector += c->start;
1367
1368	return sector;
1369}
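/*
 * Illustrative arithmetic: with 4 KiB blocks, sectors_per_block_bits is
 * 3 (a block is 8 sectors of 512 bytes), so block 10 maps to sector
 * (10 << 3) + c->start = 80 + c->start.  For a non-power-of-two block
 * size of 3 KiB (6 sectors), block 10 maps to 10 * 6 + c->start.
 */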
1370
1371static void submit_io(struct dm_buffer *b, enum req_op op,
1372		      void (*end_io)(struct dm_buffer *, blk_status_t))
1373{
1374	unsigned int n_sectors;
1375	sector_t sector;
1376	unsigned int offset, end;
1377
1378	b->end_io = end_io;
1379
1380	sector = block_to_sector(b->c, b->block);
1381
1382	if (op != REQ_OP_WRITE) {
1383		n_sectors = b->c->block_size >> SECTOR_SHIFT;
1384		offset = 0;
1385	} else {
1386		if (b->c->write_callback)
1387			b->c->write_callback(b);
1388		offset = b->write_start;
1389		end = b->write_end;
1390		offset &= -DM_BUFIO_WRITE_ALIGN;
1391		end += DM_BUFIO_WRITE_ALIGN - 1;
1392		end &= -DM_BUFIO_WRITE_ALIGN;
1393		if (unlikely(end > b->c->block_size))
1394			end = b->c->block_size;
1395
1396		sector += offset >> SECTOR_SHIFT;
1397		n_sectors = (end - offset) >> SECTOR_SHIFT;
1398	}
1399
1400	if (b->data_mode != DATA_MODE_VMALLOC)
1401		use_bio(b, op, sector, n_sectors, offset);
1402	else
1403		use_dmio(b, op, sector, n_sectors, offset);
1404}
1405
1406/*
1407 *--------------------------------------------------------------
1408 * Writing dirty buffers
1409 *--------------------------------------------------------------
1410 */
1411
1412/*
1413 * The endio routine for write.
1414 *
1415 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
1416 * it.
1417 */
1418static void write_endio(struct dm_buffer *b, blk_status_t status)
1419{
1420	b->write_error = status;
1421	if (unlikely(status)) {
1422		struct dm_bufio_client *c = b->c;
1423
1424		(void)cmpxchg(&c->async_write_error, 0,
1425				blk_status_to_errno(status));
1426	}
1427
1428	BUG_ON(!test_bit(B_WRITING, &b->state));
1429
1430	smp_mb__before_atomic();
1431	clear_bit(B_WRITING, &b->state);
1432	smp_mb__after_atomic();
1433
1434	wake_up_bit(&b->state, B_WRITING);
1435}
1436
1437/*
1438 * Initiate a write on a dirty buffer, but don't wait for it.
1439 *
1440 * - If the buffer is not dirty, exit.
 1441 * - If there is some previous write going on, wait for it to finish (we can't
1442 *   have two writes on the same buffer simultaneously).
1443 * - Submit our write and don't wait on it. We set B_WRITING indicating
1444 *   that there is a write in progress.
1445 */
1446static void __write_dirty_buffer(struct dm_buffer *b,
1447				 struct list_head *write_list)
1448{
1449	if (!test_bit(B_DIRTY, &b->state))
1450		return;
1451
1452	clear_bit(B_DIRTY, &b->state);
1453	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1454
1455	b->write_start = b->dirty_start;
1456	b->write_end = b->dirty_end;
1457
1458	if (!write_list)
1459		submit_io(b, REQ_OP_WRITE, write_endio);
1460	else
1461		list_add_tail(&b->write_list, write_list);
1462}
1463
1464static void __flush_write_list(struct list_head *write_list)
1465{
1466	struct blk_plug plug;
1467
1468	blk_start_plug(&plug);
1469	while (!list_empty(write_list)) {
1470		struct dm_buffer *b =
1471			list_entry(write_list->next, struct dm_buffer, write_list);
1472		list_del(&b->write_list);
1473		submit_io(b, REQ_OP_WRITE, write_endio);
1474		cond_resched();
1475	}
1476	blk_finish_plug(&plug);
1477}
1478
1479/*
1480 * Wait until any activity on the buffer finishes.  Possibly write the
1481 * buffer if it is dirty.  When this function finishes, there is no I/O
1482 * running on the buffer and the buffer is not dirty.
1483 */
1484static void __make_buffer_clean(struct dm_buffer *b)
1485{
1486	BUG_ON(atomic_read(&b->hold_count));
1487
1488	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
1489	if (!smp_load_acquire(&b->state))	/* fast case */
1490		return;
1491
1492	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1493	__write_dirty_buffer(b, NULL);
1494	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1495}
1496
1497static enum evict_result is_clean(struct dm_buffer *b, void *context)
1498{
1499	struct dm_bufio_client *c = context;
1500
1501	/* These should never happen */
1502	if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
1503		return ER_DONT_EVICT;
1504	if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
1505		return ER_DONT_EVICT;
1506	if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
1507		return ER_DONT_EVICT;
1508
1509	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
1510	    unlikely(test_bit(B_READING, &b->state)))
1511		return ER_DONT_EVICT;
1512
1513	return ER_EVICT;
1514}
1515
1516static enum evict_result is_dirty(struct dm_buffer *b, void *context)
1517{
1518	/* These should never happen */
1519	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1520		return ER_DONT_EVICT;
1521	if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
1522		return ER_DONT_EVICT;
1523
1524	return ER_EVICT;
1525}
1526
1527/*
1528 * Find some buffer that is not held by anybody, clean it, unlink it and
1529 * return it.
1530 */
1531static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
1532{
1533	struct dm_buffer *b;
1534
1535	b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
1536	if (b) {
1537		/* this also waits for pending reads */
1538		__make_buffer_clean(b);
1539		return b;
1540	}
1541
1542	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1543		return NULL;
1544
1545	b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
1546	if (b) {
1547		__make_buffer_clean(b);
1548		return b;
1549	}
1550
1551	return NULL;
1552}
1553
1554/*
 1555 * Wait until some other thread frees a buffer or releases its hold
 1556 * count on one.
1557 *
1558 * This function is entered with c->lock held, drops it and regains it
1559 * before exiting.
1560 */
1561static void __wait_for_free_buffer(struct dm_bufio_client *c)
1562{
1563	DECLARE_WAITQUEUE(wait, current);
1564
1565	add_wait_queue(&c->free_buffer_wait, &wait);
1566	set_current_state(TASK_UNINTERRUPTIBLE);
1567	dm_bufio_unlock(c);
1568
1569	/*
1570	 * It's possible to miss a wake up event since we don't always
1571	 * hold c->lock when wake_up is called.  So we have a timeout here,
1572	 * just in case.
1573	 */
1574	io_schedule_timeout(5 * HZ);
1575
1576	remove_wait_queue(&c->free_buffer_wait, &wait);
1577
1578	dm_bufio_lock(c);
1579}
1580
1581enum new_flag {
1582	NF_FRESH = 0,
1583	NF_READ = 1,
1584	NF_GET = 2,
1585	NF_PREFETCH = 3
1586};
1587
1588/*
1589 * Allocate a new buffer. If the allocation is not possible, wait until
1590 * some other thread frees a buffer.
1591 *
1592 * May drop the lock and regain it.
1593 */
1594static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
1595{
1596	struct dm_buffer *b;
1597	bool tried_noio_alloc = false;
1598
1599	/*
1600	 * dm-bufio is resistant to allocation failures (it just keeps
 1601	 * one buffer reserved in case all the allocations fail).
1602	 * So set flags to not try too hard:
1603	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
1604	 *		    mutex and wait ourselves.
1605	 *	__GFP_NORETRY: don't retry and rather return failure
1606	 *	__GFP_NOMEMALLOC: don't use emergency reserves
1607	 *	__GFP_NOWARN: don't print a warning in case of failure
1608	 *
1609	 * For debugging, if we set the cache size to 1, no new buffers will
1610	 * be allocated.
1611	 */
1612	while (1) {
1613		if (dm_bufio_cache_size_latch != 1) {
1614			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1615			if (b)
1616				return b;
1617		}
1618
1619		if (nf == NF_PREFETCH)
1620			return NULL;
1621
1622		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
1623			dm_bufio_unlock(c);
1624			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1625			dm_bufio_lock(c);
1626			if (b)
1627				return b;
1628			tried_noio_alloc = true;
1629		}
1630
1631		if (!list_empty(&c->reserved_buffers)) {
1632			b = list_to_buffer(c->reserved_buffers.next);
1633			list_del(&b->lru.list);
1634			c->need_reserved_buffers++;
1635
1636			return b;
1637		}
1638
1639		b = __get_unclaimed_buffer(c);
1640		if (b)
1641			return b;
1642
1643		__wait_for_free_buffer(c);
1644	}
1645}
1646
1647static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
1648{
1649	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
1650
1651	if (!b)
1652		return NULL;
1653
1654	if (c->alloc_callback)
1655		c->alloc_callback(b);
1656
1657	return b;
1658}
1659
1660/*
1661 * Free a buffer and wake other threads waiting for free buffers.
1662 */
1663static void __free_buffer_wake(struct dm_buffer *b)
1664{
1665	struct dm_bufio_client *c = b->c;
1666
1667	b->block = -1;
1668	if (!c->need_reserved_buffers)
1669		free_buffer(b);
1670	else {
1671		list_add(&b->lru.list, &c->reserved_buffers);
1672		c->need_reserved_buffers--;
1673	}
1674
1675	/*
1676	 * We hold the bufio lock here, so no one can add entries to the
1677	 * wait queue anyway.
1678	 */
1679	if (unlikely(waitqueue_active(&c->free_buffer_wait)))
1680		wake_up(&c->free_buffer_wait);
1681}
1682
1683static enum evict_result cleaned(struct dm_buffer *b, void *context)
1684{
1685	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1686		return ER_DONT_EVICT; /* should never happen */
1687
1688	if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
1689		return ER_DONT_EVICT;
1690	else
1691		return ER_EVICT;
1692}
1693
1694static void __move_clean_buffers(struct dm_bufio_client *c)
1695{
1696	cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
1697}
1698
1699struct write_context {
1700	int no_wait;
1701	struct list_head *write_list;
1702};
1703
1704static enum it_action write_one(struct dm_buffer *b, void *context)
1705{
1706	struct write_context *wc = context;
1707
1708	if (wc->no_wait && test_bit(B_WRITING, &b->state))
1709		return IT_COMPLETE;
1710
1711	__write_dirty_buffer(b, wc->write_list);
1712	return IT_NEXT;
1713}
1714
1715static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
1716					struct list_head *write_list)
1717{
1718	struct write_context wc = {.no_wait = no_wait, .write_list = write_list};
1719
1720	__move_clean_buffers(c);
1721	cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
1722}
1723
1724/*
 1725 * Check if we're over the watermark.
 1726 * If dirty buffers outnumber clean ones by more than
 1727 * DM_BUFIO_WRITEBACK_RATIO, start writing them back asynchronously.
1728 */
1729static void __check_watermark(struct dm_bufio_client *c,
1730			      struct list_head *write_list)
1731{
1732	if (cache_count(&c->cache, LIST_DIRTY) >
1733	    cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
1734		__write_dirty_buffers_async(c, 1, write_list);
1735}
1736
1737/*
1738 *--------------------------------------------------------------
1739 * Getting a buffer
1740 *--------------------------------------------------------------
1741 */
1742
1743static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
1744{
1745	/*
 1746	 * Relying on waitqueue_active() is racy, but we sleep
1747	 * with schedule_timeout anyway.
1748	 */
1749	if (cache_put(&c->cache, b) &&
1750	    unlikely(waitqueue_active(&c->free_buffer_wait)))
1751		wake_up(&c->free_buffer_wait);
1752}
1753
1754/*
1755 * This assumes you have already checked the cache to see if the buffer
1756 * is already present (it will recheck after dropping the lock for allocation).
1757 */
1758static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1759				     enum new_flag nf, int *need_submit,
1760				     struct list_head *write_list)
1761{
1762	struct dm_buffer *b, *new_b = NULL;
1763
1764	*need_submit = 0;
1765
1766	/* This can't be called with NF_GET */
1767	if (WARN_ON_ONCE(nf == NF_GET))
1768		return NULL;
1769
1770	new_b = __alloc_buffer_wait(c, nf);
1771	if (!new_b)
1772		return NULL;
1773
1774	/*
1775	 * We've had a period where the mutex was unlocked, so need to
1776	 * recheck the buffer tree.
1777	 */
1778	b = cache_get(&c->cache, block);
1779	if (b) {
1780		__free_buffer_wake(new_b);
1781		goto found_buffer;
1782	}
1783
1784	__check_watermark(c, write_list);
1785
1786	b = new_b;
1787	atomic_set(&b->hold_count, 1);
1788	WRITE_ONCE(b->last_accessed, jiffies);
1789	b->block = block;
1790	b->read_error = 0;
1791	b->write_error = 0;
1792	b->list_mode = LIST_CLEAN;
1793
1794	if (nf == NF_FRESH)
1795		b->state = 0;
1796	else {
1797		b->state = 1 << B_READING;
1798		*need_submit = 1;
1799	}
1800
1801	/*
1802	 * We mustn't insert into the cache until the B_READING state
1803	 * is set.  Otherwise another thread could get it and use
1804	 * it before it had been read.
1805	 */
1806	cache_insert(&c->cache, b);
1807
1808	return b;
1809
1810found_buffer:
1811	if (nf == NF_PREFETCH) {
1812		cache_put_and_wake(c, b);
1813		return NULL;
1814	}
1815
1816	/*
1817	 * Note: it is essential that we don't wait for the buffer to be
1818	 * read if dm_bufio_get function is used. Both dm_bufio_get and
1819	 * dm_bufio_prefetch can be used in the driver request routine.
1820	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1821	 * the same buffer, it would deadlock if we waited.
1822	 */
1823	if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1824		cache_put_and_wake(c, b);
1825		return NULL;
1826	}
1827
1828	return b;
1829}
1830
1831/*
1832 * The endio routine for reading: set the error, clear the bit and wake up
1833 * anyone waiting on the buffer.
1834 */
1835static void read_endio(struct dm_buffer *b, blk_status_t status)
1836{
1837	b->read_error = status;
1838
1839	BUG_ON(!test_bit(B_READING, &b->state));
1840
1841	smp_mb__before_atomic();
1842	clear_bit(B_READING, &b->state);
1843	smp_mb__after_atomic();
1844
1845	wake_up_bit(&b->state, B_READING);
1846}
1847
1848/*
1849 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1850 * functions is similar except that dm_bufio_new doesn't read the
1851 * buffer from the disk (assuming that the caller overwrites all the data
1852 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1853 */
1854static void *new_read(struct dm_bufio_client *c, sector_t block,
1855		      enum new_flag nf, struct dm_buffer **bp)
1856{
1857	int need_submit = 0;
1858	struct dm_buffer *b;
1859
1860	LIST_HEAD(write_list);
1861
1862	*bp = NULL;
1863
1864	/*
1865	 * Fast path, hopefully the block is already in the cache.  No need
1866	 * to get the client lock for this.
1867	 */
1868	b = cache_get(&c->cache, block);
1869	if (b) {
1870		if (nf == NF_PREFETCH) {
1871			cache_put_and_wake(c, b);
1872			return NULL;
1873		}
1874
1875		/*
1876		 * Note: it is essential that we don't wait for the buffer to be
1877		 * read if dm_bufio_get function is used. Both dm_bufio_get and
1878		 * dm_bufio_prefetch can be used in the driver request routine.
1879		 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1880		 * the same buffer, it would deadlock if we waited.
1881		 */
1882		if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1883			cache_put_and_wake(c, b);
1884			return NULL;
1885		}
1886	}
1887
1888	if (!b) {
1889		if (nf == NF_GET)
1890			return NULL;
1891
1892		dm_bufio_lock(c);
1893		b = __bufio_new(c, block, nf, &need_submit, &write_list);
1894		dm_bufio_unlock(c);
1895	}
1896
1897#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1898	if (b && (atomic_read(&b->hold_count) == 1))
1899		buffer_record_stack(b);
1900#endif
1901
1902	__flush_write_list(&write_list);
1903
1904	if (!b)
1905		return NULL;
1906
1907	if (need_submit)
1908		submit_io(b, REQ_OP_READ, read_endio);
1909
1910	if (nf != NF_GET)	/* we already tested this condition above */
1911		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1912
1913	if (b->read_error) {
1914		int error = blk_status_to_errno(b->read_error);
1915
1916		dm_bufio_release(b);
1917
1918		return ERR_PTR(error);
1919	}
1920
1921	*bp = b;
1922
1923	return b->data;
1924}
1925
1926void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1927		   struct dm_buffer **bp)
1928{
1929	return new_read(c, block, NF_GET, bp);
1930}
1931EXPORT_SYMBOL_GPL(dm_bufio_get);
1932
1933void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1934		    struct dm_buffer **bp)
1935{
1936	if (WARN_ON_ONCE(dm_bufio_in_request()))
1937		return ERR_PTR(-EINVAL);
1938
1939	return new_read(c, block, NF_READ, bp);
1940}
1941EXPORT_SYMBOL_GPL(dm_bufio_read);
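/*
 * Typical client usage (an illustrative sketch; error handling beyond
 * IS_ERR is omitted):
 *
 *	struct dm_buffer *b;
 *	void *data = dm_bufio_read(c, block, &b);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... use data ...
 *	dm_bufio_release(b);
 */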
1942
1943void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1944		   struct dm_buffer **bp)
1945{
1946	if (WARN_ON_ONCE(dm_bufio_in_request()))
1947		return ERR_PTR(-EINVAL);
1948
1949	return new_read(c, block, NF_FRESH, bp);
1950}
1951EXPORT_SYMBOL_GPL(dm_bufio_new);
1952
1953void dm_bufio_prefetch(struct dm_bufio_client *c,
1954		       sector_t block, unsigned int n_blocks)
1955{
1956	struct blk_plug plug;
1957
1958	LIST_HEAD(write_list);
1959
1960	if (WARN_ON_ONCE(dm_bufio_in_request()))
1961		return; /* should never happen */
1962
1963	blk_start_plug(&plug);
1964
1965	for (; n_blocks--; block++) {
1966		int need_submit;
1967		struct dm_buffer *b;
1968
1969		b = cache_get(&c->cache, block);
1970		if (b) {
1971			/* already in cache */
1972			cache_put_and_wake(c, b);
1973			continue;
1974		}
1975
1976		dm_bufio_lock(c);
1977		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1978				&write_list);
1979		if (unlikely(!list_empty(&write_list))) {
1980			dm_bufio_unlock(c);
1981			blk_finish_plug(&plug);
1982			__flush_write_list(&write_list);
1983			blk_start_plug(&plug);
1984			dm_bufio_lock(c);
1985		}
1986		if (unlikely(b != NULL)) {
1987			dm_bufio_unlock(c);
1988
1989			if (need_submit)
1990				submit_io(b, REQ_OP_READ, read_endio);
1991			dm_bufio_release(b);
1992
1993			cond_resched();
1994
1995			if (!n_blocks)
1996				goto flush_plug;
1997			dm_bufio_lock(c);
1998		}
1999		dm_bufio_unlock(c);
2000	}
2001
2002flush_plug:
2003	blk_finish_plug(&plug);
2004}
2005EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
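/*
 * Illustrative prefetch pattern (a sketch): start reads for a range of
 * blocks, then pick up whichever have completed with dm_bufio_get(),
 * which never waits for I/O and returns NULL for blocks that are still
 * being read:
 *
 *	dm_bufio_prefetch(c, block, 8);
 *	...
 *	data = dm_bufio_get(c, block, &b);
 *	if (data) {
 *		... block is already in memory ...
 *		dm_bufio_release(b);
 *	}
 */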
2006
2007void dm_bufio_release(struct dm_buffer *b)
2008{
2009	struct dm_bufio_client *c = b->c;
2010
2011	/*
2012	 * If there were errors on the buffer, and the buffer is not
2013	 * to be written, free the buffer. There is no point in caching
 2014	 * an invalid buffer.
2015	 */
2016	if ((b->read_error || b->write_error) &&
2017	    !test_bit_acquire(B_READING, &b->state) &&
2018	    !test_bit(B_WRITING, &b->state) &&
2019	    !test_bit(B_DIRTY, &b->state)) {
2020		dm_bufio_lock(c);
2021
2022		/* cache remove can fail if there are other holders */
2023		if (cache_remove(&c->cache, b)) {
2024			__free_buffer_wake(b);
2025			dm_bufio_unlock(c);
2026			return;
2027		}
2028
2029		dm_bufio_unlock(c);
2030	}
2031
2032	cache_put_and_wake(c, b);
2033}
2034EXPORT_SYMBOL_GPL(dm_bufio_release);
2035
2036void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
2037					unsigned int start, unsigned int end)
2038{
2039	struct dm_bufio_client *c = b->c;
2040
2041	BUG_ON(start >= end);
2042	BUG_ON(end > b->c->block_size);
2043
2044	dm_bufio_lock(c);
2045
2046	BUG_ON(test_bit(B_READING, &b->state));
2047
2048	if (!test_and_set_bit(B_DIRTY, &b->state)) {
2049		b->dirty_start = start;
2050		b->dirty_end = end;
2051		cache_mark(&c->cache, b, LIST_DIRTY);
2052	} else {
2053		if (start < b->dirty_start)
2054			b->dirty_start = start;
2055		if (end > b->dirty_end)
2056			b->dirty_end = end;
2057	}
2058
2059	dm_bufio_unlock(c);
2060}
2061EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
2062
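/*
 * Editor's note -- sketch of a partial update (hypothetical caller, with
 * assumed off/src/len): modify bytes [off, off + len) of the block, then
 * record just that range so the writeback path can restrict the write to
 * it (subject to DM_BUFIO_WRITE_ALIGN):
 *
 *	void *data = dm_bufio_read(c, block, &b);
 *
 *	if (!IS_ERR(data)) {
 *		memcpy(data + off, src, len);
 *		dm_bufio_mark_partial_buffer_dirty(b, off, off + len);
 *		dm_bufio_release(b);
 *	}
 */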
2063void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
2064{
2065	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
2066}
2067EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
2068
2069void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
2070{
2071	LIST_HEAD(write_list);
2072
2073	if (WARN_ON_ONCE(dm_bufio_in_request()))
2074		return; /* should never happen */
2075
2076	dm_bufio_lock(c);
2077	__write_dirty_buffers_async(c, 0, &write_list);
2078	dm_bufio_unlock(c);
2079	__flush_write_list(&write_list);
2080}
2081EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
2082
2083/*
2084 * For performance, it is essential that the buffers are written asynchronously
2085 * and simultaneously (so that the block layer can merge the writes) and then
2086 * waited upon.
2087 *
2088 * Finally, we flush the hardware disk cache.
2089 */
2090static bool is_writing(struct lru_entry *e, void *context)
2091{
2092	struct dm_buffer *b = le_to_buffer(e);
2093
2094	return test_bit(B_WRITING, &b->state);
2095}
2096
2097int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
2098{
2099	int a, f;
2100	unsigned long nr_buffers;
2101	struct lru_entry *e;
2102	struct lru_iter it;
2103
2104	LIST_HEAD(write_list);
2105
2106	dm_bufio_lock(c);
2107	__write_dirty_buffers_async(c, 0, &write_list);
2108	dm_bufio_unlock(c);
2109	__flush_write_list(&write_list);
2110	dm_bufio_lock(c);
2111
2112	nr_buffers = cache_count(&c->cache, LIST_DIRTY);
2113	lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
2114	while ((e = lru_iter_next(&it, is_writing, c))) {
2115		struct dm_buffer *b = le_to_buffer(e);
2116		__cache_inc_buffer(b);
2117
2118		BUG_ON(test_bit(B_READING, &b->state));
2119
2120		if (nr_buffers) {
2121			nr_buffers--;
2122			dm_bufio_unlock(c);
2123			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2124			dm_bufio_lock(c);
2125		} else {
2126			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2127		}
2128
2129		if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
2130			cache_mark(&c->cache, b, LIST_CLEAN);
2131
2132		cache_put_and_wake(c, b);
2133
2134		cond_resched();
2135	}
2136	lru_iter_end(&it);
2137
2138	wake_up(&c->free_buffer_wait);
2139	dm_bufio_unlock(c);
2140
2141	a = xchg(&c->async_write_error, 0);
2142	f = dm_bufio_issue_flush(c);
2143	if (a)
2144		return a;
2145
2146	return f;
2147}
2148EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
2149
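/*
 * Editor's note -- the typical commit sequence (hypothetical caller):
 * dirty the buffers, release them, then write and wait; the final cache
 * flush is issued internally via dm_bufio_issue_flush():
 *
 *	dm_bufio_mark_buffer_dirty(b);
 *	dm_bufio_release(b);
 *	r = dm_bufio_write_dirty_buffers(c);
 *	if (r)
 *		...fail the transaction...
 */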
2150/*
2151 * Use dm-io to send an empty barrier to flush the device.
2152 */
2153int dm_bufio_issue_flush(struct dm_bufio_client *c)
2154{
2155	struct dm_io_request io_req = {
2156		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
2157		.mem.type = DM_IO_KMEM,
2158		.mem.ptr.addr = NULL,
2159		.client = c->dm_io,
2160	};
2161	struct dm_io_region io_reg = {
2162		.bdev = c->bdev,
2163		.sector = 0,
2164		.count = 0,
2165	};
2166
2167	if (WARN_ON_ONCE(dm_bufio_in_request()))
2168		return -EINVAL;
2169
2170	return dm_io(&io_req, 1, &io_reg, NULL);
2171}
2172EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
2173
2174/*
2175 * Use dm-io to send a discard request to the device.
2176 */
2177int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
2178{
2179	struct dm_io_request io_req = {
2180		.bi_opf = REQ_OP_DISCARD | REQ_SYNC,
2181		.mem.type = DM_IO_KMEM,
2182		.mem.ptr.addr = NULL,
2183		.client = c->dm_io,
2184	};
2185	struct dm_io_region io_reg = {
2186		.bdev = c->bdev,
2187		.sector = block_to_sector(c, block),
2188		.count = block_to_sector(c, count),
2189	};
2190
2191	if (WARN_ON_ONCE(dm_bufio_in_request()))
2192		return -EINVAL; /* discards are optional */
2193
2194	return dm_io(&io_req, 1, &io_reg, NULL);
2195}
2196EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
2197
2198static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
2199{
2200	struct dm_buffer *b;
2201
2202	b = cache_get(&c->cache, block);
2203	if (b) {
2204		if (likely(!smp_load_acquire(&b->state))) {
2205			if (cache_remove(&c->cache, b))
2206				__free_buffer_wake(b);
2207			else
2208				cache_put_and_wake(c, b);
2209		} else {
2210			cache_put_and_wake(c, b);
2211		}
2212	}
2213
2214	return b != NULL;
2215}
2216
2217/*
2218 * Free the given buffer.
2219 *
2220 * This is just a hint; if the buffer is in use or dirty, this function
2221 * does nothing.
2222 */
2223void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
2224{
2225	dm_bufio_lock(c);
2226	forget_buffer(c, block);
2227	dm_bufio_unlock(c);
2228}
2229EXPORT_SYMBOL_GPL(dm_bufio_forget);
2230
2231static enum evict_result idle(struct dm_buffer *b, void *context)
2232{
2233	return b->state ? ER_DONT_EVICT : ER_EVICT;
2234}
2235
2236void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
2237{
2238	dm_bufio_lock(c);
2239	cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
2240	dm_bufio_unlock(c);
2241}
2242EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
2243
2244void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
2245{
2246	c->minimum_buffers = n;
2247}
2248EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
2249
2250unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
2251{
2252	return c->block_size;
2253}
2254EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
2255
2256sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
2257{
2258	sector_t s = bdev_nr_sectors(c->bdev);
2259
2260	if (s >= c->start)
2261		s -= c->start;
2262	else
2263		s = 0;
2264	if (likely(c->sectors_per_block_bits >= 0))
2265		s >>= c->sectors_per_block_bits;
2266	else
2267		sector_div(s, c->block_size >> SECTOR_SHIFT);
2268	return s;
2269}
2270EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
2271
2272struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
2273{
2274	return c->dm_io;
2275}
2276EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
2277
2278sector_t dm_bufio_get_block_number(struct dm_buffer *b)
2279{
2280	return b->block;
2281}
2282EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
2283
2284void *dm_bufio_get_block_data(struct dm_buffer *b)
2285{
2286	return b->data;
2287}
2288EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
2289
2290void *dm_bufio_get_aux_data(struct dm_buffer *b)
2291{
2292	return b + 1;
2293}
2294EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
2295
2296struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
2297{
2298	return b->c;
2299}
2300EXPORT_SYMBOL_GPL(dm_bufio_get_client);
2301
2302static enum it_action warn_leak(struct dm_buffer *b, void *context)
2303{
2304	bool *warned = context;
2305
2306	WARN_ON(!(*warned));
2307	*warned = true;
2308	DMERR("leaked buffer %llx, hold count %u, list %d",
2309	      (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode);
2310#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2311	stack_trace_print(b->stack_entries, b->stack_len, 1);
2312	/* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
2313	atomic_set(&b->hold_count, 0);
2314#endif
2315	return IT_NEXT;
2316}
2317
2318static void drop_buffers(struct dm_bufio_client *c)
2319{
2320	int i;
2321	struct dm_buffer *b;
2322
2323	if (WARN_ON(dm_bufio_in_request()))
2324		return; /* should never happen */
2325
2326	/*
2327	 * An optimization so that the buffers are not written one-by-one.
2328	 */
2329	dm_bufio_write_dirty_buffers_async(c);
2330
2331	dm_bufio_lock(c);
2332
2333	while ((b = __get_unclaimed_buffer(c)))
2334		__free_buffer_wake(b);
2335
2336	for (i = 0; i < LIST_SIZE; i++) {
2337		bool warned = false;
2338
2339		cache_iterate(&c->cache, i, warn_leak, &warned);
2340	}
2341
2342#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2343	while ((b = __get_unclaimed_buffer(c)))
2344		__free_buffer_wake(b);
2345#endif
2346
2347	for (i = 0; i < LIST_SIZE; i++)
2348		WARN_ON(cache_count(&c->cache, i));
2349
2350	dm_bufio_unlock(c);
2351}
2352
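/*
 * Editor's note -- worked example: with the default retain_bytes of
 * 256 KiB (262144) and 4 KiB blocks, sectors_per_block_bits is 3, so
 * get_retain_buffers() below returns 262144 >> (3 + SECTOR_SHIFT) = 64.
 */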
2353static unsigned long get_retain_buffers(struct dm_bufio_client *c)
2354{
2355	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
2356
2357	if (likely(c->sectors_per_block_bits >= 0))
2358		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
2359	else
2360		retain_bytes /= c->block_size;
2361
2362	return retain_bytes;
2363}
2364
2365static void __scan(struct dm_bufio_client *c)
2366{
2367	int l;
2368	struct dm_buffer *b;
2369	unsigned long freed = 0;
2370	unsigned long retain_target = get_retain_buffers(c);
2371	unsigned long count = cache_total(&c->cache);
2372
2373	for (l = 0; l < LIST_SIZE; l++) {
2374		while (true) {
2375			if (count - freed <= retain_target)
2376				atomic_long_set(&c->need_shrink, 0);
2377			if (!atomic_long_read(&c->need_shrink))
2378				break;
2379
2380			b = cache_evict(&c->cache, l,
2381					l == LIST_CLEAN ? is_clean : is_dirty, c);
2382			if (!b)
2383				break;
2384
2385			__make_buffer_clean(b);
2386			__free_buffer_wake(b);
2387
2388			atomic_long_dec(&c->need_shrink);
2389			freed++;
2390			cond_resched();
2391		}
2392	}
2393}
2394
2395static void shrink_work(struct work_struct *w)
2396{
2397	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
2398
2399	dm_bufio_lock(c);
2400	__scan(c);
2401	dm_bufio_unlock(c);
2402}
2403
2404static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2405{
2406	struct dm_bufio_client *c;
2407
2408	c = shrink->private_data;
2409	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
2410	queue_work(dm_bufio_wq, &c->shrink_work);
2411
2412	return sc->nr_to_scan;
2413}
2414
2415static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
2416{
2417	struct dm_bufio_client *c = shrink->private_data;
2418	unsigned long count = cache_total(&c->cache);
2419	unsigned long retain_target = get_retain_buffers(c);
2420	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
2421
2422	if (unlikely(count < retain_target))
2423		count = 0;
2424	else
2425		count -= retain_target;
2426
2427	if (unlikely(count < queued_for_cleanup))
2428		count = 0;
2429	else
2430		count -= queued_for_cleanup;
2431
2432	return count;
2433}
2434
2435/*
2436 * Create the buffering interface
2437 */
2438struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
2439					       unsigned int reserved_buffers, unsigned int aux_size,
2440					       void (*alloc_callback)(struct dm_buffer *),
2441					       void (*write_callback)(struct dm_buffer *),
2442					       unsigned int flags)
2443{
2444	int r;
2445	unsigned int num_locks;
2446	struct dm_bufio_client *c;
2447	char slab_name[27];
2448
2449	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
2450		DMERR("%s: block size not specified or is not a multiple of 512 bytes", __func__);
2451		r = -EINVAL;
2452		goto bad_client;
2453	}
2454
2455	num_locks = dm_num_hash_locks();
2456	c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2457	if (!c) {
2458		r = -ENOMEM;
2459		goto bad_client;
2460	}
2461	cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
2462
2463	c->bdev = bdev;
2464	c->block_size = block_size;
2465	if (is_power_of_2(block_size))
2466		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
2467	else
2468		c->sectors_per_block_bits = -1;
2469
2470	c->alloc_callback = alloc_callback;
2471	c->write_callback = write_callback;
2472
2473	if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
2474		c->no_sleep = true;
2475		static_branch_inc(&no_sleep_enabled);
2476	}
2477
2478	mutex_init(&c->lock);
2479	spin_lock_init(&c->spinlock);
2480	INIT_LIST_HEAD(&c->reserved_buffers);
2481	c->need_reserved_buffers = reserved_buffers;
2482
2483	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
2484
2485	init_waitqueue_head(&c->free_buffer_wait);
2486	c->async_write_error = 0;
2487
2488	c->dm_io = dm_io_client_create();
2489	if (IS_ERR(c->dm_io)) {
2490		r = PTR_ERR(c->dm_io);
2491		goto bad_dm_io;
2492	}
2493
2494	if (block_size <= KMALLOC_MAX_SIZE &&
2495	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
2496		unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
2497
2498		snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size);
2499		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
2500						  SLAB_RECLAIM_ACCOUNT, NULL);
2501		if (!c->slab_cache) {
2502			r = -ENOMEM;
2503			goto bad;
2504		}
2505	}
2506	if (aux_size)
2507		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size);
2508	else
2509		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer");
2510	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
2511					   0, SLAB_RECLAIM_ACCOUNT, NULL);
2512	if (!c->slab_buffer) {
2513		r = -ENOMEM;
2514		goto bad;
2515	}
2516
2517	while (c->need_reserved_buffers) {
2518		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
2519
2520		if (!b) {
2521			r = -ENOMEM;
2522			goto bad;
2523		}
2524		__free_buffer_wake(b);
2525	}
2526
2527	INIT_WORK(&c->shrink_work, shrink_work);
2528	atomic_long_set(&c->need_shrink, 0);
2529
2530	c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
2531				     MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2532	if (!c->shrinker) {
2533		r = -ENOMEM;
2534		goto bad;
2535	}
2536
2537	c->shrinker->count_objects = dm_bufio_shrink_count;
2538	c->shrinker->scan_objects = dm_bufio_shrink_scan;
2539	c->shrinker->seeks = 1;
2540	c->shrinker->batch = 0;
2541	c->shrinker->private_data = c;
2542
2543	shrinker_register(c->shrinker);
2544
2545	mutex_lock(&dm_bufio_clients_lock);
2546	dm_bufio_client_count++;
2547	list_add(&c->client_list, &dm_bufio_all_clients);
2548	__cache_size_refresh();
2549	mutex_unlock(&dm_bufio_clients_lock);
2550
2551	return c;
2552
2553bad:
2554	while (!list_empty(&c->reserved_buffers)) {
2555		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2556
2557		list_del(&b->lru.list);
2558		free_buffer(b);
2559	}
2560	kmem_cache_destroy(c->slab_cache);
2561	kmem_cache_destroy(c->slab_buffer);
2562	dm_io_client_destroy(c->dm_io);
2563bad_dm_io:
2564	mutex_destroy(&c->lock);
2565	if (c->no_sleep)
2566		static_branch_dec(&no_sleep_enabled);
2567	kfree(c);
2568bad_client:
2569	return ERR_PTR(r);
2570}
2571EXPORT_SYMBOL_GPL(dm_bufio_client_create);
2572
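/*
 * Editor's note -- a minimal client lifecycle sketch (hypothetical
 * target, assuming a 4 KiB block size, one reserved buffer, no aux data
 * and no callbacks):
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...dm_bufio_read() / dm_bufio_release()...
 *	dm_bufio_client_destroy(c);
 *
 * Every buffer must be released before dm_bufio_client_destroy().
 */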
2573/*
2574 * Free the buffering interface.
2575 * It is required that there are no references to any buffers.
2576 */
2577void dm_bufio_client_destroy(struct dm_bufio_client *c)
2578{
2579	unsigned int i;
2580
2581	drop_buffers(c);
2582
2583	shrinker_free(c->shrinker);
2584	flush_work(&c->shrink_work);
2585
2586	mutex_lock(&dm_bufio_clients_lock);
2587
2588	list_del(&c->client_list);
2589	dm_bufio_client_count--;
2590	__cache_size_refresh();
2591
2592	mutex_unlock(&dm_bufio_clients_lock);
2593
2594	WARN_ON(c->need_reserved_buffers);
2595
2596	while (!list_empty(&c->reserved_buffers)) {
2597		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2598
2599		list_del(&b->lru.list);
2600		free_buffer(b);
2601	}
2602
2603	for (i = 0; i < LIST_SIZE; i++)
2604		if (cache_count(&c->cache, i))
2605			DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
2606
2607	for (i = 0; i < LIST_SIZE; i++)
2608		WARN_ON(cache_count(&c->cache, i));
2609
2610	cache_destroy(&c->cache);
2611	kmem_cache_destroy(c->slab_cache);
2612	kmem_cache_destroy(c->slab_buffer);
2613	dm_io_client_destroy(c->dm_io);
2614	mutex_destroy(&c->lock);
2615	if (c->no_sleep)
2616		static_branch_dec(&no_sleep_enabled);
2617	kfree(c);
2618}
2619EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
2620
2621void dm_bufio_client_reset(struct dm_bufio_client *c)
2622{
2623	drop_buffers(c);
2624	flush_work(&c->shrink_work);
2625}
2626EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
2627
2628void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
2629{
2630	c->start = start;
2631}
2632EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
2633
2634/*--------------------------------------------------------------*/
2635
2636static unsigned int get_max_age_hz(void)
2637{
2638	unsigned int max_age = READ_ONCE(dm_bufio_max_age);
2639
2640	if (max_age > UINT_MAX / HZ)
2641		max_age = UINT_MAX / HZ;
2642
2643	return max_age * HZ;
2644}
2645
2646static bool older_than(struct dm_buffer *b, unsigned long age_hz)
2647{
2648	return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
2649}
2650
2651struct evict_params {
2652	gfp_t gfp;
2653	unsigned long age_hz;
2654
2655	/*
2656	 * This gets updated with the largest last_accessed (i.e. the most
2657	 * recently used) of the evicted buffers.  It will not be reinitialised
2658	 * by __evict_many(), so you can use it across multiple invocations.
2659	 */
2660	unsigned long last_accessed;
2661};
2662
2663/*
2664 * We may not be able to evict this buffer if IO is pending or the client
2665 * is still using it.
2666 *
2667 * And if GFP_NOFS is used, we must not do any I/O because we hold
2668 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
2669 * rerouted to a different bufio client.
2670 */
2671static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2672{
2673	struct evict_params *params = context;
2674
2675	if (!(params->gfp & __GFP_FS) ||
2676	    (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
2677		if (test_bit_acquire(B_READING, &b->state) ||
2678		    test_bit(B_WRITING, &b->state) ||
2679		    test_bit(B_DIRTY, &b->state))
2680			return ER_DONT_EVICT;
2681	}
2682
2683	return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
2684}
2685
2686static unsigned long __evict_many(struct dm_bufio_client *c,
2687				  struct evict_params *params,
2688				  int list_mode, unsigned long max_count)
2689{
2690	unsigned long count;
2691	unsigned long last_accessed;
2692	struct dm_buffer *b;
2693
2694	for (count = 0; count < max_count; count++) {
2695		b = cache_evict(&c->cache, list_mode, select_for_evict, params);
2696		if (!b)
2697			break;
2698
2699		last_accessed = READ_ONCE(b->last_accessed);
2700		if (time_after_eq(params->last_accessed, last_accessed))
2701			params->last_accessed = last_accessed;
2702
2703		__make_buffer_clean(b);
2704		__free_buffer_wake(b);
2705
2706		cond_resched();
2707	}
2708
2709	return count;
2710}
2711
2712static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
2713{
2714	struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
2715	unsigned long retain = get_retain_buffers(c);
2716	unsigned long count;
2717	LIST_HEAD(write_list);
2718
2719	dm_bufio_lock(c);
2720
2721	__check_watermark(c, &write_list);
2722	if (unlikely(!list_empty(&write_list))) {
2723		dm_bufio_unlock(c);
2724		__flush_write_list(&write_list);
2725		dm_bufio_lock(c);
2726	}
2727
2728	count = cache_total(&c->cache);
2729	if (count > retain)
2730		__evict_many(c, &params, LIST_CLEAN, count - retain);
2731
2732	dm_bufio_unlock(c);
2733}
2734
2735static void cleanup_old_buffers(void)
2736{
2737	unsigned long max_age_hz = get_max_age_hz();
2738	struct dm_bufio_client *c;
2739
2740	mutex_lock(&dm_bufio_clients_lock);
2741
2742	__cache_size_refresh();
2743
2744	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2745		evict_old_buffers(c, max_age_hz);
2746
2747	mutex_unlock(&dm_bufio_clients_lock);
2748}
2749
2750static void work_fn(struct work_struct *w)
2751{
2752	cleanup_old_buffers();
2753
2754	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2755			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2756}
2757
2758/*--------------------------------------------------------------*/
2759
2760/*
2761 * Global cleanup tries to evict the oldest buffers from across _all_
2762 * the clients.  It does this by repeatedly evicting a few buffers from
2763 * the client that holds the oldest buffer.  It's approximate, but hopefully
2764 * good enough.
2765 */
2766static struct dm_bufio_client *__pop_client(void)
2767{
2768	struct list_head *h;
2769
2770	if (list_empty(&dm_bufio_all_clients))
2771		return NULL;
2772
2773	h = dm_bufio_all_clients.next;
2774	list_del(h);
2775	return container_of(h, struct dm_bufio_client, client_list);
2776}
2777
2778/*
2779 * Inserts the client in the global client list based on its
2780 * 'oldest_buffer' field.
2781 */
2782static void __insert_client(struct dm_bufio_client *new_client)
2783{
2784	struct dm_bufio_client *c;
2785	struct list_head *h = dm_bufio_all_clients.next;
2786
2787	while (h != &dm_bufio_all_clients) {
2788		c = container_of(h, struct dm_bufio_client, client_list);
2789		if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
2790			break;
2791		h = h->next;
2792	}
2793
2794	list_add_tail(&new_client->client_list, h);
2795}
2796
2797static unsigned long __evict_a_few(unsigned long nr_buffers)
2798{
2799	unsigned long count;
2800	struct dm_bufio_client *c;
2801	struct evict_params params = {
2802		.gfp = GFP_KERNEL,
2803		.age_hz = 0,
2804		/* set to jiffies in case there are no buffers in this client */
2805		.last_accessed = jiffies
2806	};
2807
2808	c = __pop_client();
2809	if (!c)
2810		return 0;
2811
2812	dm_bufio_lock(c);
2813	count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
2814	dm_bufio_unlock(c);
2815
2816	if (count)
2817		c->oldest_buffer = params.last_accessed;
2818	__insert_client(c);
2819
2820	return count;
2821}
2822
2823static void check_watermarks(void)
2824{
2825	LIST_HEAD(write_list);
2826	struct dm_bufio_client *c;
2827
2828	mutex_lock(&dm_bufio_clients_lock);
2829	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
2830		dm_bufio_lock(c);
2831		__check_watermark(c, &write_list);
2832		dm_bufio_unlock(c);
2833	}
2834	mutex_unlock(&dm_bufio_clients_lock);
2835
2836	__flush_write_list(&write_list);
2837}
2838
2839static void evict_old(void)
2840{
2841	unsigned long threshold = dm_bufio_cache_size -
2842		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
2843
2844	mutex_lock(&dm_bufio_clients_lock);
2845	while (dm_bufio_current_allocated > threshold) {
2846		if (!__evict_a_few(64))
2847			break;
2848		cond_resched();
2849	}
2850	mutex_unlock(&dm_bufio_clients_lock);
2851}
2852
2853static void do_global_cleanup(struct work_struct *w)
2854{
2855	check_watermarks();
2856	evict_old();
2857}
2858
2859/*
2860 *--------------------------------------------------------------
2861 * Module setup
2862 *--------------------------------------------------------------
2863 */
2864
2865/*
2866 * This is called only once for the whole dm_bufio module.
2867 * It initializes the memory limit.
2868 */
2869static int __init dm_bufio_init(void)
2870{
2871	__u64 mem;
2872
2873	dm_bufio_allocated_kmem_cache = 0;
2874	dm_bufio_allocated_get_free_pages = 0;
2875	dm_bufio_allocated_vmalloc = 0;
2876	dm_bufio_current_allocated = 0;
2877
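	/*
	 * Editor's note: e.g. with 16 GiB of non-highmem RAM this comes to
	 * roughly 328 MiB (2%), possibly capped further by the vmalloc
	 * limit below.
	 */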
2878	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2879			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2880
2881	if (mem > ULONG_MAX)
2882		mem = ULONG_MAX;
2883
2884#ifdef CONFIG_MMU
2885	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2886		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2887#endif
2888
2889	dm_bufio_default_cache_size = mem;
2890
2891	mutex_lock(&dm_bufio_clients_lock);
2892	__cache_size_refresh();
2893	mutex_unlock(&dm_bufio_clients_lock);
2894
2895	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2896	if (!dm_bufio_wq)
2897		return -ENOMEM;
2898
2899	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2900	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2901	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2902			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2903
2904	return 0;
2905}
2906
2907/*
2908 * This is called once when unloading the dm_bufio module.
2909 */
2910static void __exit dm_bufio_exit(void)
2911{
2912	int bug = 0;
2913
2914	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2915	destroy_workqueue(dm_bufio_wq);
2916
2917	if (dm_bufio_client_count) {
2918		DMCRIT("%s: dm_bufio_client_count leaked: %d",
2919			__func__, dm_bufio_client_count);
2920		bug = 1;
2921	}
2922
2923	if (dm_bufio_current_allocated) {
2924		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2925			__func__, dm_bufio_current_allocated);
2926		bug = 1;
2927	}
2928
2929	if (dm_bufio_allocated_get_free_pages) {
2930		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2931		       __func__, dm_bufio_allocated_get_free_pages);
2932		bug = 1;
2933	}
2934
2935	if (dm_bufio_allocated_vmalloc) {
2936		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
2937		       __func__, dm_bufio_allocated_vmalloc);
2938		bug = 1;
2939	}
2940
2941	WARN_ON(bug); /* leaks are not worth crashing the system */
2942}
2943
2944module_init(dm_bufio_init)
2945module_exit(dm_bufio_exit)
2946
2947module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
2948MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2949
2950module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2951MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2952
2953module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
2954MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2955
2956module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
2957MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2958
2959module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
2960MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2961
2962module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
2963MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2964
2965module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
2966MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2967
2968module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
2969MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2970
2971MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2972MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2973MODULE_LICENSE("GPL");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2009-2011 Red Hat, Inc.
   4 *
   5 * Author: Mikulas Patocka <mpatocka@redhat.com>
   6 *
   7 * This file is released under the GPL.
   8 */
   9
  10#include <linux/dm-bufio.h>
  11
  12#include <linux/device-mapper.h>
  13#include <linux/dm-io.h>
  14#include <linux/slab.h>
  15#include <linux/sched/mm.h>
  16#include <linux/jiffies.h>
  17#include <linux/vmalloc.h>
  18#include <linux/shrinker.h>
  19#include <linux/module.h>
  20#include <linux/rbtree.h>
  21#include <linux/stacktrace.h>
  22#include <linux/jump_label.h>
  23
  24#include "dm.h"
  25
  26#define DM_MSG_PREFIX "bufio"
  27
  28/*
  29 * Memory management policy:
  30 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
  31 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
  32 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
  33 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
  34 *	dirty buffers.
  35 */
  36#define DM_BUFIO_MIN_BUFFERS		8
  37
  38#define DM_BUFIO_MEMORY_PERCENT		2
  39#define DM_BUFIO_VMALLOC_PERCENT	25
  40#define DM_BUFIO_WRITEBACK_RATIO	3
  41#define DM_BUFIO_LOW_WATERMARK_RATIO	16
  42
  43/*
  44 * Check buffer ages in this interval (seconds)
  45 */
  46#define DM_BUFIO_WORK_TIMER_SECS	30
  47
  48/*
  49 * Free buffers when they are older than this (seconds)
  50 */
  51#define DM_BUFIO_DEFAULT_AGE_SECS	300
  52
  53/*
  54 * The nr of bytes of cached data to keep around.
  55 */
  56#define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
  57
  58/*
  59 * Align buffer writes to this boundary.
  60 * Tests show that SSDs have the highest IOPS when using 4k writes.
  61 */
  62#define DM_BUFIO_WRITE_ALIGN		4096
  63
  64/*
  65 * dm_buffer->list_mode
  66 */
  67#define LIST_CLEAN	0
  68#define LIST_DIRTY	1
  69#define LIST_SIZE	2
  70
  71/*--------------------------------------------------------------*/
  72
  73/*
  74 * Rather than use an LRU list, we use a clock algorithm where entries
  75 * are held in a circular list.  When an entry is 'hit' a reference bit
  76 * is set.  The least recently used entry is approximated by running a
  77 * cursor around the list selecting unreferenced entries. Referenced
  78 * entries have their reference bit cleared as the cursor passes them.
  79 */
  80struct lru_entry {
  81	struct list_head list;
  82	atomic_t referenced;
  83};
  84
  85struct lru_iter {
  86	struct lru *lru;
  87	struct list_head list;
  88	struct lru_entry *stop;
  89	struct lru_entry *e;
  90};
  91
  92struct lru {
  93	struct list_head *cursor;
  94	unsigned long count;
  95
  96	struct list_head iterators;
  97};
  98
  99/*--------------*/
 100
 101static void lru_init(struct lru *lru)
 102{
 103	lru->cursor = NULL;
 104	lru->count = 0;
 105	INIT_LIST_HEAD(&lru->iterators);
 106}
 107
 108static void lru_destroy(struct lru *lru)
 109{
 110	WARN_ON_ONCE(lru->cursor);
 111	WARN_ON_ONCE(!list_empty(&lru->iterators));
 112}
 113
 114/*
 115 * Insert a new entry into the lru.
 116 */
 117static void lru_insert(struct lru *lru, struct lru_entry *le)
 118{
 119	/*
 120	 * Don't be tempted to set to 1, makes the lru aspect
 121	 * perform poorly.
 122	 */
 123	atomic_set(&le->referenced, 0);
 124
 125	if (lru->cursor) {
 126		list_add_tail(&le->list, lru->cursor);
 127	} else {
 128		INIT_LIST_HEAD(&le->list);
 129		lru->cursor = &le->list;
 130	}
 131	lru->count++;
 132}
 133
 134/*--------------*/
 135
 136/*
 137 * Convert a list_head pointer to an lru_entry pointer.
 138 */
 139static inline struct lru_entry *to_le(struct list_head *l)
 140{
 141	return container_of(l, struct lru_entry, list);
 142}
 143
 144/*
 145 * Initialize an lru_iter and add it to the list of cursors in the lru.
 146 */
 147static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
 148{
 149	it->lru = lru;
 150	it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
 151	it->e = lru->cursor ? to_le(lru->cursor) : NULL;
 152	list_add(&it->list, &lru->iterators);
 153}
 154
 155/*
 156 * Remove an lru_iter from the list of cursors in the lru.
 157 */
 158static inline void lru_iter_end(struct lru_iter *it)
 159{
 160	list_del(&it->list);
 161}
 162
 163/* Predicate function type to be used with lru_iter_next */
 164typedef bool (*iter_predicate)(struct lru_entry *le, void *context);
 165
 166/*
 167 * Advance the cursor to the next entry that passes the
 168 * predicate, and return that entry.  Returns NULL if the
 169 * iteration is complete.
 170 */
 171static struct lru_entry *lru_iter_next(struct lru_iter *it,
 172				       iter_predicate pred, void *context)
 173{
 174	struct lru_entry *e;
 175
 176	while (it->e) {
 177		e = it->e;
 178
 179		/* advance the cursor */
 180		if (it->e == it->stop)
 181			it->e = NULL;
 182		else
 183			it->e = to_le(it->e->list.next);
 184
 185		if (pred(e, context))
 186			return e;
 187	}
 188
 189	return NULL;
 190}
 191
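/*
 * Editor's note -- the canonical iteration pattern (cf.
 * dm_bufio_write_dirty_buffers() in the listing above):
 *
 *	lru_iter_begin(lru, &it);
 *	while ((e = lru_iter_next(&it, pred, context)))
 *		...process e...
 *	lru_iter_end(&it);
 */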
 192/*
 193 * Invalidate a specific lru_entry and update all cursors in
 194 * the lru accordingly.
 195 */
 196static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
 197{
 198	struct lru_iter *it;
 199
 200	list_for_each_entry(it, &lru->iterators, list) {
 201		/* Move it->e forwards if necessary. */
 202		if (it->e == e) {
 203			it->e = to_le(it->e->list.next);
 204			if (it->e == e)
 205				it->e = NULL;
 206		}
 207
 208		/* Move it->stop backwards if necessary. */
 209		if (it->stop == e) {
 210			it->stop = to_le(it->stop->list.prev);
 211			if (it->stop == e)
 212				it->stop = NULL;
 213		}
 214	}
 215}
 216
 217/*--------------*/
 218
 219/*
 220 * Remove a specific entry from the lru.
 221 */
 222static void lru_remove(struct lru *lru, struct lru_entry *le)
 223{
 224	lru_iter_invalidate(lru, le);
 225	if (lru->count == 1) {
 226		lru->cursor = NULL;
 227	} else {
 228		if (lru->cursor == &le->list)
 229			lru->cursor = lru->cursor->next;
 230		list_del(&le->list);
 231	}
 232	lru->count--;
 233}
 234
 235/*
 236 * Mark as referenced.
 237 */
 238static inline void lru_reference(struct lru_entry *le)
 239{
 240	atomic_set(&le->referenced, 1);
 241}
 242
 243/*--------------*/
 244
 245/*
 246 * Remove the least recently used entry (approximately) that passes the predicate.
 247 * Returns NULL on failure.
 248 */
 249enum evict_result {
 250	ER_EVICT,
 251	ER_DONT_EVICT,
 252	ER_STOP, /* stop looking for something to evict */
 253};
 254
 255typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
 256
 257static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
 258{
 259	unsigned long tested = 0;
 260	struct list_head *h = lru->cursor;
 261	struct lru_entry *le;
 262
 263	if (!h)
 264		return NULL;
 265	/*
 266	 * In the worst case we have to loop around twice: once to clear
 267	 * the reference flags, and then again to discover that the predicate
 268	 * fails for all entries.
 269	 */
 270	while (tested < lru->count) {
 271		le = container_of(h, struct lru_entry, list);
 272
 273		if (atomic_read(&le->referenced)) {
 274			atomic_set(&le->referenced, 0);
 275		} else {
 276			tested++;
 277			switch (pred(le, context)) {
 278			case ER_EVICT:
 279				/*
 280				 * Adjust the cursor, so we start the next
 281				 * search from here.
 282				 */
 283				lru->cursor = le->list.next;
 284				lru_remove(lru, le);
 285				return le;
 286
 287			case ER_DONT_EVICT:
 288				break;
 289
 290			case ER_STOP:
 291				lru->cursor = le->list.next;
 292				return NULL;
 293			}
 294		}
 295
 296		h = h->next;
 297
 298		if (!no_sleep)
 299			cond_resched();
 300	}
 301
 302	return NULL;
 303}
 304
 305/*--------------------------------------------------------------*/
 306
 307/*
 308 * Buffer state bits.
 309 */
 310#define B_READING	0
 311#define B_WRITING	1
 312#define B_DIRTY		2
 313
 314/*
 315 * Describes how the block was allocated:
 316 * kmem_cache_alloc(), kmalloc(), __get_free_pages() or vmalloc().
 317 * See the comment at alloc_buffer_data.
 318 */
 319enum data_mode {
 320	DATA_MODE_SLAB = 0,
 321	DATA_MODE_KMALLOC = 1,
 322	DATA_MODE_GET_FREE_PAGES = 2,
 323	DATA_MODE_VMALLOC = 3,
 324	DATA_MODE_LIMIT = 4
 325};
 326
 327struct dm_buffer {
 328	/* protected by the locks in dm_buffer_cache */
 329	struct rb_node node;
 330
 331	/* immutable, so don't need protecting */
 332	sector_t block;
 333	void *data;
 334	unsigned char data_mode;		/* DATA_MODE_* */
 335
 336	/*
 337	 * These two fields are used in isolation, so do not need
 338	 * a surrounding lock.
 339	 */
 340	atomic_t hold_count;
 341	unsigned long last_accessed;
 342
 343	/*
 344	 * Everything else is protected by the mutex in
 345	 * dm_bufio_client
 346	 */
 347	unsigned long state;
 348	struct lru_entry lru;
 349	unsigned char list_mode;		/* LIST_* */
 350	blk_status_t read_error;
 351	blk_status_t write_error;
 352	unsigned int dirty_start;
 353	unsigned int dirty_end;
 354	unsigned int write_start;
 355	unsigned int write_end;
 356	struct list_head write_list;
 357	struct dm_bufio_client *c;
 358	void (*end_io)(struct dm_buffer *b, blk_status_t bs);
 359#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 360#define MAX_STACK 10
 361	unsigned int stack_len;
 362	unsigned long stack_entries[MAX_STACK];
 363#endif
 364};
 365
 366/*--------------------------------------------------------------*/
 367
 368/*
 369 * The buffer cache manages buffers, particularly:
 370 *  - inc/dec of holder count
 371 *  - setting the last_accessed field
 372 *  - maintains clean/dirty state along with lru
 373 *  - selecting buffers that match predicates
 374 *
 375 * It does *not* handle:
 376 *  - allocation/freeing of buffers
 377 *  - IO
 378 *  - eviction or cache sizing
 379 *
 380 * cache_get() and cache_put() are threadsafe, you do not need to
 381 * protect these calls with a surrounding mutex.  All the other
 382 * methods are not threadsafe; they do use locking primitives, but
 383 * only enough to ensure get/put are threadsafe.
 384 */
 385
 386struct buffer_tree {
 387	union {
 388		struct rw_semaphore lock;
 389		rwlock_t spinlock;
 390	} u;
 391	struct rb_root root;
 392} ____cacheline_aligned_in_smp;
 393
 394struct dm_buffer_cache {
 395	struct lru lru[LIST_SIZE];
 396	/*
 397	 * We spread entries across multiple trees to reduce contention
 398	 * on the locks.
 399	 */
 400	unsigned int num_locks;
 401	bool no_sleep;
 402	struct buffer_tree trees[];
 403};
 404
 405static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
 406
 407static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
 408{
 409	return dm_hash_locks_index(block, num_locks);
 410}
 411
 412static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
 413{
 414	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 415		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 416	else
 417		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 418}
 419
 420static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
 421{
 422	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 423		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 424	else
 425		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 426}
 427
 428static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
 429{
 430	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 431		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 432	else
 433		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 434}
 435
 436static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
 437{
 438	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 439		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 440	else
 441		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 442}
 443
 444/*
 445 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
 446 * This struct helps avoid redundant drops and gets of the same lock.
 447 */
 448struct lock_history {
 449	struct dm_buffer_cache *cache;
 450	bool write;
 451	unsigned int previous;
 452	unsigned int no_previous;
 453};
 454
 455static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
 456{
 457	lh->cache = cache;
 458	lh->write = write;
 459	lh->no_previous = cache->num_locks;
 460	lh->previous = lh->no_previous;
 461}
 462
 463static void __lh_lock(struct lock_history *lh, unsigned int index)
 464{
 465	if (lh->write) {
 466		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 467			write_lock_bh(&lh->cache->trees[index].u.spinlock);
 468		else
 469			down_write(&lh->cache->trees[index].u.lock);
 470	} else {
 471		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 472			read_lock_bh(&lh->cache->trees[index].u.spinlock);
 473		else
 474			down_read(&lh->cache->trees[index].u.lock);
 475	}
 476}
 477
 478static void __lh_unlock(struct lock_history *lh, unsigned int index)
 479{
 480	if (lh->write) {
 481		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 482			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
 483		else
 484			up_write(&lh->cache->trees[index].u.lock);
 485	} else {
 486		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 487			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
 488		else
 489			up_read(&lh->cache->trees[index].u.lock);
 490	}
 491}
 492
 493/*
 494 * Make sure you call this since it will unlock the final lock.
 495 */
 496static void lh_exit(struct lock_history *lh)
 497{
 498	if (lh->previous != lh->no_previous) {
 499		__lh_unlock(lh, lh->previous);
 500		lh->previous = lh->no_previous;
 501	}
 502}
 503
 504/*
 505 * Named 'next' because there is no corresponding
 506 * 'up/unlock' call since it's done automatically.
 507 */
 508static void lh_next(struct lock_history *lh, sector_t b)
 509{
 510	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
 511
 512	if (lh->previous != lh->no_previous) {
 513		if (lh->previous != index) {
 514			__lh_unlock(lh, lh->previous);
 515			__lh_lock(lh, index);
 516			lh->previous = index;
 517		}
 518	} else {
 519		__lh_lock(lh, index);
 520		lh->previous = index;
 521	}
 522}
 523
 524static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
 525{
 526	return container_of(le, struct dm_buffer, lru);
 527}
 528
 529static struct dm_buffer *list_to_buffer(struct list_head *l)
 530{
 531	struct lru_entry *le = list_entry(l, struct lru_entry, list);
 532
 533	return le_to_buffer(le);
 534}
 535
 536static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
 537{
 538	unsigned int i;
 539
 540	bc->num_locks = num_locks;
 541	bc->no_sleep = no_sleep;
 542
 543	for (i = 0; i < bc->num_locks; i++) {
 544		if (no_sleep)
 545			rwlock_init(&bc->trees[i].u.spinlock);
 546		else
 547			init_rwsem(&bc->trees[i].u.lock);
 548		bc->trees[i].root = RB_ROOT;
 549	}
 550
 551	lru_init(&bc->lru[LIST_CLEAN]);
 552	lru_init(&bc->lru[LIST_DIRTY]);
 553}
 554
 555static void cache_destroy(struct dm_buffer_cache *bc)
 556{
 557	unsigned int i;
 558
 559	for (i = 0; i < bc->num_locks; i++)
 560		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
 561
 562	lru_destroy(&bc->lru[LIST_CLEAN]);
 563	lru_destroy(&bc->lru[LIST_DIRTY]);
 564}
 565
 566/*--------------*/
 567
 568/*
 569 * not threadsafe, or racy depending on how you look at it
 570 */
 571static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
 572{
 573	return bc->lru[list_mode].count;
 574}
 575
 576static inline unsigned long cache_total(struct dm_buffer_cache *bc)
 577{
 578	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
 579}
 580
 581/*--------------*/
 582
 583/*
 584 * Gets a specific buffer, indexed by block.
 585 * If the buffer is found then its holder count will be incremented and
 586 * lru_reference will be called.
 587 *
 588 * threadsafe
 589 */
 590static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
 591{
 592	struct rb_node *n = root->rb_node;
 593	struct dm_buffer *b;
 594
 595	while (n) {
 596		b = container_of(n, struct dm_buffer, node);
 597
 598		if (b->block == block)
 599			return b;
 600
 601		n = block < b->block ? n->rb_left : n->rb_right;
 602	}
 603
 604	return NULL;
 605}
 606
 607static void __cache_inc_buffer(struct dm_buffer *b)
 608{
 609	atomic_inc(&b->hold_count);
 610	WRITE_ONCE(b->last_accessed, jiffies);
 611}
 612
 613static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
 614{
 615	struct dm_buffer *b;
 616
 617	cache_read_lock(bc, block);
 618	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
 619	if (b) {
 620		lru_reference(&b->lru);
 621		__cache_inc_buffer(b);
 622	}
 623	cache_read_unlock(bc, block);
 624
 625	return b;
 626}
 627
 628/*--------------*/
 629
 630/*
 631 * Returns true if the hold count hits zero.
 632 * threadsafe
 633 */
 634static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
 635{
 636	bool r;
 637
 638	cache_read_lock(bc, b->block);
 639	BUG_ON(!atomic_read(&b->hold_count));
 640	r = atomic_dec_and_test(&b->hold_count);
 641	cache_read_unlock(bc, b->block);
 642
 643	return r;
 644}
 645
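/*
 * Editor's note -- sketch of the get/put pairing these two helpers
 * implement (hypothetical caller):
 *
 *	b = cache_get(bc, block);
 *	if (b) {
 *		...inspect b...
 *		cache_put(bc, b);
 *	}
 */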
 646/*--------------*/
 647
 648typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);
 649
 650/*
 651 * Evicts a buffer based on a predicate.  The oldest buffer that
 652 * matches the predicate will be selected.  In addition to the
 653 * predicate the hold_count of the selected buffer will be zero.
 654 */
 655struct evict_wrapper {
 656	struct lock_history *lh;
 657	b_predicate pred;
 658	void *context;
 659};
 660
 661/*
 662 * Wraps the buffer predicate turning it into an lru predicate.  Adds
 663 * extra test for hold_count.
 664 */
 665static enum evict_result __evict_pred(struct lru_entry *le, void *context)
 666{
 667	struct evict_wrapper *w = context;
 668	struct dm_buffer *b = le_to_buffer(le);
 669
 670	lh_next(w->lh, b->block);
 671
 672	if (atomic_read(&b->hold_count))
 673		return ER_DONT_EVICT;
 674
 675	return w->pred(b, w->context);
 676}
 677
 678static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
 679				       b_predicate pred, void *context,
 680				       struct lock_history *lh)
 681{
 682	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
 683	struct lru_entry *le;
 684	struct dm_buffer *b;
 685
 686	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
 687	if (!le)
 688		return NULL;
 689
 690	b = le_to_buffer(le);
 691	/* __evict_pred will have locked the appropriate tree. */
 692	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
 693
 694	return b;
 695}
 696
 697static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
 698				     b_predicate pred, void *context)
 699{
 700	struct dm_buffer *b;
 701	struct lock_history lh;
 702
 703	lh_init(&lh, bc, true);
 704	b = __cache_evict(bc, list_mode, pred, context, &lh);
 705	lh_exit(&lh);
 706
 707	return b;
 708}
 709
 710/*--------------*/
 711
 712/*
 713 * Mark a buffer as clean or dirty. Not threadsafe.
 714 */
 715static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
 716{
 717	cache_write_lock(bc, b->block);
 718	if (list_mode != b->list_mode) {
 719		lru_remove(&bc->lru[b->list_mode], &b->lru);
 720		b->list_mode = list_mode;
 721		lru_insert(&bc->lru[b->list_mode], &b->lru);
 722	}
 723	cache_write_unlock(bc, b->block);
 724}
 725
 726/*--------------*/
 727
 728/*
 729 * Runs through the lru associated with 'old_mode', if the predicate matches then
 730 * it moves them to 'new_mode'.  Not threadsafe.
 731 */
 732static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
 733			      b_predicate pred, void *context, struct lock_history *lh)
 734{
 735	struct lru_entry *le;
 736	struct dm_buffer *b;
 737	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
 738
 739	while (true) {
 740		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
 741		if (!le)
 742			break;
 743
 744		b = le_to_buffer(le);
 745		b->list_mode = new_mode;
 746		lru_insert(&bc->lru[b->list_mode], &b->lru);
 747	}
 748}
 749
 750static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
 751			    b_predicate pred, void *context)
 752{
 753	struct lock_history lh;
 754
 755	lh_init(&lh, bc, true);
 756	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
 757	lh_exit(&lh);
 758}
 759
 760/*--------------*/
 761
 762/*
 763 * Iterates through all clean or dirty entries calling a function for each
 764 * entry.  The callback may terminate the iteration early.  Not threadsafe.
 765 */
 766
 767/*
 768 * Iterator functions should return one of these actions to indicate
 769 * how the iteration should proceed.
 770 */
 771enum it_action {
 772	IT_NEXT,
 773	IT_COMPLETE,
 774};
 775
 776typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);
 777
 778static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
 779			    iter_fn fn, void *context, struct lock_history *lh)
 780{
 781	struct lru *lru = &bc->lru[list_mode];
 782	struct lru_entry *le, *first;
 783
 784	if (!lru->cursor)
 785		return;
 786
 787	first = le = to_le(lru->cursor);
 788	do {
 789		struct dm_buffer *b = le_to_buffer(le);
 790
 791		lh_next(lh, b->block);
 792
 793		switch (fn(b, context)) {
 794		case IT_NEXT:
 795			break;
 796
 797		case IT_COMPLETE:
 798			return;
 799		}
 800		cond_resched();
 801
 802		le = to_le(le->list.next);
 803	} while (le != first);
 804}
 805
 806static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
 807			  iter_fn fn, void *context)
 808{
 809	struct lock_history lh;
 810
 811	lh_init(&lh, bc, false);
 812	__cache_iterate(bc, list_mode, fn, context, &lh);
 813	lh_exit(&lh);
 814}
 815
 816/*--------------*/
 817
 818/*
 819 * Passes ownership of the buffer to the cache. Returns false if the
 820 * buffer was already present (in which case ownership does not pass).
 821 * e.g. a race with another thread.
 822 *
 823 * Holder count should be 1 on insertion.
 824 *
 825 * Not threadsafe.
 826 */
 827static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
 828{
 829	struct rb_node **new = &root->rb_node, *parent = NULL;
 830	struct dm_buffer *found;
 831
 832	while (*new) {
 833		found = container_of(*new, struct dm_buffer, node);
 834
 835		if (found->block == b->block)
 836			return false;
 837
 838		parent = *new;
 839		new = b->block < found->block ?
 840			&found->node.rb_left : &found->node.rb_right;
 841	}
 842
 843	rb_link_node(&b->node, parent, new);
 844	rb_insert_color(&b->node, root);
 845
 846	return true;
 847}
 848
 849static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
 850{
 851	bool r;
 852
 853	if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
 854		return false;
 855
 856	cache_write_lock(bc, b->block);
 857	BUG_ON(atomic_read(&b->hold_count) != 1);
 858	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
 859	if (r)
 860		lru_insert(&bc->lru[b->list_mode], &b->lru);
 861	cache_write_unlock(bc, b->block);
 862
 863	return r;
 864}
 865
 866/*--------------*/
 867
 868/*
 869 * Removes a buffer from the cache; ownership of the buffer passes back to the caller.
 870 * Fails if the hold_count is not one (i.e. it succeeds only when the caller holds the only reference).
 871 *
 872 * Not threadsafe.
 873 */
 874static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
 875{
 876	bool r;
 877
 878	cache_write_lock(bc, b->block);
 879
 880	if (atomic_read(&b->hold_count) != 1) {
 881		r = false;
 882	} else {
 883		r = true;
 884		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
 885		lru_remove(&bc->lru[b->list_mode], &b->lru);
 886	}
 887
 888	cache_write_unlock(bc, b->block);
 889
 890	return r;
 891}
 892
 893/*--------------*/
 894
 895typedef void (*b_release)(struct dm_buffer *);
 896
 897static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
 898{
 899	struct rb_node *n = root->rb_node;
 900	struct dm_buffer *b;
 901	struct dm_buffer *best = NULL;
 902
 903	while (n) {
 904		b = container_of(n, struct dm_buffer, node);
 905
 906		if (b->block == block)
 907			return b;
 908
 909		if (block <= b->block) {
 910			n = n->rb_left;
 911			best = b;
 912		} else {
 913			n = n->rb_right;
 914		}
 915	}
 916
 917	return best;
 918}
 919
 920static void __remove_range(struct dm_buffer_cache *bc,
 921			   struct rb_root *root,
 922			   sector_t begin, sector_t end,
 923			   b_predicate pred, b_release release)
 924{
 925	struct dm_buffer *b;
 926
 927	while (true) {
 928		cond_resched();
 929
 930		b = __find_next(root, begin);
 931		if (!b || (b->block >= end))
 932			break;
 933
 934		begin = b->block + 1;
 935
 936		if (atomic_read(&b->hold_count))
 937			continue;
 938
 939		if (pred(b, NULL) == ER_EVICT) {
 940			rb_erase(&b->node, root);
 941			lru_remove(&bc->lru[b->list_mode], &b->lru);
 942			release(b);
 943		}
 944	}
 945}
 946
 947static void cache_remove_range(struct dm_buffer_cache *bc,
 948			       sector_t begin, sector_t end,
 949			       b_predicate pred, b_release release)
 950{
 951	unsigned int i;
 952
 953	BUG_ON(bc->no_sleep);
 954	for (i = 0; i < bc->num_locks; i++) {
 955		down_write(&bc->trees[i].u.lock);
 956		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
 957		up_write(&bc->trees[i].u.lock);
 958	}
 959}
 960
 961/*----------------------------------------------------------------*/
 962
 963/*
 964 * Linking of buffers:
 965 *	All buffers are linked to buffer_cache with their node field.
 966 *
 967 *	Clean buffers that are not being written (B_WRITING not set)
 968 *	are linked to lru[LIST_CLEAN] with their lru field.
 969 *
 970 *	Dirty and clean buffers that are being written are linked to
 971 *	lru[LIST_DIRTY] with their lru field. When the write
 972 *	finishes, the buffer cannot be relinked immediately (because we
 973 *	are in an interrupt context and relinking requires process
 974 *	context), so some clean-not-writing buffers can be held on the
 975 *	dirty lru too.  They are later moved to the clean lru in process
 976 *	context.
 977 */
 978struct dm_bufio_client {
 979	struct block_device *bdev;
 980	unsigned int block_size;
 981	s8 sectors_per_block_bits;
 982
 983	bool no_sleep;
 984	struct mutex lock;
 985	spinlock_t spinlock;
 986
 987	int async_write_error;
 988
 989	void (*alloc_callback)(struct dm_buffer *buf);
 990	void (*write_callback)(struct dm_buffer *buf);
 991	struct kmem_cache *slab_buffer;
 992	struct kmem_cache *slab_cache;
 993	struct dm_io_client *dm_io;
 994
 995	struct list_head reserved_buffers;
 996	unsigned int need_reserved_buffers;
 997
 998	unsigned int minimum_buffers;
 999
1000	sector_t start;
1001
1002	struct shrinker *shrinker;
1003	struct work_struct shrink_work;
1004	atomic_long_t need_shrink;
1005
1006	wait_queue_head_t free_buffer_wait;
1007
1008	struct list_head client_list;
1009
1010	/*
1011	 * Used by global_cleanup to sort the clients list.
1012	 */
1013	unsigned long oldest_buffer;
1014
1015	struct dm_buffer_cache cache; /* must be last member */
1016};
1017
1018/*----------------------------------------------------------------*/
1019
1020#define dm_bufio_in_request()	(!!current->bio_list)
1021
1022static void dm_bufio_lock(struct dm_bufio_client *c)
1023{
1024	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1025		spin_lock_bh(&c->spinlock);
1026	else
1027		mutex_lock_nested(&c->lock, dm_bufio_in_request());
1028}
1029
1030static void dm_bufio_unlock(struct dm_bufio_client *c)
1031{
1032	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1033		spin_unlock_bh(&c->spinlock);
1034	else
1035		mutex_unlock(&c->lock);
1036}
1037
1038/*----------------------------------------------------------------*/
1039
1040/*
1041 * Default cache size: available memory divided by the ratio.
1042 */
1043static unsigned long dm_bufio_default_cache_size;
1044
1045/*
1046 * Total cache size set by the user.
1047 */
1048static unsigned long dm_bufio_cache_size;
1049
1050/*
1051 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
1052 * at any time.  If it disagrees, the user has changed cache size.
1053 */
1054static unsigned long dm_bufio_cache_size_latch;
1055
1056static DEFINE_SPINLOCK(global_spinlock);
1057
1058/*
1059 * Buffers are freed after this timeout
1060 */
1061static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
1062static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
1063
1064static unsigned long dm_bufio_peak_allocated;
1065static unsigned long dm_bufio_allocated_kmem_cache;
1066static unsigned long dm_bufio_allocated_kmalloc;
1067static unsigned long dm_bufio_allocated_get_free_pages;
1068static unsigned long dm_bufio_allocated_vmalloc;
1069static unsigned long dm_bufio_current_allocated;
1070
1071/*----------------------------------------------------------------*/
1072
1073/*
1074 * The current number of clients.
1075 */
1076static int dm_bufio_client_count;
1077
1078/*
1079 * The list of all clients.
1080 */
1081static LIST_HEAD(dm_bufio_all_clients);
1082
1083/*
1084 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
1085 */
1086static DEFINE_MUTEX(dm_bufio_clients_lock);
1087
1088static struct workqueue_struct *dm_bufio_wq;
1089static struct delayed_work dm_bufio_cleanup_old_work;
1090static struct work_struct dm_bufio_replacement_work;
1091
1092
1093#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1094static void buffer_record_stack(struct dm_buffer *b)
1095{
1096	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
1097}
1098#endif
1099
1100/*----------------------------------------------------------------*/
1101
1102static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
1103{
1104	unsigned char data_mode;
1105	long diff;
1106
1107	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
1108		&dm_bufio_allocated_kmem_cache,
1109		&dm_bufio_allocated_kmalloc,
1110		&dm_bufio_allocated_get_free_pages,
1111		&dm_bufio_allocated_vmalloc,
1112	};
1113
1114	data_mode = b->data_mode;
1115	diff = (long)b->c->block_size;
1116	if (unlink)
1117		diff = -diff;
1118
1119	spin_lock(&global_spinlock);
1120
1121	*class_ptr[data_mode] += diff;
1122
1123	dm_bufio_current_allocated += diff;
1124
1125	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
1126		dm_bufio_peak_allocated = dm_bufio_current_allocated;
1127
1128	if (!unlink) {
1129		if (dm_bufio_current_allocated > dm_bufio_cache_size)
1130			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
1131	}
1132
1133	spin_unlock(&global_spinlock);
1134}
1135
1136/*
1137 * Recalculate the latched cache size; called when the client count or the user-set cache size changes.
1138 */
1139static void __cache_size_refresh(void)
1140{
1141	if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
1142		return;
1143	if (WARN_ON(dm_bufio_client_count < 0))
1144		return;
1145
1146	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
1147
1148	/*
1149	 * Use default if set to 0 and report the actual cache size used.
1150	 */
1151	if (!dm_bufio_cache_size_latch) {
1152		(void)cmpxchg(&dm_bufio_cache_size, 0,
1153			      dm_bufio_default_cache_size);
1154		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
1155	}
1156}
1157
1158/*
1159 * Allocating buffer data.
1160 *
1161 * Small buffers are allocated with kmem_cache, to use space optimally.
1162 *
1163 * For large buffers, we choose between get_free_pages and vmalloc.
1164 * Each has advantages and disadvantages.
1165 *
1166 * __get_free_pages can randomly fail if the memory is fragmented.
1167 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
1168 * as low as 128M) so using it for caching is not appropriate.
1169 *
1170 * If the allocation may fail we use __get_free_pages. Memory fragmentation
1171 * won't have a fatal effect here: it merely causes some other buffers to be
1172 * flushed and more I/O to be performed. Don't use __get_free_pages if it
1173 * always fails (i.e. order > MAX_PAGE_ORDER).
1174 *
1175 * If the allocation shouldn't fail we use __vmalloc. This is only for the
1176 * initial reserve allocation, so there's no risk of wasting all vmalloc
1177 * space.
1178 */
1179static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1180			       unsigned char *data_mode)
1181{
1182	if (unlikely(c->slab_cache != NULL)) {
1183		*data_mode = DATA_MODE_SLAB;
1184		return kmem_cache_alloc(c->slab_cache, gfp_mask);
1185	}
1186
1187	if (unlikely(c->block_size < PAGE_SIZE)) {
1188		*data_mode = DATA_MODE_KMALLOC;
1189		return kmalloc(c->block_size, gfp_mask | __GFP_RECLAIMABLE);
1190	}
1191
1192	if (c->block_size <= KMALLOC_MAX_SIZE &&
1193	    gfp_mask & __GFP_NORETRY) {
1194		*data_mode = DATA_MODE_GET_FREE_PAGES;
1195		return (void *)__get_free_pages(gfp_mask,
1196						c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1197	}
1198
1199	*data_mode = DATA_MODE_VMALLOC;
1200
1201	return __vmalloc(c->block_size, gfp_mask);
1202}
1203
1204/*
1205 * Free buffer's data.
1206 */
1207static void free_buffer_data(struct dm_bufio_client *c,
1208			     void *data, unsigned char data_mode)
1209{
1210	switch (data_mode) {
1211	case DATA_MODE_SLAB:
1212		kmem_cache_free(c->slab_cache, data);
1213		break;
1214
1215	case DATA_MODE_KMALLOC:
1216		kfree(data);
1217		break;
1218
1219	case DATA_MODE_GET_FREE_PAGES:
1220		free_pages((unsigned long)data,
1221			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1222		break;
1223
1224	case DATA_MODE_VMALLOC:
1225		vfree(data);
1226		break;
1227
1228	default:
1229		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
1230		       data_mode);
1231		BUG();
1232	}
1233}
1234
1235/*
1236 * Allocate buffer and its data.
1237 */
1238static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
1239{
1240	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
1241
1242	if (!b)
1243		return NULL;
1244
1245	b->c = c;
1246
1247	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
1248	if (!b->data) {
1249		kmem_cache_free(c->slab_buffer, b);
1250		return NULL;
1251	}
1252	adjust_total_allocated(b, false);
1253
1254#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1255	b->stack_len = 0;
1256#endif
1257	return b;
1258}
1259
1260/*
1261 * Free buffer and its data.
1262 */
1263static void free_buffer(struct dm_buffer *b)
1264{
1265	struct dm_bufio_client *c = b->c;
1266
1267	adjust_total_allocated(b, true);
1268	free_buffer_data(c, b->data, b->data_mode);
1269	kmem_cache_free(c->slab_buffer, b);
1270}
1271
1272/*
1273 *--------------------------------------------------------------------------
1274 * Submit I/O on the buffer.
1275 *
1276 * Bio interface is faster but it has some problems:
1277 *	the vector list is limited (increasing this limit increases
1278 *	memory-consumption per buffer, so it is not viable);
1279 *
1280 *	the memory must be direct-mapped, not vmalloced;
1281 *
1282 * If the buffer data fits in a single bio vec (i.e. it is physically
1283 * contiguous and not vmalloced), try using the bio interface.
1284 *
1285 * If the buffer is big, if it is vmalloced or if the underlying device
1286 * rejects the bio because it is too large, use dm-io layer to do the I/O.
1287 * The dm-io layer splits the I/O into multiple requests, avoiding the above
1288 * shortcomings.
1289 *--------------------------------------------------------------------------
1290 */
1291
1292/*
1293 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
1294 * that the request was handled directly with bio interface.
1295 */
1296static void dmio_complete(unsigned long error, void *context)
1297{
1298	struct dm_buffer *b = context;
1299
1300	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
1301}
1302
1303static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
1304		     unsigned int n_sectors, unsigned int offset,
1305		     unsigned short ioprio)
1306{
1307	int r;
1308	struct dm_io_request io_req = {
1309		.bi_opf = op,
1310		.notify.fn = dmio_complete,
1311		.notify.context = b,
1312		.client = b->c->dm_io,
1313	};
1314	struct dm_io_region region = {
1315		.bdev = b->c->bdev,
1316		.sector = sector,
1317		.count = n_sectors,
1318	};
1319
1320	if (b->data_mode != DATA_MODE_VMALLOC) {
1321		io_req.mem.type = DM_IO_KMEM;
1322		io_req.mem.ptr.addr = (char *)b->data + offset;
1323	} else {
1324		io_req.mem.type = DM_IO_VMA;
1325		io_req.mem.ptr.vma = (char *)b->data + offset;
1326	}
1327
1328	r = dm_io(&io_req, 1, &region, NULL, ioprio);
1329	if (unlikely(r))
1330		b->end_io(b, errno_to_blk_status(r));
1331}
1332
1333static void bio_complete(struct bio *bio)
1334{
1335	struct dm_buffer *b = bio->bi_private;
1336	blk_status_t status = bio->bi_status;
1337
1338	bio_uninit(bio);
1339	kfree(bio);
1340	b->end_io(b, status);
1341}
1342
1343static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
1344		    unsigned int n_sectors, unsigned int offset,
1345		    unsigned short ioprio)
1346{
1347	struct bio *bio;
1348	char *ptr;
1349	unsigned int len;
1350
1351	bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
1352	if (!bio) {
1353		use_dmio(b, op, sector, n_sectors, offset, ioprio);
1354		return;
1355	}
1356	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
1357	bio->bi_iter.bi_sector = sector;
1358	bio->bi_end_io = bio_complete;
1359	bio->bi_private = b;
1360	bio->bi_ioprio = ioprio;
1361
1362	ptr = (char *)b->data + offset;
1363	len = n_sectors << SECTOR_SHIFT;
1364
1365	__bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));
1366
1367	submit_bio(bio);
1368}
1369
1370static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
1371{
1372	sector_t sector;
1373
1374	if (likely(c->sectors_per_block_bits >= 0))
1375		sector = block << c->sectors_per_block_bits;
1376	else
1377		sector = block * (c->block_size >> SECTOR_SHIFT);
1378	sector += c->start;
1379
1380	return sector;
1381}
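/*
 * Worked example: with a 4096-byte block size, sectors_per_block_bits
 * is 3 (one block is 8 sectors), so block 10 maps to sector 80; with a
 * client offset of c->start == 2048 that becomes sector 2128.  Clients
 * with a non-power-of-2 block size take the multiplication path.
 */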
1382
1383static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
1384		      void (*end_io)(struct dm_buffer *, blk_status_t))
1385{
1386	unsigned int n_sectors;
1387	sector_t sector;
1388	unsigned int offset, end;
1389
1390	b->end_io = end_io;
1391
1392	sector = block_to_sector(b->c, b->block);
1393
1394	if (op != REQ_OP_WRITE) {
1395		n_sectors = b->c->block_size >> SECTOR_SHIFT;
1396		offset = 0;
1397	} else {
1398		if (b->c->write_callback)
1399			b->c->write_callback(b);
1400		offset = b->write_start;
1401		end = b->write_end;
1402		offset &= -DM_BUFIO_WRITE_ALIGN;
1403		end += DM_BUFIO_WRITE_ALIGN - 1;
1404		end &= -DM_BUFIO_WRITE_ALIGN;
1405		if (unlikely(end > b->c->block_size))
1406			end = b->c->block_size;
1407
1408		sector += offset >> SECTOR_SHIFT;
1409		n_sectors = (end - offset) >> SECTOR_SHIFT;
1410	}
1411
1412	if (b->data_mode != DATA_MODE_VMALLOC)
1413		use_bio(b, op, sector, n_sectors, offset, ioprio);
1414	else
1415		use_dmio(b, op, sector, n_sectors, offset, ioprio);
1416}
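/*
 * Worked example of the write rounding above: a buffer with
 * write_start == 100 and write_end == 700 is rounded out to the
 * DM_BUFIO_WRITE_ALIGN (4096) boundaries, giving offset == 0 and
 * end == 4096, i.e. a single aligned 4 KiB write of 8 sectors.
 */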
1417
1418/*
1419 *--------------------------------------------------------------
1420 * Writing dirty buffers
1421 *--------------------------------------------------------------
1422 */
1423
1424/*
1425 * The endio routine for write.
1426 *
1427 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
1428 * it.
1429 */
1430static void write_endio(struct dm_buffer *b, blk_status_t status)
1431{
1432	b->write_error = status;
1433	if (unlikely(status)) {
1434		struct dm_bufio_client *c = b->c;
1435
1436		(void)cmpxchg(&c->async_write_error, 0,
1437				blk_status_to_errno(status));
1438	}
1439
1440	BUG_ON(!test_bit(B_WRITING, &b->state));
1441
1442	smp_mb__before_atomic();
1443	clear_bit(B_WRITING, &b->state);
1444	smp_mb__after_atomic();
1445
1446	wake_up_bit(&b->state, B_WRITING);
1447}
1448
1449/*
1450 * Initiate a write on a dirty buffer, but don't wait for it.
1451 *
1452 * - If the buffer is not dirty, exit.
1453 * - If there is some previous write going on, wait for it to finish (we can't
1454 *   have two writes on the same buffer simultaneously).
1455 * - Submit our write and don't wait on it. We set B_WRITING indicating
1456 *   that there is a write in progress.
1457 */
1458static void __write_dirty_buffer(struct dm_buffer *b,
1459				 struct list_head *write_list)
1460{
1461	if (!test_bit(B_DIRTY, &b->state))
1462		return;
1463
1464	clear_bit(B_DIRTY, &b->state);
1465	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1466
1467	b->write_start = b->dirty_start;
1468	b->write_end = b->dirty_end;
1469
1470	if (!write_list)
1471		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1472	else
1473		list_add_tail(&b->write_list, write_list);
1474}
1475
1476static void __flush_write_list(struct list_head *write_list)
1477{
1478	struct blk_plug plug;
1479
1480	blk_start_plug(&plug);
1481	while (!list_empty(write_list)) {
1482		struct dm_buffer *b =
1483			list_entry(write_list->next, struct dm_buffer, write_list);
1484		list_del(&b->write_list);
1485		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1486		cond_resched();
1487	}
1488	blk_finish_plug(&plug);
1489}
1490
1491/*
1492 * Wait until any activity on the buffer finishes.  Possibly write the
1493 * buffer if it is dirty.  When this function finishes, there is no I/O
1494 * running on the buffer and the buffer is not dirty.
1495 */
1496static void __make_buffer_clean(struct dm_buffer *b)
1497{
1498	BUG_ON(atomic_read(&b->hold_count));
1499
1500	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
1501	if (!smp_load_acquire(&b->state))	/* fast case */
1502		return;
1503
1504	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1505	__write_dirty_buffer(b, NULL);
1506	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1507}
1508
1509static enum evict_result is_clean(struct dm_buffer *b, void *context)
1510{
1511	struct dm_bufio_client *c = context;
1512
1513	/* These should never happen */
1514	if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
1515		return ER_DONT_EVICT;
1516	if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
1517		return ER_DONT_EVICT;
1518	if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
1519		return ER_DONT_EVICT;
1520
1521	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
1522	    unlikely(test_bit(B_READING, &b->state)))
1523		return ER_DONT_EVICT;
1524
1525	return ER_EVICT;
1526}
1527
1528static enum evict_result is_dirty(struct dm_buffer *b, void *context)
1529{
1530	/* These should never happen */
1531	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1532		return ER_DONT_EVICT;
1533	if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
1534		return ER_DONT_EVICT;
1535
1536	return ER_EVICT;
1537}
1538
1539/*
1540 * Find some buffer that is not held by anybody, clean it, unlink it and
1541 * return it.
1542 */
1543static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
1544{
1545	struct dm_buffer *b;
1546
1547	b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
1548	if (b) {
1549		/* this also waits for pending reads */
1550		__make_buffer_clean(b);
1551		return b;
1552	}
1553
1554	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1555		return NULL;
1556
1557	b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
1558	if (b) {
1559		__make_buffer_clean(b);
1560		return b;
1561	}
1562
1563	return NULL;
1564}
1565
1566/*
1567 * Wait until some other thread frees a buffer or releases its hold count
1568 * on one.
1569 *
1570 * This function is entered with c->lock held, drops it and regains it
1571 * before exiting.
1572 */
1573static void __wait_for_free_buffer(struct dm_bufio_client *c)
1574{
1575	DECLARE_WAITQUEUE(wait, current);
1576
1577	add_wait_queue(&c->free_buffer_wait, &wait);
1578	set_current_state(TASK_UNINTERRUPTIBLE);
1579	dm_bufio_unlock(c);
1580
1581	/*
1582	 * It's possible to miss a wake-up event since we don't always
1583	 * hold c->lock when wake_up is called.  So we have a timeout here,
1584	 * just in case.
1585	 */
1586	io_schedule_timeout(5 * HZ);
1587
1588	remove_wait_queue(&c->free_buffer_wait, &wait);
1589
1590	dm_bufio_lock(c);
1591}
1592
1593enum new_flag {
1594	NF_FRESH = 0,
1595	NF_READ = 1,
1596	NF_GET = 2,
1597	NF_PREFETCH = 3
1598};
1599
1600/*
1601 * Allocate a new buffer. If the allocation is not possible, wait until
1602 * some other thread frees a buffer.
1603 *
1604 * May drop the lock and regain it.
1605 */
1606static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
1607{
1608	struct dm_buffer *b;
1609	bool tried_noio_alloc = false;
1610
1611	/*
1612	 * dm-bufio is resistant to allocation failures (it just keeps
1613	 * one buffer reserved in case all the allocations fail).
1614	 * So set flags to not try too hard:
1615	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
1616	 *		    mutex and wait ourselves.
1617	 *	__GFP_NORETRY: don't retry and rather return failure
1618	 *	__GFP_NOMEMALLOC: don't use emergency reserves
1619	 *	__GFP_NOWARN: don't print a warning in case of failure
1620	 *
1621	 * For debugging, if we set the cache size to 1, no new buffers will
1622	 * be allocated.
1623	 */
1624	while (1) {
1625		if (dm_bufio_cache_size_latch != 1) {
1626			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1627			if (b)
1628				return b;
1629		}
1630
1631		if (nf == NF_PREFETCH)
1632			return NULL;
1633
1634		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
1635			dm_bufio_unlock(c);
1636			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1637			dm_bufio_lock(c);
1638			if (b)
1639				return b;
1640			tried_noio_alloc = true;
1641		}
1642
1643		if (!list_empty(&c->reserved_buffers)) {
1644			b = list_to_buffer(c->reserved_buffers.next);
1645			list_del(&b->lru.list);
1646			c->need_reserved_buffers++;
1647
1648			return b;
1649		}
1650
1651		b = __get_unclaimed_buffer(c);
1652		if (b)
1653			return b;
1654
1655		__wait_for_free_buffer(c);
1656	}
1657}
1658
1659static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
1660{
1661	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
1662
1663	if (!b)
1664		return NULL;
1665
1666	if (c->alloc_callback)
1667		c->alloc_callback(b);
1668
1669	return b;
1670}
1671
1672/*
1673 * Free a buffer and wake other threads waiting for free buffers.
1674 */
1675static void __free_buffer_wake(struct dm_buffer *b)
1676{
1677	struct dm_bufio_client *c = b->c;
1678
1679	b->block = -1;
1680	if (!c->need_reserved_buffers)
1681		free_buffer(b);
1682	else {
1683		list_add(&b->lru.list, &c->reserved_buffers);
1684		c->need_reserved_buffers--;
1685	}
1686
1687	/*
1688	 * We hold the bufio lock here, so no one can add entries to the
1689	 * wait queue anyway.
1690	 */
1691	if (unlikely(waitqueue_active(&c->free_buffer_wait)))
1692		wake_up(&c->free_buffer_wait);
1693}
1694
1695static enum evict_result cleaned(struct dm_buffer *b, void *context)
1696{
1697	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1698		return ER_DONT_EVICT; /* should never happen */
1699
1700	if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
1701		return ER_DONT_EVICT;
1702	else
1703		return ER_EVICT;
1704}
1705
1706static void __move_clean_buffers(struct dm_bufio_client *c)
1707{
1708	cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
1709}
1710
1711struct write_context {
1712	int no_wait;
1713	struct list_head *write_list;
1714};
1715
1716static enum it_action write_one(struct dm_buffer *b, void *context)
1717{
1718	struct write_context *wc = context;
1719
1720	if (wc->no_wait && test_bit(B_WRITING, &b->state))
1721		return IT_COMPLETE;
1722
1723	__write_dirty_buffer(b, wc->write_list);
1724	return IT_NEXT;
1725}
1726
1727static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
1728					struct list_head *write_list)
1729{
1730	struct write_context wc = {.no_wait = no_wait, .write_list = write_list};
1731
1732	__move_clean_buffers(c);
1733	cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
1734}
1735
1736/*
1737 * Check if we're over the writeback watermark.
1738 * If dirty buffers outnumber clean buffers by more than
1739 * DM_BUFIO_WRITEBACK_RATIO, start writing them back asynchronously.
1740 */
1741static void __check_watermark(struct dm_bufio_client *c,
1742			      struct list_head *write_list)
1743{
1744	if (cache_count(&c->cache, LIST_DIRTY) >
1745	    cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
1746		__write_dirty_buffers_async(c, 1, write_list);
1747}
1748
1749/*
1750 *--------------------------------------------------------------
1751 * Getting a buffer
1752 *--------------------------------------------------------------
1753 */
1754
1755static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
1756{
1757	/*
1758	 * Relying on waitqueue_active() is racy, but we sleep
1759	 * with schedule_timeout anyway.
1760	 */
1761	if (cache_put(&c->cache, b) &&
1762	    unlikely(waitqueue_active(&c->free_buffer_wait)))
1763		wake_up(&c->free_buffer_wait);
1764}
1765
1766/*
1767 * This assumes you have already checked the cache to see if the buffer
1768 * is already present (it will recheck after dropping the lock for allocation).
1769 */
1770static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1771				     enum new_flag nf, int *need_submit,
1772				     struct list_head *write_list)
1773{
1774	struct dm_buffer *b, *new_b = NULL;
1775
1776	*need_submit = 0;
1777
1778	/* This can't be called with NF_GET */
1779	if (WARN_ON_ONCE(nf == NF_GET))
1780		return NULL;
1781
1782	new_b = __alloc_buffer_wait(c, nf);
1783	if (!new_b)
1784		return NULL;
1785
1786	/*
1787	 * We've had a period where the mutex was unlocked, so need to
1788	 * recheck the buffer tree.
1789	 */
1790	b = cache_get(&c->cache, block);
1791	if (b) {
1792		__free_buffer_wake(new_b);
1793		goto found_buffer;
1794	}
1795
1796	__check_watermark(c, write_list);
1797
1798	b = new_b;
1799	atomic_set(&b->hold_count, 1);
1800	WRITE_ONCE(b->last_accessed, jiffies);
1801	b->block = block;
1802	b->read_error = 0;
1803	b->write_error = 0;
1804	b->list_mode = LIST_CLEAN;
1805
1806	if (nf == NF_FRESH)
1807		b->state = 0;
1808	else {
1809		b->state = 1 << B_READING;
1810		*need_submit = 1;
1811	}
1812
1813	/*
1814	 * We mustn't insert into the cache until the B_READING state
1815	 * is set.  Otherwise another thread could get it and use
1816	 * it before it had been read.
1817	 */
1818	cache_insert(&c->cache, b);
1819
1820	return b;
1821
1822found_buffer:
1823	if (nf == NF_PREFETCH) {
1824		cache_put_and_wake(c, b);
1825		return NULL;
1826	}
1827
1828	/*
1829	 * Note: it is essential that we don't wait for the buffer to be
1830	 * read if dm_bufio_get function is used. Both dm_bufio_get and
1831	 * dm_bufio_prefetch can be used in the driver request routine.
1832	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1833	 * the same buffer, it would deadlock if we waited.
1834	 */
1835	if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1836		cache_put_and_wake(c, b);
1837		return NULL;
1838	}
1839
1840	return b;
1841}
1842
1843/*
1844 * The endio routine for reading: set the error, clear the bit and wake up
1845 * anyone waiting on the buffer.
1846 */
1847static void read_endio(struct dm_buffer *b, blk_status_t status)
1848{
1849	b->read_error = status;
1850
1851	BUG_ON(!test_bit(B_READING, &b->state));
1852
1853	smp_mb__before_atomic();
1854	clear_bit(B_READING, &b->state);
1855	smp_mb__after_atomic();
1856
1857	wake_up_bit(&b->state, B_READING);
1858}
1859
1860/*
1861 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1862 * functions is similar except that dm_bufio_new doesn't read the
1863 * buffer from the disk (assuming that the caller overwrites all the data
1864 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1865 */
1866static void *new_read(struct dm_bufio_client *c, sector_t block,
1867		      enum new_flag nf, struct dm_buffer **bp,
1868		      unsigned short ioprio)
1869{
1870	int need_submit = 0;
1871	struct dm_buffer *b;
1872
1873	LIST_HEAD(write_list);
1874
1875	*bp = NULL;
1876
1877	/*
1878	 * Fast path, hopefully the block is already in the cache.  No need
1879	 * to get the client lock for this.
1880	 */
1881	b = cache_get(&c->cache, block);
1882	if (b) {
1883		if (nf == NF_PREFETCH) {
1884			cache_put_and_wake(c, b);
1885			return NULL;
1886		}
1887
1888		/*
1889		 * Note: it is essential that we don't wait for the buffer to be
1890		 * read if dm_bufio_get function is used. Both dm_bufio_get and
1891		 * dm_bufio_prefetch can be used in the driver request routine.
1892		 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1893		 * the same buffer, it would deadlock if we waited.
1894		 */
1895		if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1896			cache_put_and_wake(c, b);
1897			return NULL;
1898		}
1899	}
1900
1901	if (!b) {
1902		if (nf == NF_GET)
1903			return NULL;
1904
1905		dm_bufio_lock(c);
1906		b = __bufio_new(c, block, nf, &need_submit, &write_list);
1907		dm_bufio_unlock(c);
1908	}
1909
1910#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1911	if (b && (atomic_read(&b->hold_count) == 1))
1912		buffer_record_stack(b);
1913#endif
1914
1915	__flush_write_list(&write_list);
1916
1917	if (!b)
1918		return NULL;
1919
1920	if (need_submit)
1921		submit_io(b, REQ_OP_READ, ioprio, read_endio);
1922
1923	if (nf != NF_GET)	/* we already tested this condition above */
1924		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1925
1926	if (b->read_error) {
1927		int error = blk_status_to_errno(b->read_error);
1928
1929		dm_bufio_release(b);
1930
1931		return ERR_PTR(error);
1932	}
1933
1934	*bp = b;
1935
1936	return b->data;
1937}
1938
1939void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1940		   struct dm_buffer **bp)
1941{
1942	return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
1943}
1944EXPORT_SYMBOL_GPL(dm_bufio_get);
1945
1946static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1947			struct dm_buffer **bp, unsigned short ioprio)
1948{
1949	if (WARN_ON_ONCE(dm_bufio_in_request()))
1950		return ERR_PTR(-EINVAL);
1951
1952	return new_read(c, block, NF_READ, bp, ioprio);
1953}
1954
1955void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1956		    struct dm_buffer **bp)
1957{
1958	return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT);
1959}
1960EXPORT_SYMBOL_GPL(dm_bufio_read);
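/*
 * Example (illustrative sketch; hypothetical caller code with an
 * existing client "c" and block number "block"):
 *
 *	struct dm_buffer *buf;
 *	void *data;
 *
 *	data = dm_bufio_read(c, block, &buf);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... use the block contents through "data" ...
 *	dm_bufio_release(buf);
 *
 * On a read error the buffer has already been released and an ERR_PTR
 * is returned, so only the success path needs dm_bufio_release().
 */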
1961
1962void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
1963				struct dm_buffer **bp, unsigned short ioprio)
1964{
1965	return __dm_bufio_read(c, block, bp, ioprio);
1966}
1967EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);
1968
1969void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1970		   struct dm_buffer **bp)
1971{
1972	if (WARN_ON_ONCE(dm_bufio_in_request()))
1973		return ERR_PTR(-EINVAL);
1974
1975	return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
1976}
1977EXPORT_SYMBOL_GPL(dm_bufio_new);
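/*
 * Example (illustrative sketch; hypothetical caller code): since
 * dm_bufio_new() does not read the block, the caller must overwrite
 * the whole buffer before relying on its contents.
 *
 *	struct dm_buffer *buf;
 *	void *data;
 *
 *	data = dm_bufio_new(c, block, &buf);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 */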
1978
1979static void __dm_bufio_prefetch(struct dm_bufio_client *c,
1980			sector_t block, unsigned int n_blocks,
1981			unsigned short ioprio)
1982{
1983	struct blk_plug plug;
1984
1985	LIST_HEAD(write_list);
1986
1987	if (WARN_ON_ONCE(dm_bufio_in_request()))
1988		return; /* should never happen */
1989
1990	blk_start_plug(&plug);
1991
1992	for (; n_blocks--; block++) {
1993		int need_submit;
1994		struct dm_buffer *b;
1995
1996		b = cache_get(&c->cache, block);
1997		if (b) {
1998			/* already in cache */
1999			cache_put_and_wake(c, b);
2000			continue;
2001		}
2002
2003		dm_bufio_lock(c);
2004		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
2005				&write_list);
2006		if (unlikely(!list_empty(&write_list))) {
2007			dm_bufio_unlock(c);
2008			blk_finish_plug(&plug);
2009			__flush_write_list(&write_list);
2010			blk_start_plug(&plug);
2011			dm_bufio_lock(c);
2012		}
2013		if (unlikely(b != NULL)) {
2014			dm_bufio_unlock(c);
2015
2016			if (need_submit)
2017				submit_io(b, REQ_OP_READ, ioprio, read_endio);
2018			dm_bufio_release(b);
2019
2020			cond_resched();
2021
2022			if (!n_blocks)
2023				goto flush_plug;
2024			dm_bufio_lock(c);
2025		}
2026		dm_bufio_unlock(c);
2027	}
2028
2029flush_plug:
2030	blk_finish_plug(&plug);
2031}
2032
2033void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
2034{
2035	return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT);
2036}
2037EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
2038
2039void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
2040				unsigned int n_blocks, unsigned short ioprio)
2041{
2042	return __dm_bufio_prefetch(c, block, n_blocks, ioprio);
2043}
2044EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);
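/*
 * Example (illustrative sketch; hypothetical caller code): warm the
 * cache for a run of blocks, then read them; the subsequent reads then
 * mostly hit the cache-lookup fast path in new_read().
 *
 *	struct dm_buffer *buf;
 *	void *data;
 *	unsigned int i;
 *
 *	dm_bufio_prefetch(c, first_block, n_blocks);
 *	for (i = 0; i < n_blocks; i++) {
 *		data = dm_bufio_read(c, first_block + i, &buf);
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *		...
 *		dm_bufio_release(buf);
 *	}
 */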
2045
2046void dm_bufio_release(struct dm_buffer *b)
2047{
2048	struct dm_bufio_client *c = b->c;
2049
2050	/*
2051	 * If there were errors on the buffer, and the buffer is not
2052	 * to be written, free the buffer. There is no point in caching
2053	 * an invalid buffer.
2054	 */
2055	if ((b->read_error || b->write_error) &&
2056	    !test_bit_acquire(B_READING, &b->state) &&
2057	    !test_bit(B_WRITING, &b->state) &&
2058	    !test_bit(B_DIRTY, &b->state)) {
2059		dm_bufio_lock(c);
2060
2061		/* cache remove can fail if there are other holders */
2062		if (cache_remove(&c->cache, b)) {
2063			__free_buffer_wake(b);
2064			dm_bufio_unlock(c);
2065			return;
2066		}
2067
2068		dm_bufio_unlock(c);
2069	}
2070
2071	cache_put_and_wake(c, b);
2072}
2073EXPORT_SYMBOL_GPL(dm_bufio_release);
2074
2075void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
2076					unsigned int start, unsigned int end)
2077{
2078	struct dm_bufio_client *c = b->c;
2079
2080	BUG_ON(start >= end);
2081	BUG_ON(end > b->c->block_size);
2082
2083	dm_bufio_lock(c);
2084
2085	BUG_ON(test_bit(B_READING, &b->state));
2086
2087	if (!test_and_set_bit(B_DIRTY, &b->state)) {
2088		b->dirty_start = start;
2089		b->dirty_end = end;
2090		cache_mark(&c->cache, b, LIST_DIRTY);
2091	} else {
2092		if (start < b->dirty_start)
2093			b->dirty_start = start;
2094		if (end > b->dirty_end)
2095			b->dirty_end = end;
2096	}
2097
2098	dm_bufio_unlock(c);
2099}
2100EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
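/*
 * Example (illustrative sketch; hypothetical caller code): update only
 * bytes 64..127 of a block and record just that range as dirty.  For
 * block sizes larger than DM_BUFIO_WRITE_ALIGN this lets submit_io()
 * write back only the aligned region around the change rather than the
 * whole block.
 *
 *	struct dm_buffer *buf;
 *	void *data;
 *
 *	data = dm_bufio_read(c, block, &buf);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memcpy((char *)data + 64, src, 64);
 *	dm_bufio_mark_partial_buffer_dirty(buf, 64, 128);
 *	dm_bufio_release(buf);
 */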
2101
2102void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
2103{
2104	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
2105}
2106EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
2107
2108void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
2109{
2110	LIST_HEAD(write_list);
2111
2112	if (WARN_ON_ONCE(dm_bufio_in_request()))
2113		return; /* should never happen */
2114
2115	dm_bufio_lock(c);
2116	__write_dirty_buffers_async(c, 0, &write_list);
2117	dm_bufio_unlock(c);
2118	__flush_write_list(&write_list);
2119}
2120EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
2121
2122/*
2123 * For performance, it is essential that the buffers are written asynchronously
2124 * and simultaneously (so that the block layer can merge the writes) and then
2125 * waited upon.
2126 *
2127 * Finally, we flush hardware disk cache.
2128 */
2129static bool is_writing(struct lru_entry *e, void *context)
2130{
2131	struct dm_buffer *b = le_to_buffer(e);
2132
2133	return test_bit(B_WRITING, &b->state);
2134}
2135
2136int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
2137{
2138	int a, f;
2139	unsigned long nr_buffers;
2140	struct lru_entry *e;
2141	struct lru_iter it;
2142
2143	LIST_HEAD(write_list);
2144
2145	dm_bufio_lock(c);
2146	__write_dirty_buffers_async(c, 0, &write_list);
2147	dm_bufio_unlock(c);
2148	__flush_write_list(&write_list);
2149	dm_bufio_lock(c);
2150
2151	nr_buffers = cache_count(&c->cache, LIST_DIRTY);
2152	lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
2153	while ((e = lru_iter_next(&it, is_writing, c))) {
2154		struct dm_buffer *b = le_to_buffer(e);
2155		__cache_inc_buffer(b);
2156
2157		BUG_ON(test_bit(B_READING, &b->state));
2158
2159		if (nr_buffers) {
2160			nr_buffers--;
2161			dm_bufio_unlock(c);
2162			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2163			dm_bufio_lock(c);
2164		} else {
2165			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2166		}
2167
2168		if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
2169			cache_mark(&c->cache, b, LIST_CLEAN);
2170
2171		cache_put_and_wake(c, b);
2172
2173		cond_resched();
2174	}
2175	lru_iter_end(&it);
2176
2177	wake_up(&c->free_buffer_wait);
2178	dm_bufio_unlock(c);
2179
2180	a = xchg(&c->async_write_error, 0);
2181	f = dm_bufio_issue_flush(c);
2182	if (a)
2183		return a;
2184
2185	return f;
2186}
2187EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
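/*
 * Example (illustrative sketch; hypothetical caller code): a simple
 * commit point.  dm_bufio_write_dirty_buffers() returns any deferred
 * asynchronous write error and also issues the final cache flush.
 *
 *	int r;
 *
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 *	r = dm_bufio_write_dirty_buffers(c);
 *	if (r)
 *		return r;
 */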
2188
2189/*
2190 * Use dm-io to send an empty barrier to flush the device.
2191 */
2192int dm_bufio_issue_flush(struct dm_bufio_client *c)
2193{
2194	struct dm_io_request io_req = {
2195		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
2196		.mem.type = DM_IO_KMEM,
2197		.mem.ptr.addr = NULL,
2198		.client = c->dm_io,
2199	};
2200	struct dm_io_region io_reg = {
2201		.bdev = c->bdev,
2202		.sector = 0,
2203		.count = 0,
2204	};
2205
2206	if (WARN_ON_ONCE(dm_bufio_in_request()))
2207		return -EINVAL;
2208
2209	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2210}
2211EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
2212
2213/*
2214 * Use dm-io to send a discard request to flush the device.
2215 */
2216int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
2217{
2218	struct dm_io_request io_req = {
2219		.bi_opf = REQ_OP_DISCARD | REQ_SYNC,
2220		.mem.type = DM_IO_KMEM,
2221		.mem.ptr.addr = NULL,
2222		.client = c->dm_io,
2223	};
2224	struct dm_io_region io_reg = {
2225		.bdev = c->bdev,
2226		.sector = block_to_sector(c, block),
2227		.count = block_to_sector(c, count),
2228	};
2229
2230	if (WARN_ON_ONCE(dm_bufio_in_request()))
2231		return -EINVAL; /* discards are optional */
2232
2233	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2234}
2235EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
2236
2237static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
2238{
2239	struct dm_buffer *b;
2240
2241	b = cache_get(&c->cache, block);
2242	if (b) {
2243		if (likely(!smp_load_acquire(&b->state))) {
2244			if (cache_remove(&c->cache, b))
2245				__free_buffer_wake(b);
2246			else
2247				cache_put_and_wake(c, b);
2248		} else {
2249			cache_put_and_wake(c, b);
2250		}
2251	}
2252
2253	return b != NULL;
2254}
2255
2256/*
2257 * Free the given buffer.
2258 *
2259 * This is just a hint; if the buffer is in use or dirty, this function
2260 * does nothing.
2261 */
2262void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
2263{
2264	dm_bufio_lock(c);
2265	forget_buffer(c, block);
2266	dm_bufio_unlock(c);
2267}
2268EXPORT_SYMBOL_GPL(dm_bufio_forget);
2269
2270static enum evict_result idle(struct dm_buffer *b, void *context)
2271{
2272	return b->state ? ER_DONT_EVICT : ER_EVICT;
2273}
2274
2275void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
2276{
2277	dm_bufio_lock(c);
2278	cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
2279	dm_bufio_unlock(c);
2280}
2281EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
2282
2283void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
2284{
2285	c->minimum_buffers = n;
2286}
2287EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
2288
2289unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
2290{
2291	return c->block_size;
2292}
2293EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
2294
2295sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
2296{
2297	sector_t s = bdev_nr_sectors(c->bdev);
2298
2299	if (s >= c->start)
2300		s -= c->start;
2301	else
2302		s = 0;
2303	if (likely(c->sectors_per_block_bits >= 0))
2304		s >>= c->sectors_per_block_bits;
2305	else
2306		sector_div(s, c->block_size >> SECTOR_SHIFT);
2307	return s;
2308}
2309EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
2310
2311struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
2312{
2313	return c->dm_io;
2314}
2315EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
2316
2317sector_t dm_bufio_get_block_number(struct dm_buffer *b)
2318{
2319	return b->block;
2320}
2321EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
2322
2323void *dm_bufio_get_block_data(struct dm_buffer *b)
2324{
2325	return b->data;
2326}
2327EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
2328
2329void *dm_bufio_get_aux_data(struct dm_buffer *b)
2330{
2331	return b + 1;
2332}
2333EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
2334
2335struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
2336{
2337	return b->c;
2338}
2339EXPORT_SYMBOL_GPL(dm_bufio_get_client);
2340
2341static enum it_action warn_leak(struct dm_buffer *b, void *context)
2342{
2343	bool *warned = context;
2344
2345	WARN_ON(!(*warned));
2346	*warned = true;
2347	DMERR("leaked buffer %llx, hold count %u, list %d",
2348	      (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode);
2349#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2350	stack_trace_print(b->stack_entries, b->stack_len, 1);
2351	/* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
2352	atomic_set(&b->hold_count, 0);
2353#endif
2354	return IT_NEXT;
2355}
2356
2357static void drop_buffers(struct dm_bufio_client *c)
2358{
2359	int i;
2360	struct dm_buffer *b;
2361
2362	if (WARN_ON(dm_bufio_in_request()))
2363		return; /* should never happen */
2364
2365	/*
2366	 * An optimization so that the buffers are not written one-by-one.
2367	 */
2368	dm_bufio_write_dirty_buffers_async(c);
2369
2370	dm_bufio_lock(c);
2371
2372	while ((b = __get_unclaimed_buffer(c)))
2373		__free_buffer_wake(b);
2374
2375	for (i = 0; i < LIST_SIZE; i++) {
2376		bool warned = false;
2377
2378		cache_iterate(&c->cache, i, warn_leak, &warned);
2379	}
2380
2381#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2382	while ((b = __get_unclaimed_buffer(c)))
2383		__free_buffer_wake(b);
2384#endif
2385
2386	for (i = 0; i < LIST_SIZE; i++)
2387		WARN_ON(cache_count(&c->cache, i));
2388
2389	dm_bufio_unlock(c);
2390}
2391
2392static unsigned long get_retain_buffers(struct dm_bufio_client *c)
2393{
2394	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
2395
2396	if (likely(c->sectors_per_block_bits >= 0))
2397		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
2398	else
2399		retain_bytes /= c->block_size;
2400
2401	return retain_bytes;
2402}
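/*
 * Worked example: with the default retain_bytes of 256 KiB and a
 * 4096-byte block size (sectors_per_block_bits == 3), this is
 * 262144 >> (3 + 9) == 64 buffers kept back from the shrinker and the
 * ageing code below.
 */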
2403
2404static void __scan(struct dm_bufio_client *c)
2405{
2406	int l;
2407	struct dm_buffer *b;
2408	unsigned long freed = 0;
2409	unsigned long retain_target = get_retain_buffers(c);
2410	unsigned long count = cache_total(&c->cache);
2411
2412	for (l = 0; l < LIST_SIZE; l++) {
2413		while (true) {
2414			if (count - freed <= retain_target)
2415				atomic_long_set(&c->need_shrink, 0);
2416			if (!atomic_long_read(&c->need_shrink))
2417				break;
2418
2419			b = cache_evict(&c->cache, l,
2420					l == LIST_CLEAN ? is_clean : is_dirty, c);
2421			if (!b)
2422				break;
2423
2424			__make_buffer_clean(b);
2425			__free_buffer_wake(b);
2426
2427			atomic_long_dec(&c->need_shrink);
2428			freed++;
2429			cond_resched();
2430		}
2431	}
2432}
2433
2434static void shrink_work(struct work_struct *w)
2435{
2436	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
2437
2438	dm_bufio_lock(c);
2439	__scan(c);
2440	dm_bufio_unlock(c);
2441}
2442
2443static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2444{
2445	struct dm_bufio_client *c;
2446
2447	c = shrink->private_data;
2448	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
2449	queue_work(dm_bufio_wq, &c->shrink_work);
2450
2451	return sc->nr_to_scan;
2452}
2453
2454static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
2455{
2456	struct dm_bufio_client *c = shrink->private_data;
2457	unsigned long count = cache_total(&c->cache);
2458	unsigned long retain_target = get_retain_buffers(c);
2459	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
2460
2461	if (unlikely(count < retain_target))
2462		count = 0;
2463	else
2464		count -= retain_target;
2465
2466	if (unlikely(count < queued_for_cleanup))
2467		count = 0;
2468	else
2469		count -= queued_for_cleanup;
2470
2471	return count;
2472}
2473
2474/*
2475 * Create the buffering interface
2476 */
2477struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
2478					       unsigned int reserved_buffers, unsigned int aux_size,
2479					       void (*alloc_callback)(struct dm_buffer *),
2480					       void (*write_callback)(struct dm_buffer *),
2481					       unsigned int flags)
2482{
2483	int r;
2484	unsigned int num_locks;
2485	struct dm_bufio_client *c;
2486	char slab_name[64];
2487	static atomic_t seqno = ATOMIC_INIT(0);
2488
2489	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
2490		DMERR("%s: block size not specified or is not a multiple of 512b", __func__);
2491		r = -EINVAL;
2492		goto bad_client;
2493	}
2494
2495	num_locks = dm_num_hash_locks();
2496	c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2497	if (!c) {
2498		r = -ENOMEM;
2499		goto bad_client;
2500	}
2501	cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
2502
2503	c->bdev = bdev;
2504	c->block_size = block_size;
2505	if (is_power_of_2(block_size))
2506		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
2507	else
2508		c->sectors_per_block_bits = -1;
2509
2510	c->alloc_callback = alloc_callback;
2511	c->write_callback = write_callback;
2512
2513	if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
2514		c->no_sleep = true;
2515		static_branch_inc(&no_sleep_enabled);
2516	}
2517
2518	mutex_init(&c->lock);
2519	spin_lock_init(&c->spinlock);
2520	INIT_LIST_HEAD(&c->reserved_buffers);
2521	c->need_reserved_buffers = reserved_buffers;
2522
2523	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
2524
2525	init_waitqueue_head(&c->free_buffer_wait);
2526	c->async_write_error = 0;
2527
2528	c->dm_io = dm_io_client_create();
2529	if (IS_ERR(c->dm_io)) {
2530		r = PTR_ERR(c->dm_io);
2531		goto bad_dm_io;
2532	}
2533
2534	if (block_size <= KMALLOC_MAX_SIZE && !is_power_of_2(block_size)) {
2535		unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
2536
2537		snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u",
2538					block_size, atomic_inc_return(&seqno));
2539		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
2540						  SLAB_RECLAIM_ACCOUNT, NULL);
2541		if (!c->slab_cache) {
2542			r = -ENOMEM;
2543			goto bad;
2544		}
2545	}
2546	if (aux_size)
2547		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u",
2548					aux_size, atomic_inc_return(&seqno));
2549	else
2550		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u",
2551					atomic_inc_return(&seqno));
2552	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
2553					   0, SLAB_RECLAIM_ACCOUNT, NULL);
2554	if (!c->slab_buffer) {
2555		r = -ENOMEM;
2556		goto bad;
2557	}
2558
2559	while (c->need_reserved_buffers) {
2560		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
2561
2562		if (!b) {
2563			r = -ENOMEM;
2564			goto bad;
2565		}
2566		__free_buffer_wake(b);
2567	}
2568
2569	INIT_WORK(&c->shrink_work, shrink_work);
2570	atomic_long_set(&c->need_shrink, 0);
2571
2572	c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
2573				     MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2574	if (!c->shrinker) {
2575		r = -ENOMEM;
2576		goto bad;
2577	}
2578
2579	c->shrinker->count_objects = dm_bufio_shrink_count;
2580	c->shrinker->scan_objects = dm_bufio_shrink_scan;
2581	c->shrinker->seeks = 1;
2582	c->shrinker->batch = 0;
2583	c->shrinker->private_data = c;
2584
2585	shrinker_register(c->shrinker);
2586
2587	mutex_lock(&dm_bufio_clients_lock);
2588	dm_bufio_client_count++;
2589	list_add(&c->client_list, &dm_bufio_all_clients);
2590	__cache_size_refresh();
2591	mutex_unlock(&dm_bufio_clients_lock);
2592
2593	return c;
2594
2595bad:
2596	while (!list_empty(&c->reserved_buffers)) {
2597		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2598
2599		list_del(&b->lru.list);
2600		free_buffer(b);
2601	}
2602	kmem_cache_destroy(c->slab_cache);
2603	kmem_cache_destroy(c->slab_buffer);
2604	dm_io_client_destroy(c->dm_io);
2605bad_dm_io:
2606	mutex_destroy(&c->lock);
2607	if (c->no_sleep)
2608		static_branch_dec(&no_sleep_enabled);
2609	kfree(c);
2610bad_client:
2611	return ERR_PTR(r);
2612}
2613EXPORT_SYMBOL_GPL(dm_bufio_client_create);
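/*
 * Example (illustrative sketch; hypothetical caller code): a minimal
 * client for 4 KiB blocks with one reserved buffer, no aux data and no
 * callbacks.
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */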
2614
2615/*
2616 * Free the buffering interface.
2617 * It is required that there are no references on any buffers.
2618 */
2619void dm_bufio_client_destroy(struct dm_bufio_client *c)
2620{
2621	unsigned int i;
2622
2623	drop_buffers(c);
2624
2625	shrinker_free(c->shrinker);
2626	flush_work(&c->shrink_work);
2627
2628	mutex_lock(&dm_bufio_clients_lock);
2629
2630	list_del(&c->client_list);
2631	dm_bufio_client_count--;
2632	__cache_size_refresh();
2633
2634	mutex_unlock(&dm_bufio_clients_lock);
2635
2636	WARN_ON(c->need_reserved_buffers);
2637
2638	while (!list_empty(&c->reserved_buffers)) {
2639		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2640
2641		list_del(&b->lru.list);
2642		free_buffer(b);
2643	}
2644
2645	for (i = 0; i < LIST_SIZE; i++)
2646		if (cache_count(&c->cache, i))
2647			DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
2648
2649	for (i = 0; i < LIST_SIZE; i++)
2650		WARN_ON(cache_count(&c->cache, i));
2651
2652	cache_destroy(&c->cache);
2653	kmem_cache_destroy(c->slab_cache);
2654	kmem_cache_destroy(c->slab_buffer);
2655	dm_io_client_destroy(c->dm_io);
2656	mutex_destroy(&c->lock);
2657	if (c->no_sleep)
2658		static_branch_dec(&no_sleep_enabled);
2659	kfree(c);
2660}
2661EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
2662
2663void dm_bufio_client_reset(struct dm_bufio_client *c)
2664{
2665	drop_buffers(c);
2666	flush_work(&c->shrink_work);
2667}
2668EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
2669
2670void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
2671{
2672	c->start = start;
2673}
2674EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
2675
2676/*--------------------------------------------------------------*/
2677
2678static unsigned int get_max_age_hz(void)
2679{
2680	unsigned int max_age = READ_ONCE(dm_bufio_max_age);
2681
2682	if (max_age > UINT_MAX / HZ)
2683		max_age = UINT_MAX / HZ;
2684
2685	return max_age * HZ;
2686}
2687
2688static bool older_than(struct dm_buffer *b, unsigned long age_hz)
2689{
2690	return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
2691}
2692
2693struct evict_params {
2694	gfp_t gfp;
2695	unsigned long age_hz;
2696
2697	/*
2698	 * This gets updated with the largest last_accessed (i.e. most
2699	 * recently used) of the evicted buffers.  It will not be reinitialised
2700	 * by __evict_many(), so you can use it across multiple invocations.
2701	 */
2702	unsigned long last_accessed;
2703};
2704
2705/*
2706 * We may not be able to evict this buffer if I/O is pending or the
2707 * client is still using it.
2708 *
2709 * And if GFP_NOFS is used, we must not do any I/O because we hold
2710 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
2711 * rerouted to a different bufio client.
2712 */
2713static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2714{
2715	struct evict_params *params = context;
2716
2717	if (!(params->gfp & __GFP_FS) ||
2718	    (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
2719		if (test_bit_acquire(B_READING, &b->state) ||
2720		    test_bit(B_WRITING, &b->state) ||
2721		    test_bit(B_DIRTY, &b->state))
2722			return ER_DONT_EVICT;
2723	}
2724
2725	return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
2726}
2727
2728static unsigned long __evict_many(struct dm_bufio_client *c,
2729				  struct evict_params *params,
2730				  int list_mode, unsigned long max_count)
2731{
2732	unsigned long count;
2733	unsigned long last_accessed;
2734	struct dm_buffer *b;
2735
2736	for (count = 0; count < max_count; count++) {
2737		b = cache_evict(&c->cache, list_mode, select_for_evict, params);
2738		if (!b)
2739			break;
2740
2741		last_accessed = READ_ONCE(b->last_accessed);
2742		if (time_after_eq(params->last_accessed, last_accessed))
2743			params->last_accessed = last_accessed;
2744
2745		__make_buffer_clean(b);
2746		__free_buffer_wake(b);
2747
2748		cond_resched();
2749	}
2750
2751	return count;
2752}
2753
2754static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
2755{
2756	struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
2757	unsigned long retain = get_retain_buffers(c);
2758	unsigned long count;
2759	LIST_HEAD(write_list);
2760
2761	dm_bufio_lock(c);
2762
2763	__check_watermark(c, &write_list);
2764	if (unlikely(!list_empty(&write_list))) {
2765		dm_bufio_unlock(c);
2766		__flush_write_list(&write_list);
2767		dm_bufio_lock(c);
2768	}
2769
2770	count = cache_total(&c->cache);
2771	if (count > retain)
2772		__evict_many(c, &params, LIST_CLEAN, count - retain);
2773
2774	dm_bufio_unlock(c);
2775}
2776
2777static void cleanup_old_buffers(void)
2778{
2779	unsigned long max_age_hz = get_max_age_hz();
2780	struct dm_bufio_client *c;
2781
2782	mutex_lock(&dm_bufio_clients_lock);
2783
2784	__cache_size_refresh();
2785
2786	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2787		evict_old_buffers(c, max_age_hz);
2788
2789	mutex_unlock(&dm_bufio_clients_lock);
2790}
2791
2792static void work_fn(struct work_struct *w)
2793{
2794	cleanup_old_buffers();
2795
2796	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2797			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2798}
2799
2800/*--------------------------------------------------------------*/
2801
2802/*
2803 * Global cleanup tries to evict the oldest buffers from across _all_
2804 * the clients.  It does this by repeatedly evicting a few buffers from
2805 * the client that holds the oldest buffer.  It's approximate, but hopefully
2806 * good enough.
2807 */
2808static struct dm_bufio_client *__pop_client(void)
2809{
2810	struct list_head *h;
2811
2812	if (list_empty(&dm_bufio_all_clients))
2813		return NULL;
2814
2815	h = dm_bufio_all_clients.next;
2816	list_del(h);
2817	return container_of(h, struct dm_bufio_client, client_list);
2818}
2819
2820/*
2821 * Inserts the client in the global client list based on its
2822 * 'oldest_buffer' field.
2823 */
2824static void __insert_client(struct dm_bufio_client *new_client)
2825{
2826	struct dm_bufio_client *c;
2827	struct list_head *h = dm_bufio_all_clients.next;
2828
2829	while (h != &dm_bufio_all_clients) {
2830		c = container_of(h, struct dm_bufio_client, client_list);
2831		if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
2832			break;
2833		h = h->next;
2834	}
2835
2836	list_add_tail(&new_client->client_list, h);
2837}
2838
2839static unsigned long __evict_a_few(unsigned long nr_buffers)
2840{
2841	unsigned long count;
2842	struct dm_bufio_client *c;
2843	struct evict_params params = {
2844		.gfp = GFP_KERNEL,
2845		.age_hz = 0,
2846		/* set to jiffies in case there are no buffers in this client */
2847		.last_accessed = jiffies
2848	};
2849
2850	c = __pop_client();
2851	if (!c)
2852		return 0;
2853
2854	dm_bufio_lock(c);
2855	count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
2856	dm_bufio_unlock(c);
2857
2858	if (count)
2859		c->oldest_buffer = params.last_accessed;
2860	__insert_client(c);
2861
2862	return count;
2863}
2864
2865static void check_watermarks(void)
2866{
2867	LIST_HEAD(write_list);
2868	struct dm_bufio_client *c;
2869
2870	mutex_lock(&dm_bufio_clients_lock);
2871	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
2872		dm_bufio_lock(c);
2873		__check_watermark(c, &write_list);
2874		dm_bufio_unlock(c);
2875	}
2876	mutex_unlock(&dm_bufio_clients_lock);
2877
2878	__flush_write_list(&write_list);
2879}
2880
2881static void evict_old(void)
2882{
2883	unsigned long threshold = dm_bufio_cache_size -
2884		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
2885
2886	mutex_lock(&dm_bufio_clients_lock);
2887	while (dm_bufio_current_allocated > threshold) {
2888		if (!__evict_a_few(64))
2889			break;
2890		cond_resched();
2891	}
2892	mutex_unlock(&dm_bufio_clients_lock);
2893}
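/*
 * Worked example: with dm_bufio_cache_size == 64 MiB and
 * DM_BUFIO_LOW_WATERMARK_RATIO == 16, the threshold is
 * 64 MiB - 64 MiB / 16 == 60 MiB; buffers are evicted 64 at a time
 * until the global allocation drops below that.
 */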
2894
2895static void do_global_cleanup(struct work_struct *w)
2896{
2897	check_watermarks();
2898	evict_old();
2899}
2900
2901/*
2902 *--------------------------------------------------------------
2903 * Module setup
2904 *--------------------------------------------------------------
2905 */
2906
2907/*
2908 * This is called only once for the whole dm_bufio module.
2909 * It initializes the memory limit.
2910 */
2911static int __init dm_bufio_init(void)
2912{
2913	__u64 mem;
2914
2915	dm_bufio_allocated_kmem_cache = 0;
2916	dm_bufio_allocated_kmalloc = 0;
2917	dm_bufio_allocated_get_free_pages = 0;
2918	dm_bufio_allocated_vmalloc = 0;
2919	dm_bufio_current_allocated = 0;
2920
2921	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2922			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2923
2924	if (mem > ULONG_MAX)
2925		mem = ULONG_MAX;
2926
2927#ifdef CONFIG_MMU
2928	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2929		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2930#endif
2931
2932	dm_bufio_default_cache_size = mem;
2933
2934	mutex_lock(&dm_bufio_clients_lock);
2935	__cache_size_refresh();
2936	mutex_unlock(&dm_bufio_clients_lock);
2937
2938	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2939	if (!dm_bufio_wq)
2940		return -ENOMEM;
2941
2942	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2943	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2944	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2945			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2946
2947	return 0;
2948}
2949
2950/*
2951 * This is called once when unloading the dm_bufio module.
2952 */
2953static void __exit dm_bufio_exit(void)
2954{
2955	int bug = 0;
2956
2957	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2958	destroy_workqueue(dm_bufio_wq);
2959
2960	if (dm_bufio_client_count) {
2961		DMCRIT("%s: dm_bufio_client_count leaked: %d",
2962			__func__, dm_bufio_client_count);
2963		bug = 1;
2964	}
2965
2966	if (dm_bufio_current_allocated) {
2967		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2968			__func__, dm_bufio_current_allocated);
2969		bug = 1;
2970	}
2971
2972	if (dm_bufio_allocated_get_free_pages) {
2973		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2974		       __func__, dm_bufio_allocated_get_free_pages);
2975		bug = 1;
2976	}
2977
2978	if (dm_bufio_allocated_vmalloc) {
2979		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
2980		       __func__, dm_bufio_allocated_vmalloc);
2981		bug = 1;
2982	}
2983
2984	WARN_ON(bug); /* leaks are not worth crashing the system */
2985}
2986
2987module_init(dm_bufio_init)
2988module_exit(dm_bufio_exit)
2989
2990module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
2991MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2992
2993module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2994MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2995
2996module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
2997MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2998
2999module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
3000MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
3001
3002module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
3003MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
3004
3005module_param_named(allocated_kmalloc_bytes, dm_bufio_allocated_kmalloc, ulong, 0444);
3006MODULE_PARM_DESC(allocated_kmalloc_bytes, "Memory allocated with kmalloc");
3007
3008module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
3009MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
3010
3011module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
3012MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
3013
3014module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
3015MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
3016
3017MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
3018MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
3019MODULE_LICENSE("GPL");