   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2009-2011 Red Hat, Inc.
   4 *
   5 * Author: Mikulas Patocka <mpatocka@redhat.com>
   6 *
   7 * This file is released under the GPL.
   8 */
   9
  10#include <linux/dm-bufio.h>
  11
  12#include <linux/device-mapper.h>
  13#include <linux/dm-io.h>
  14#include <linux/slab.h>
  15#include <linux/sched/mm.h>
  16#include <linux/jiffies.h>
  17#include <linux/vmalloc.h>
  18#include <linux/shrinker.h>
  19#include <linux/module.h>
  20#include <linux/rbtree.h>
  21#include <linux/stacktrace.h>
  22#include <linux/jump_label.h>
  23
  24#include "dm.h"
  25
  26#define DM_MSG_PREFIX "bufio"
  27
  28/*
  29 * Memory management policy:
  30 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
  31 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
  32 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
   33 *	Start background writeback when the number of dirty buffers exceeds
   34 *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
  35 */
  36#define DM_BUFIO_MIN_BUFFERS		8
  37
  38#define DM_BUFIO_MEMORY_PERCENT		2
  39#define DM_BUFIO_VMALLOC_PERCENT	25
  40#define DM_BUFIO_WRITEBACK_RATIO	3
  41#define DM_BUFIO_LOW_WATERMARK_RATIO	16
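
/*
 * Illustrative arithmetic only (not from this file): with the percentages
 * above, a machine with 8 GiB of main memory allows roughly
 * 8192 MiB * 2 / 100 ~= 164 MiB of buffers, while a 128 MiB vmalloc arena
 * allows 128 MiB * 25 / 100 = 32 MiB; the lower of the two bounds (32 MiB)
 * wins.  The exact computation happens in module init code outside this
 * excerpt.
 */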
  42
  43/*
  44 * Check buffer ages in this interval (seconds)
  45 */
  46#define DM_BUFIO_WORK_TIMER_SECS	30
  47
  48/*
  49 * Free buffers when they are older than this (seconds)
  50 */
  51#define DM_BUFIO_DEFAULT_AGE_SECS	300
  52
  53/*
   54 * The number of bytes of cached data to keep around.
  55 */
  56#define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
  57
  58/*
  59 * Align buffer writes to this boundary.
  60 * Tests show that SSDs have the highest IOPS when using 4k writes.
  61 */
  62#define DM_BUFIO_WRITE_ALIGN		4096
  63
  64/*
  65 * dm_buffer->list_mode
  66 */
  67#define LIST_CLEAN	0
  68#define LIST_DIRTY	1
  69#define LIST_SIZE	2
  70
  71/*--------------------------------------------------------------*/
  72
  73/*
  74 * Rather than use an LRU list, we use a clock algorithm where entries
  75 * are held in a circular list.  When an entry is 'hit' a reference bit
  76 * is set.  The least recently used entry is approximated by running a
  77 * cursor around the list selecting unreferenced entries. Referenced
  78 * entries have their reference bit cleared as the cursor passes them.
  79 */
  80struct lru_entry {
  81	struct list_head list;
  82	atomic_t referenced;
  83};
  84
  85struct lru_iter {
  86	struct lru *lru;
  87	struct list_head list;
  88	struct lru_entry *stop;
  89	struct lru_entry *e;
  90};
  91
  92struct lru {
  93	struct list_head *cursor;
  94	unsigned long count;
  95
  96	struct list_head iterators;
  97};
  98
  99/*--------------*/
 100
 101static void lru_init(struct lru *lru)
 102{
 103	lru->cursor = NULL;
 104	lru->count = 0;
 105	INIT_LIST_HEAD(&lru->iterators);
 106}
 107
 108static void lru_destroy(struct lru *lru)
 109{
 110	WARN_ON_ONCE(lru->cursor);
 111	WARN_ON_ONCE(!list_empty(&lru->iterators));
 112}
 113
 114/*
 115 * Insert a new entry into the lru.
 116 */
 117static void lru_insert(struct lru *lru, struct lru_entry *le)
 118{
 119	/*
  120	 * Don't be tempted to set this to 1; doing so makes the lru
  121	 * aspect perform poorly.
 122	 */
 123	atomic_set(&le->referenced, 0);
 124
 125	if (lru->cursor) {
 126		list_add_tail(&le->list, lru->cursor);
 127	} else {
 128		INIT_LIST_HEAD(&le->list);
 129		lru->cursor = &le->list;
 130	}
 131	lru->count++;
 132}
 133
 134/*--------------*/
 135
 136/*
 137 * Convert a list_head pointer to an lru_entry pointer.
 138 */
 139static inline struct lru_entry *to_le(struct list_head *l)
 140{
 141	return container_of(l, struct lru_entry, list);
 142}
 143
 144/*
 145 * Initialize an lru_iter and add it to the list of cursors in the lru.
 146 */
 147static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
 148{
 149	it->lru = lru;
 150	it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
 151	it->e = lru->cursor ? to_le(lru->cursor) : NULL;
 152	list_add(&it->list, &lru->iterators);
 153}
 154
 155/*
 156 * Remove an lru_iter from the list of cursors in the lru.
 157 */
 158static inline void lru_iter_end(struct lru_iter *it)
 159{
 160	list_del(&it->list);
 161}
 162
 163/* Predicate function type to be used with lru_iter_next */
 164typedef bool (*iter_predicate)(struct lru_entry *le, void *context);
 165
 166/*
 167 * Advance the cursor to the next entry that passes the
 168 * predicate, and return that entry.  Returns NULL if the
 169 * iteration is complete.
 170 */
 171static struct lru_entry *lru_iter_next(struct lru_iter *it,
 172				       iter_predicate pred, void *context)
 173{
 174	struct lru_entry *e;
 175
 176	while (it->e) {
 177		e = it->e;
 178
 179		/* advance the cursor */
 180		if (it->e == it->stop)
 181			it->e = NULL;
 182		else
 183			it->e = to_le(it->e->list.next);
 184
 185		if (pred(e, context))
 186			return e;
 187	}
 188
 189	return NULL;
 190}
 191
 192/*
 193 * Invalidate a specific lru_entry and update all cursors in
 194 * the lru accordingly.
 195 */
 196static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
 197{
 198	struct lru_iter *it;
 199
 200	list_for_each_entry(it, &lru->iterators, list) {
  201		/* Move it->e forwards if necessary. */
 202		if (it->e == e) {
 203			it->e = to_le(it->e->list.next);
 204			if (it->e == e)
 205				it->e = NULL;
 206		}
 207
  208		/* Move it->stop backwards if necessary. */
 209		if (it->stop == e) {
 210			it->stop = to_le(it->stop->list.prev);
 211			if (it->stop == e)
 212				it->stop = NULL;
 213		}
 214	}
 215}
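
/*
 * Illustrative sketch (not part of this driver): how a caller might walk an
 * lru with the iterator above.  The predicate visit_all() and the variable
 * my_lru are hypothetical.
 *
 *	static bool visit_all(struct lru_entry *le, void *context)
 *	{
 *		return true;		// accept every entry
 *	}
 *
 *	struct lru_iter it;
 *	struct lru_entry *le;
 *
 *	lru_iter_begin(&my_lru, &it);
 *	while ((le = lru_iter_next(&it, visit_all, NULL))) {
 *		// use 'le'; lru_iter_invalidate() keeps 'it' consistent if
 *		// an entry is removed while the iterator is live
 *	}
 *	lru_iter_end(&it);
 */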
 216
 217/*--------------*/
 218
 219/*
 220 * Remove a specific entry from the lru.
 221 */
 222static void lru_remove(struct lru *lru, struct lru_entry *le)
 223{
 224	lru_iter_invalidate(lru, le);
 225	if (lru->count == 1) {
 226		lru->cursor = NULL;
 227	} else {
 228		if (lru->cursor == &le->list)
 229			lru->cursor = lru->cursor->next;
 230		list_del(&le->list);
 231	}
 232	lru->count--;
 233}
 234
 235/*
 236 * Mark as referenced.
 237 */
 238static inline void lru_reference(struct lru_entry *le)
 239{
 240	atomic_set(&le->referenced, 1);
 241}
 242
 243/*--------------*/
 244
 245/*
  246 * Remove the (approximately) least recently used entry that passes the predicate.
 247 * Returns NULL on failure.
 248 */
 249enum evict_result {
 250	ER_EVICT,
 251	ER_DONT_EVICT,
 252	ER_STOP, /* stop looking for something to evict */
 253};
 254
 255typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
 256
 257static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
 258{
 259	unsigned long tested = 0;
 260	struct list_head *h = lru->cursor;
 261	struct lru_entry *le;
 262
 263	if (!h)
 264		return NULL;
 265	/*
 266	 * In the worst case we have to loop around twice. Once to clear
 267	 * the reference flags, and then again to discover the predicate
 268	 * fails for all entries.
 269	 */
 270	while (tested < lru->count) {
 271		le = container_of(h, struct lru_entry, list);
 272
 273		if (atomic_read(&le->referenced)) {
 274			atomic_set(&le->referenced, 0);
 275		} else {
 276			tested++;
 277			switch (pred(le, context)) {
 278			case ER_EVICT:
 279				/*
 280				 * Adjust the cursor, so we start the next
 281				 * search from here.
 282				 */
 283				lru->cursor = le->list.next;
 284				lru_remove(lru, le);
 285				return le;
 286
 287			case ER_DONT_EVICT:
 288				break;
 289
 290			case ER_STOP:
 291				lru->cursor = le->list.next;
 292				return NULL;
 293			}
 294		}
 295
 296		h = h->next;
 297
 298		if (!no_sleep)
 299			cond_resched();
 300	}
 301
 302	return NULL;
 303}
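
/*
 * Illustrative sketch (not part of this driver): driving the clock
 * replacement above.  evict_any() and the local variables are hypothetical.
 *
 *	static enum evict_result evict_any(struct lru_entry *le, void *context)
 *	{
 *		return ER_EVICT;
 *	}
 *
 *	struct lru lru;
 *	struct lru_entry a, b;
 *	struct lru_entry *victim;
 *
 *	lru_init(&lru);
 *	lru_insert(&lru, &a);
 *	lru_insert(&lru, &b);
 *	lru_reference(&a);				// give 'a' a second chance
 *	victim = lru_evict(&lru, evict_any, NULL, false);	// clears a's bit, evicts 'b'
 *	victim = lru_evict(&lru, evict_any, NULL, false);	// now evicts 'a'
 *	lru_destroy(&lru);
 */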
 304
 305/*--------------------------------------------------------------*/
 306
 307/*
 308 * Buffer state bits.
 309 */
 310#define B_READING	0
 311#define B_WRITING	1
 312#define B_DIRTY		2
 313
 314/*
 315 * Describes how the block was allocated:
 316 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 317 * See the comment at alloc_buffer_data.
 318 */
 319enum data_mode {
 320	DATA_MODE_SLAB = 0,
 321	DATA_MODE_GET_FREE_PAGES = 1,
 322	DATA_MODE_VMALLOC = 2,
 323	DATA_MODE_LIMIT = 3
 324};
 325
 326struct dm_buffer {
 327	/* protected by the locks in dm_buffer_cache */
 328	struct rb_node node;
 329
 330	/* immutable, so don't need protecting */
 331	sector_t block;
 332	void *data;
 333	unsigned char data_mode;		/* DATA_MODE_* */
 334
 335	/*
 336	 * These two fields are used in isolation, so do not need
 337	 * a surrounding lock.
 338	 */
 339	atomic_t hold_count;
 340	unsigned long last_accessed;
 341
 342	/*
 343	 * Everything else is protected by the mutex in
 344	 * dm_bufio_client
 345	 */
 346	unsigned long state;
 347	struct lru_entry lru;
 348	unsigned char list_mode;		/* LIST_* */
 349	blk_status_t read_error;
 350	blk_status_t write_error;
 351	unsigned int dirty_start;
 352	unsigned int dirty_end;
 353	unsigned int write_start;
 354	unsigned int write_end;
 355	struct list_head write_list;
 356	struct dm_bufio_client *c;
 357	void (*end_io)(struct dm_buffer *b, blk_status_t bs);
 358#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 359#define MAX_STACK 10
 360	unsigned int stack_len;
 361	unsigned long stack_entries[MAX_STACK];
 362#endif
 363};
 364
 365/*--------------------------------------------------------------*/
 366
 367/*
 368 * The buffer cache manages buffers, particularly:
 369 *  - inc/dec of holder count
 370 *  - setting the last_accessed field
 371 *  - maintains clean/dirty state along with lru
 372 *  - selecting buffers that match predicates
 373 *
 374 * It does *not* handle:
 375 *  - allocation/freeing of buffers.
 376 *  - IO
 377 *  - Eviction or cache sizing.
 378 *
  379 * cache_get() and cache_put() are threadsafe; you do not need to
 380 * protect these calls with a surrounding mutex.  All the other
 381 * methods are not threadsafe; they do use locking primitives, but
 382 * only enough to ensure get/put are threadsafe.
 383 */
 384
 385struct buffer_tree {
 386	union {
 387		struct rw_semaphore lock;
 388		rwlock_t spinlock;
 389	} u;
 390	struct rb_root root;
 391} ____cacheline_aligned_in_smp;
 392
 393struct dm_buffer_cache {
 394	struct lru lru[LIST_SIZE];
 395	/*
 396	 * We spread entries across multiple trees to reduce contention
 397	 * on the locks.
 398	 */
 399	unsigned int num_locks;
 400	bool no_sleep;
 401	struct buffer_tree trees[];
 402};
 403
 404static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
 405
 406static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
 407{
 408	return dm_hash_locks_index(block, num_locks);
 409}
 410
 411static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
 412{
 413	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 414		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 415	else
 416		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 417}
 418
 419static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
 420{
 421	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 422		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 423	else
 424		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 425}
 426
 427static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
 428{
 429	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 430		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 431	else
 432		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 433}
 434
 435static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
 436{
 437	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
 438		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
 439	else
 440		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 441}
 442
 443/*
 444 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
  445 * This struct helps avoid redundant drops and gets of the same lock.
 446 */
 447struct lock_history {
 448	struct dm_buffer_cache *cache;
 449	bool write;
 450	unsigned int previous;
 451	unsigned int no_previous;
 452};
 453
 454static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
 455{
 456	lh->cache = cache;
 457	lh->write = write;
 458	lh->no_previous = cache->num_locks;
 459	lh->previous = lh->no_previous;
 460}
 461
 462static void __lh_lock(struct lock_history *lh, unsigned int index)
 463{
 464	if (lh->write) {
 465		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 466			write_lock_bh(&lh->cache->trees[index].u.spinlock);
 467		else
 468			down_write(&lh->cache->trees[index].u.lock);
 469	} else {
 470		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 471			read_lock_bh(&lh->cache->trees[index].u.spinlock);
 472		else
 473			down_read(&lh->cache->trees[index].u.lock);
 474	}
 475}
 476
 477static void __lh_unlock(struct lock_history *lh, unsigned int index)
 478{
 479	if (lh->write) {
 480		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 481			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
 482		else
 483			up_write(&lh->cache->trees[index].u.lock);
 484	} else {
 485		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
 486			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
 487		else
 488			up_read(&lh->cache->trees[index].u.lock);
 489	}
 490}
 491
 492/*
 493 * Make sure you call this since it will unlock the final lock.
 494 */
 495static void lh_exit(struct lock_history *lh)
 496{
 497	if (lh->previous != lh->no_previous) {
 498		__lh_unlock(lh, lh->previous);
 499		lh->previous = lh->no_previous;
 500	}
 501}
 502
 503/*
 504 * Named 'next' because there is no corresponding
 505 * 'up/unlock' call since it's done automatically.
 506 */
 507static void lh_next(struct lock_history *lh, sector_t b)
 508{
 509	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
 510
 511	if (lh->previous != lh->no_previous) {
 512		if (lh->previous != index) {
 513			__lh_unlock(lh, lh->previous);
 514			__lh_lock(lh, index);
 515			lh->previous = index;
 516		}
 517	} else {
 518		__lh_lock(lh, index);
 519		lh->previous = index;
 520	}
 521}
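
/*
 * Illustrative sketch (not part of this driver): the lock_history pattern
 * used by the eviction and iteration helpers below.  'bc', 'b1' and 'b2'
 * are hypothetical.
 *
 *	struct lock_history lh;
 *
 *	lh_init(&lh, bc, true);		// 'true' selects the write locks
 *	lh_next(&lh, b1->block);	// locks the tree covering b1->block
 *	lh_next(&lh, b2->block);	// same tree: nothing to do; different
 *					// tree: unlocks the old, locks the new
 *	lh_exit(&lh);			// drops whichever lock is still held
 */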
 522
 523static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
 524{
 525	return container_of(le, struct dm_buffer, lru);
 526}
 527
 528static struct dm_buffer *list_to_buffer(struct list_head *l)
 529{
 530	struct lru_entry *le = list_entry(l, struct lru_entry, list);
 531
 532	if (!le)
 533		return NULL;
 534
 535	return le_to_buffer(le);
 536}
 537
 538static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
 539{
 540	unsigned int i;
 541
 542	bc->num_locks = num_locks;
 543	bc->no_sleep = no_sleep;
 544
 545	for (i = 0; i < bc->num_locks; i++) {
 546		if (no_sleep)
 547			rwlock_init(&bc->trees[i].u.spinlock);
 548		else
 549			init_rwsem(&bc->trees[i].u.lock);
 550		bc->trees[i].root = RB_ROOT;
 551	}
 552
 553	lru_init(&bc->lru[LIST_CLEAN]);
 554	lru_init(&bc->lru[LIST_DIRTY]);
 555}
 556
 557static void cache_destroy(struct dm_buffer_cache *bc)
 558{
 559	unsigned int i;
 560
 561	for (i = 0; i < bc->num_locks; i++)
 562		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
 563
 564	lru_destroy(&bc->lru[LIST_CLEAN]);
 565	lru_destroy(&bc->lru[LIST_DIRTY]);
 566}
 567
 568/*--------------*/
 569
 570/*
  571 * not threadsafe, or racy, depending on how you look at it
 572 */
 573static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
 574{
 575	return bc->lru[list_mode].count;
 576}
 577
 578static inline unsigned long cache_total(struct dm_buffer_cache *bc)
 579{
 580	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
 581}
 582
 583/*--------------*/
 584
 585/*
 586 * Gets a specific buffer, indexed by block.
 587 * If the buffer is found then its holder count will be incremented and
 588 * lru_reference will be called.
 589 *
 590 * threadsafe
 591 */
 592static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
 593{
 594	struct rb_node *n = root->rb_node;
 595	struct dm_buffer *b;
 596
 597	while (n) {
 598		b = container_of(n, struct dm_buffer, node);
 599
 600		if (b->block == block)
 601			return b;
 602
 603		n = block < b->block ? n->rb_left : n->rb_right;
 604	}
 605
 606	return NULL;
 607}
 608
 609static void __cache_inc_buffer(struct dm_buffer *b)
 610{
 611	atomic_inc(&b->hold_count);
 612	WRITE_ONCE(b->last_accessed, jiffies);
 613}
 614
 615static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
 616{
 617	struct dm_buffer *b;
 618
 619	cache_read_lock(bc, block);
 620	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
 621	if (b) {
 622		lru_reference(&b->lru);
 623		__cache_inc_buffer(b);
 624	}
 625	cache_read_unlock(bc, block);
 626
 627	return b;
 628}
 629
 630/*--------------*/
 631
 632/*
 633 * Returns true if the hold count hits zero.
 634 * threadsafe
 635 */
 636static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
 637{
 638	bool r;
 639
 640	cache_read_lock(bc, b->block);
 641	BUG_ON(!atomic_read(&b->hold_count));
 642	r = atomic_dec_and_test(&b->hold_count);
 643	cache_read_unlock(bc, b->block);
 644
 645	return r;
 646}
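
/*
 * Illustrative sketch (not part of this driver): the threadsafe get/put
 * pattern described above.  'bc' and 'block' are hypothetical.
 *
 *	struct dm_buffer *b = cache_get(bc, block);	// hold_count++, reference bit set
 *
 *	if (b) {
 *		// read b->data ...
 *		if (cache_put(bc, b))	// hold_count--; true when it reaches zero
 *			;		// a real caller wakes waiters here,
 *					// see cache_put_and_wake() further down
 *	}
 */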
 647
 648/*--------------*/
 649
 650typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);
 651
 652/*
 653 * Evicts a buffer based on a predicate.  The oldest buffer that
  654 * matches the predicate will be selected.  In addition to matching
  655 * the predicate, the selected buffer will have a hold_count of zero.
 656 */
 657struct evict_wrapper {
 658	struct lock_history *lh;
 659	b_predicate pred;
 660	void *context;
 661};
 662
 663/*
 664 * Wraps the buffer predicate turning it into an lru predicate.  Adds
 665 * extra test for hold_count.
 666 */
 667static enum evict_result __evict_pred(struct lru_entry *le, void *context)
 668{
 669	struct evict_wrapper *w = context;
 670	struct dm_buffer *b = le_to_buffer(le);
 671
 672	lh_next(w->lh, b->block);
 673
 674	if (atomic_read(&b->hold_count))
 675		return ER_DONT_EVICT;
 676
 677	return w->pred(b, w->context);
 678}
 679
 680static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
 681				       b_predicate pred, void *context,
 682				       struct lock_history *lh)
 683{
 684	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
 685	struct lru_entry *le;
 686	struct dm_buffer *b;
 687
 688	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
 689	if (!le)
 690		return NULL;
 691
 692	b = le_to_buffer(le);
 693	/* __evict_pred will have locked the appropriate tree. */
 694	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
 695
 696	return b;
 697}
 698
 699static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
 700				     b_predicate pred, void *context)
 701{
 702	struct dm_buffer *b;
 703	struct lock_history lh;
 704
 705	lh_init(&lh, bc, true);
 706	b = __cache_evict(bc, list_mode, pred, context, &lh);
 707	lh_exit(&lh);
 708
 709	return b;
 710}
 711
 712/*--------------*/
 713
 714/*
 715 * Mark a buffer as clean or dirty. Not threadsafe.
 716 */
 717static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
 718{
 719	cache_write_lock(bc, b->block);
 720	if (list_mode != b->list_mode) {
 721		lru_remove(&bc->lru[b->list_mode], &b->lru);
 722		b->list_mode = list_mode;
 723		lru_insert(&bc->lru[b->list_mode], &b->lru);
 724	}
 725	cache_write_unlock(bc, b->block);
 726}
 727
 728/*--------------*/
 729
 730/*
  731 * Runs through the lru associated with 'old_mode'; buffers for which the
  732 * predicate matches are moved to 'new_mode'.  Not threadsafe.
 733 */
 734static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
 735			      b_predicate pred, void *context, struct lock_history *lh)
 736{
 737	struct lru_entry *le;
 738	struct dm_buffer *b;
 739	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
 740
 741	while (true) {
 742		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
 743		if (!le)
 744			break;
 745
 746		b = le_to_buffer(le);
 747		b->list_mode = new_mode;
 748		lru_insert(&bc->lru[b->list_mode], &b->lru);
 749	}
 750}
 751
 752static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
 753			    b_predicate pred, void *context)
 754{
 755	struct lock_history lh;
 756
 757	lh_init(&lh, bc, true);
 758	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
 759	lh_exit(&lh);
 760}
 761
 762/*--------------*/
 763
 764/*
 765 * Iterates through all clean or dirty entries calling a function for each
 766 * entry.  The callback may terminate the iteration early.  Not threadsafe.
 767 */
 768
 769/*
 770 * Iterator functions should return one of these actions to indicate
 771 * how the iteration should proceed.
 772 */
 773enum it_action {
 774	IT_NEXT,
 775	IT_COMPLETE,
 776};
 777
 778typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);
 779
 780static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
 781			    iter_fn fn, void *context, struct lock_history *lh)
 782{
 783	struct lru *lru = &bc->lru[list_mode];
 784	struct lru_entry *le, *first;
 785
 786	if (!lru->cursor)
 787		return;
 788
 789	first = le = to_le(lru->cursor);
 790	do {
 791		struct dm_buffer *b = le_to_buffer(le);
 792
 793		lh_next(lh, b->block);
 794
 795		switch (fn(b, context)) {
 796		case IT_NEXT:
 797			break;
 798
 799		case IT_COMPLETE:
 800			return;
 801		}
 802		cond_resched();
 803
 804		le = to_le(le->list.next);
 805	} while (le != first);
 806}
 807
 808static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
 809			  iter_fn fn, void *context)
 810{
 811	struct lock_history lh;
 812
 813	lh_init(&lh, bc, false);
 814	__cache_iterate(bc, list_mode, fn, context, &lh);
 815	lh_exit(&lh);
 816}
 817
 818/*--------------*/
 819
 820/*
 821 * Passes ownership of the buffer to the cache. Returns false if the
  822 * buffer was already present (in which case ownership does not pass),
  823 * e.g. because of a race with another thread.
 824 *
 825 * Holder count should be 1 on insertion.
 826 *
 827 * Not threadsafe.
 828 */
 829static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
 830{
 831	struct rb_node **new = &root->rb_node, *parent = NULL;
 832	struct dm_buffer *found;
 833
 834	while (*new) {
 835		found = container_of(*new, struct dm_buffer, node);
 836
 837		if (found->block == b->block)
 838			return false;
 839
 840		parent = *new;
 841		new = b->block < found->block ?
 842			&found->node.rb_left : &found->node.rb_right;
 843	}
 844
 845	rb_link_node(&b->node, parent, new);
 846	rb_insert_color(&b->node, root);
 847
 848	return true;
 849}
 850
 851static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
 852{
 853	bool r;
 854
 855	if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
 856		return false;
 857
 858	cache_write_lock(bc, b->block);
 859	BUG_ON(atomic_read(&b->hold_count) != 1);
 860	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
 861	if (r)
 862		lru_insert(&bc->lru[b->list_mode], &b->lru);
 863	cache_write_unlock(bc, b->block);
 864
 865	return r;
 866}
 867
 868/*--------------*/
 869
 870/*
  871 * Removes a buffer from the cache; ownership of the buffer passes back to the caller.
  872 * Fails if the hold_count is not one (i.e. the caller does not hold the only reference).
 873 *
 874 * Not threadsafe.
 875 */
 876static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
 877{
 878	bool r;
 879
 880	cache_write_lock(bc, b->block);
 881
 882	if (atomic_read(&b->hold_count) != 1) {
 883		r = false;
 884	} else {
 885		r = true;
 886		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
 887		lru_remove(&bc->lru[b->list_mode], &b->lru);
 888	}
 889
 890	cache_write_unlock(bc, b->block);
 891
 892	return r;
 893}
 894
 895/*--------------*/
 896
 897typedef void (*b_release)(struct dm_buffer *);
 898
 899static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
 900{
 901	struct rb_node *n = root->rb_node;
 902	struct dm_buffer *b;
 903	struct dm_buffer *best = NULL;
 904
 905	while (n) {
 906		b = container_of(n, struct dm_buffer, node);
 907
 908		if (b->block == block)
 909			return b;
 910
 911		if (block <= b->block) {
 912			n = n->rb_left;
 913			best = b;
 914		} else {
 915			n = n->rb_right;
 916		}
 917	}
 918
 919	return best;
 920}
 921
 922static void __remove_range(struct dm_buffer_cache *bc,
 923			   struct rb_root *root,
 924			   sector_t begin, sector_t end,
 925			   b_predicate pred, b_release release)
 926{
 927	struct dm_buffer *b;
 928
 929	while (true) {
 930		cond_resched();
 931
 932		b = __find_next(root, begin);
 933		if (!b || (b->block >= end))
 934			break;
 935
 936		begin = b->block + 1;
 937
 938		if (atomic_read(&b->hold_count))
 939			continue;
 940
 941		if (pred(b, NULL) == ER_EVICT) {
 942			rb_erase(&b->node, root);
 943			lru_remove(&bc->lru[b->list_mode], &b->lru);
 944			release(b);
 945		}
 946	}
 947}
 948
 949static void cache_remove_range(struct dm_buffer_cache *bc,
 950			       sector_t begin, sector_t end,
 951			       b_predicate pred, b_release release)
 952{
 953	unsigned int i;
 954
 955	BUG_ON(bc->no_sleep);
 956	for (i = 0; i < bc->num_locks; i++) {
 957		down_write(&bc->trees[i].u.lock);
 958		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
 959		up_write(&bc->trees[i].u.lock);
 960	}
 961}
 962
 963/*----------------------------------------------------------------*/
 964
 965/*
 966 * Linking of buffers:
 967 *	All buffers are linked to buffer_cache with their node field.
 968 *
 969 *	Clean buffers that are not being written (B_WRITING not set)
  970 *	are linked to lru[LIST_CLEAN] with their lru field.
 971 *
 972 *	Dirty and clean buffers that are being written are linked to
  973 *	lru[LIST_DIRTY] with their lru field. When the write
 974 *	finishes, the buffer cannot be relinked immediately (because we
 975 *	are in an interrupt context and relinking requires process
  976 *	context), so some clean-not-writing buffers can be held on the
  977 *	dirty lru too.  They are later moved to the clean lru in the
  978 *	process context.
 979 */
 980struct dm_bufio_client {
 981	struct block_device *bdev;
 982	unsigned int block_size;
 983	s8 sectors_per_block_bits;
 984
 985	bool no_sleep;
 986	struct mutex lock;
 987	spinlock_t spinlock;
 988
 989	int async_write_error;
 990
 991	void (*alloc_callback)(struct dm_buffer *buf);
 992	void (*write_callback)(struct dm_buffer *buf);
 993	struct kmem_cache *slab_buffer;
 994	struct kmem_cache *slab_cache;
 995	struct dm_io_client *dm_io;
 996
 997	struct list_head reserved_buffers;
 998	unsigned int need_reserved_buffers;
 999
1000	unsigned int minimum_buffers;
1001
1002	sector_t start;
1003
1004	struct shrinker *shrinker;
1005	struct work_struct shrink_work;
1006	atomic_long_t need_shrink;
1007
1008	wait_queue_head_t free_buffer_wait;
1009
1010	struct list_head client_list;
1011
1012	/*
1013	 * Used by global_cleanup to sort the clients list.
1014	 */
1015	unsigned long oldest_buffer;
1016
1017	struct dm_buffer_cache cache; /* must be last member */
1018};
1019
1020/*----------------------------------------------------------------*/
1021
1022#define dm_bufio_in_request()	(!!current->bio_list)
1023
1024static void dm_bufio_lock(struct dm_bufio_client *c)
1025{
1026	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1027		spin_lock_bh(&c->spinlock);
1028	else
1029		mutex_lock_nested(&c->lock, dm_bufio_in_request());
1030}
1031
1032static void dm_bufio_unlock(struct dm_bufio_client *c)
1033{
1034	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1035		spin_unlock_bh(&c->spinlock);
1036	else
1037		mutex_unlock(&c->lock);
1038}
1039
1040/*----------------------------------------------------------------*/
1041
1042/*
1043 * Default cache size: available memory divided by the ratio.
1044 */
1045static unsigned long dm_bufio_default_cache_size;
1046
1047/*
1048 * Total cache size set by the user.
1049 */
1050static unsigned long dm_bufio_cache_size;
1051
1052/*
1053 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
1054 * at any time.  If it disagrees, the user has changed cache size.
1055 */
1056static unsigned long dm_bufio_cache_size_latch;
1057
1058static DEFINE_SPINLOCK(global_spinlock);
1059
1060/*
1061 * Buffers are freed after this timeout
1062 */
1063static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
1064static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
1065
1066static unsigned long dm_bufio_peak_allocated;
1067static unsigned long dm_bufio_allocated_kmem_cache;
1068static unsigned long dm_bufio_allocated_get_free_pages;
1069static unsigned long dm_bufio_allocated_vmalloc;
1070static unsigned long dm_bufio_current_allocated;
1071
1072/*----------------------------------------------------------------*/
1073
1074/*
1075 * The current number of clients.
1076 */
1077static int dm_bufio_client_count;
1078
1079/*
1080 * The list of all clients.
1081 */
1082static LIST_HEAD(dm_bufio_all_clients);
1083
1084/*
1085 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
1086 */
1087static DEFINE_MUTEX(dm_bufio_clients_lock);
1088
1089static struct workqueue_struct *dm_bufio_wq;
1090static struct delayed_work dm_bufio_cleanup_old_work;
1091static struct work_struct dm_bufio_replacement_work;
1092
1093
1094#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1095static void buffer_record_stack(struct dm_buffer *b)
1096{
1097	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
1098}
1099#endif
1100
1101/*----------------------------------------------------------------*/
1102
1103static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
1104{
1105	unsigned char data_mode;
1106	long diff;
1107
1108	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
1109		&dm_bufio_allocated_kmem_cache,
1110		&dm_bufio_allocated_get_free_pages,
1111		&dm_bufio_allocated_vmalloc,
1112	};
1113
1114	data_mode = b->data_mode;
1115	diff = (long)b->c->block_size;
1116	if (unlink)
1117		diff = -diff;
1118
1119	spin_lock(&global_spinlock);
1120
1121	*class_ptr[data_mode] += diff;
1122
1123	dm_bufio_current_allocated += diff;
1124
1125	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
1126		dm_bufio_peak_allocated = dm_bufio_current_allocated;
1127
1128	if (!unlink) {
1129		if (dm_bufio_current_allocated > dm_bufio_cache_size)
1130			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
1131	}
1132
1133	spin_unlock(&global_spinlock);
1134}
1135
1136/*
1137 * Change the number of clients and recalculate per-client limit.
1138 */
1139static void __cache_size_refresh(void)
1140{
1141	if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
1142		return;
1143	if (WARN_ON(dm_bufio_client_count < 0))
1144		return;
1145
1146	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
1147
1148	/*
1149	 * Use default if set to 0 and report the actual cache size used.
1150	 */
1151	if (!dm_bufio_cache_size_latch) {
1152		(void)cmpxchg(&dm_bufio_cache_size, 0,
1153			      dm_bufio_default_cache_size);
1154		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
1155	}
1156}
1157
1158/*
1159 * Allocating buffer data.
1160 *
1161 * Small buffers are allocated with kmem_cache, to use space optimally.
1162 *
1163 * For large buffers, we choose between get_free_pages and vmalloc.
1164 * Each has advantages and disadvantages.
1165 *
1166 * __get_free_pages can randomly fail if the memory is fragmented.
1167 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
1168 * as low as 128M) so using it for caching is not appropriate.
1169 *
1170 * If the allocation may fail we use __get_free_pages. Memory fragmentation
1171 * won't have a fatal effect here, but it just causes flushes of some other
1172 * buffers and more I/O will be performed. Don't use __get_free_pages if it
1173 * always fails (i.e. order > MAX_PAGE_ORDER).
1174 *
1175 * If the allocation shouldn't fail we use __vmalloc. This is only for the
1176 * initial reserve allocation, so there's no risk of wasting all vmalloc
1177 * space.
1178 */
1179static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1180			       unsigned char *data_mode)
1181{
1182	if (unlikely(c->slab_cache != NULL)) {
1183		*data_mode = DATA_MODE_SLAB;
1184		return kmem_cache_alloc(c->slab_cache, gfp_mask);
1185	}
1186
1187	if (c->block_size <= KMALLOC_MAX_SIZE &&
1188	    gfp_mask & __GFP_NORETRY) {
1189		*data_mode = DATA_MODE_GET_FREE_PAGES;
1190		return (void *)__get_free_pages(gfp_mask,
1191						c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1192	}
1193
1194	*data_mode = DATA_MODE_VMALLOC;
1195
1196	return __vmalloc(c->block_size, gfp_mask);
1197}
1198
1199/*
1200 * Free buffer's data.
1201 */
1202static void free_buffer_data(struct dm_bufio_client *c,
1203			     void *data, unsigned char data_mode)
1204{
1205	switch (data_mode) {
1206	case DATA_MODE_SLAB:
1207		kmem_cache_free(c->slab_cache, data);
1208		break;
1209
1210	case DATA_MODE_GET_FREE_PAGES:
1211		free_pages((unsigned long)data,
1212			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1213		break;
1214
1215	case DATA_MODE_VMALLOC:
1216		vfree(data);
1217		break;
1218
1219	default:
1220		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
1221		       data_mode);
1222		BUG();
1223	}
1224}
1225
1226/*
1227 * Allocate buffer and its data.
1228 */
1229static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
1230{
1231	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
1232
1233	if (!b)
1234		return NULL;
1235
1236	b->c = c;
1237
1238	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
1239	if (!b->data) {
1240		kmem_cache_free(c->slab_buffer, b);
1241		return NULL;
1242	}
1243	adjust_total_allocated(b, false);
1244
1245#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1246	b->stack_len = 0;
1247#endif
1248	return b;
1249}
1250
1251/*
1252 * Free buffer and its data.
1253 */
1254static void free_buffer(struct dm_buffer *b)
1255{
1256	struct dm_bufio_client *c = b->c;
1257
1258	adjust_total_allocated(b, true);
1259	free_buffer_data(c, b->data, b->data_mode);
1260	kmem_cache_free(c->slab_buffer, b);
1261}
1262
1263/*
1264 *--------------------------------------------------------------------------
1265 * Submit I/O on the buffer.
1266 *
1267 * Bio interface is faster but it has some problems:
1268 *	the vector list is limited (increasing this limit increases
1269 *	memory-consumption per buffer, so it is not viable);
1270 *
1271 *	the memory must be direct-mapped, not vmalloced;
1272 *
1273 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
1274 * it is not vmalloced, try using the bio interface.
1275 *
1276 * If the buffer is big, if it is vmalloced or if the underlying device
1277 * rejects the bio because it is too large, use dm-io layer to do the I/O.
1278 * The dm-io layer splits the I/O into multiple requests, avoiding the above
1279 * shortcomings.
1280 *--------------------------------------------------------------------------
1281 */
1282
1283/*
 1284 * dm-io completion routine. It just calls b->end_io, pretending
 1285 * that the request was handled directly with the bio interface.
1286 */
1287static void dmio_complete(unsigned long error, void *context)
1288{
1289	struct dm_buffer *b = context;
1290
1291	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
1292}
1293
1294static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
1295		     unsigned int n_sectors, unsigned int offset)
1296{
1297	int r;
1298	struct dm_io_request io_req = {
1299		.bi_opf = op,
1300		.notify.fn = dmio_complete,
1301		.notify.context = b,
1302		.client = b->c->dm_io,
1303	};
1304	struct dm_io_region region = {
1305		.bdev = b->c->bdev,
1306		.sector = sector,
1307		.count = n_sectors,
1308	};
1309
1310	if (b->data_mode != DATA_MODE_VMALLOC) {
1311		io_req.mem.type = DM_IO_KMEM;
1312		io_req.mem.ptr.addr = (char *)b->data + offset;
1313	} else {
1314		io_req.mem.type = DM_IO_VMA;
1315		io_req.mem.ptr.vma = (char *)b->data + offset;
1316	}
1317
1318	r = dm_io(&io_req, 1, &region, NULL);
1319	if (unlikely(r))
1320		b->end_io(b, errno_to_blk_status(r));
1321}
1322
1323static void bio_complete(struct bio *bio)
1324{
1325	struct dm_buffer *b = bio->bi_private;
1326	blk_status_t status = bio->bi_status;
1327
1328	bio_uninit(bio);
1329	kfree(bio);
1330	b->end_io(b, status);
1331}
1332
1333static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
1334		    unsigned int n_sectors, unsigned int offset)
1335{
1336	struct bio *bio;
1337	char *ptr;
1338	unsigned int len;
1339
1340	bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
1341	if (!bio) {
1342		use_dmio(b, op, sector, n_sectors, offset);
1343		return;
1344	}
1345	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
1346	bio->bi_iter.bi_sector = sector;
1347	bio->bi_end_io = bio_complete;
1348	bio->bi_private = b;
1349
1350	ptr = (char *)b->data + offset;
1351	len = n_sectors << SECTOR_SHIFT;
1352
1353	__bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));
1354
1355	submit_bio(bio);
1356}
1357
1358static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
1359{
1360	sector_t sector;
1361
1362	if (likely(c->sectors_per_block_bits >= 0))
1363		sector = block << c->sectors_per_block_bits;
1364	else
1365		sector = block * (c->block_size >> SECTOR_SHIFT);
1366	sector += c->start;
1367
1368	return sector;
1369}
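
/*
 * Illustrative arithmetic (not from this file): with a 4096 byte block size
 * there are 8 sectors per block, so sectors_per_block_bits is 3 and block 10
 * maps to sector (10 << 3) + c->start = 80 + c->start.  For a block size
 * that is not a power of two (e.g. 1536 bytes, which is assumed to make
 * sectors_per_block_bits negative in the client setup code outside this
 * excerpt), the multiply path gives block * (1536 >> 9) = block * 3.
 */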
1370
1371static void submit_io(struct dm_buffer *b, enum req_op op,
1372		      void (*end_io)(struct dm_buffer *, blk_status_t))
1373{
1374	unsigned int n_sectors;
1375	sector_t sector;
1376	unsigned int offset, end;
1377
1378	b->end_io = end_io;
1379
1380	sector = block_to_sector(b->c, b->block);
1381
1382	if (op != REQ_OP_WRITE) {
1383		n_sectors = b->c->block_size >> SECTOR_SHIFT;
1384		offset = 0;
1385	} else {
1386		if (b->c->write_callback)
1387			b->c->write_callback(b);
1388		offset = b->write_start;
1389		end = b->write_end;
1390		offset &= -DM_BUFIO_WRITE_ALIGN;
1391		end += DM_BUFIO_WRITE_ALIGN - 1;
1392		end &= -DM_BUFIO_WRITE_ALIGN;
1393		if (unlikely(end > b->c->block_size))
1394			end = b->c->block_size;
1395
1396		sector += offset >> SECTOR_SHIFT;
1397		n_sectors = (end - offset) >> SECTOR_SHIFT;
1398	}
1399
1400	if (b->data_mode != DATA_MODE_VMALLOC)
1401		use_bio(b, op, sector, n_sectors, offset);
1402	else
1403		use_dmio(b, op, sector, n_sectors, offset);
1404}
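
/*
 * Illustrative arithmetic (not from this file) for the write rounding above:
 * with DM_BUFIO_WRITE_ALIGN = 4096 and an 8192 byte block, a dirty range of
 * [100, 5000) rounds to offset = 100 & -4096 = 0 and
 * end = (5000 + 4095) & -4096 = 8192, so n_sectors = (8192 - 0) >> 9 = 16
 * and the whole block is written.
 */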
1405
1406/*
1407 *--------------------------------------------------------------
1408 * Writing dirty buffers
1409 *--------------------------------------------------------------
1410 */
1411
1412/*
1413 * The endio routine for write.
1414 *
1415 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
1416 * it.
1417 */
1418static void write_endio(struct dm_buffer *b, blk_status_t status)
1419{
1420	b->write_error = status;
1421	if (unlikely(status)) {
1422		struct dm_bufio_client *c = b->c;
1423
1424		(void)cmpxchg(&c->async_write_error, 0,
1425				blk_status_to_errno(status));
1426	}
1427
1428	BUG_ON(!test_bit(B_WRITING, &b->state));
1429
1430	smp_mb__before_atomic();
1431	clear_bit(B_WRITING, &b->state);
1432	smp_mb__after_atomic();
1433
1434	wake_up_bit(&b->state, B_WRITING);
1435}
1436
1437/*
1438 * Initiate a write on a dirty buffer, but don't wait for it.
1439 *
1440 * - If the buffer is not dirty, exit.
 1441 * - If there is some previous write going on, wait for it to finish (we can't
1442 *   have two writes on the same buffer simultaneously).
1443 * - Submit our write and don't wait on it. We set B_WRITING indicating
1444 *   that there is a write in progress.
1445 */
1446static void __write_dirty_buffer(struct dm_buffer *b,
1447				 struct list_head *write_list)
1448{
1449	if (!test_bit(B_DIRTY, &b->state))
1450		return;
1451
1452	clear_bit(B_DIRTY, &b->state);
1453	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1454
1455	b->write_start = b->dirty_start;
1456	b->write_end = b->dirty_end;
1457
1458	if (!write_list)
1459		submit_io(b, REQ_OP_WRITE, write_endio);
1460	else
1461		list_add_tail(&b->write_list, write_list);
1462}
1463
1464static void __flush_write_list(struct list_head *write_list)
1465{
1466	struct blk_plug plug;
1467
1468	blk_start_plug(&plug);
1469	while (!list_empty(write_list)) {
1470		struct dm_buffer *b =
1471			list_entry(write_list->next, struct dm_buffer, write_list);
1472		list_del(&b->write_list);
1473		submit_io(b, REQ_OP_WRITE, write_endio);
1474		cond_resched();
1475	}
1476	blk_finish_plug(&plug);
1477}
1478
1479/*
1480 * Wait until any activity on the buffer finishes.  Possibly write the
1481 * buffer if it is dirty.  When this function finishes, there is no I/O
1482 * running on the buffer and the buffer is not dirty.
1483 */
1484static void __make_buffer_clean(struct dm_buffer *b)
1485{
1486	BUG_ON(atomic_read(&b->hold_count));
1487
1488	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
1489	if (!smp_load_acquire(&b->state))	/* fast case */
1490		return;
1491
1492	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1493	__write_dirty_buffer(b, NULL);
1494	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1495}
1496
1497static enum evict_result is_clean(struct dm_buffer *b, void *context)
1498{
1499	struct dm_bufio_client *c = context;
1500
1501	/* These should never happen */
1502	if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
1503		return ER_DONT_EVICT;
1504	if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
1505		return ER_DONT_EVICT;
1506	if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
1507		return ER_DONT_EVICT;
1508
1509	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
1510	    unlikely(test_bit(B_READING, &b->state)))
1511		return ER_DONT_EVICT;
1512
1513	return ER_EVICT;
1514}
1515
1516static enum evict_result is_dirty(struct dm_buffer *b, void *context)
1517{
1518	/* These should never happen */
1519	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1520		return ER_DONT_EVICT;
1521	if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
1522		return ER_DONT_EVICT;
1523
1524	return ER_EVICT;
1525}
1526
1527/*
1528 * Find some buffer that is not held by anybody, clean it, unlink it and
1529 * return it.
1530 */
1531static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
1532{
1533	struct dm_buffer *b;
1534
1535	b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
1536	if (b) {
1537		/* this also waits for pending reads */
1538		__make_buffer_clean(b);
1539		return b;
1540	}
1541
1542	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1543		return NULL;
1544
1545	b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
1546	if (b) {
1547		__make_buffer_clean(b);
1548		return b;
1549	}
1550
1551	return NULL;
1552}
1553
1554/*
1555 * Wait until some other threads free some buffer or release hold count on
1556 * some buffer.
1557 *
1558 * This function is entered with c->lock held, drops it and regains it
1559 * before exiting.
1560 */
1561static void __wait_for_free_buffer(struct dm_bufio_client *c)
1562{
1563	DECLARE_WAITQUEUE(wait, current);
1564
1565	add_wait_queue(&c->free_buffer_wait, &wait);
1566	set_current_state(TASK_UNINTERRUPTIBLE);
1567	dm_bufio_unlock(c);
1568
1569	/*
1570	 * It's possible to miss a wake up event since we don't always
1571	 * hold c->lock when wake_up is called.  So we have a timeout here,
1572	 * just in case.
1573	 */
1574	io_schedule_timeout(5 * HZ);
1575
1576	remove_wait_queue(&c->free_buffer_wait, &wait);
1577
1578	dm_bufio_lock(c);
1579}
1580
1581enum new_flag {
1582	NF_FRESH = 0,
1583	NF_READ = 1,
1584	NF_GET = 2,
1585	NF_PREFETCH = 3
1586};
1587
1588/*
1589 * Allocate a new buffer. If the allocation is not possible, wait until
1590 * some other thread frees a buffer.
1591 *
1592 * May drop the lock and regain it.
1593 */
1594static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
1595{
1596	struct dm_buffer *b;
1597	bool tried_noio_alloc = false;
1598
1599	/*
1600	 * dm-bufio is resistant to allocation failures (it just keeps
 1601	 * one buffer reserved in case all the allocations fail).
1602	 * So set flags to not try too hard:
1603	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
1604	 *		    mutex and wait ourselves.
1605	 *	__GFP_NORETRY: don't retry and rather return failure
1606	 *	__GFP_NOMEMALLOC: don't use emergency reserves
1607	 *	__GFP_NOWARN: don't print a warning in case of failure
1608	 *
1609	 * For debugging, if we set the cache size to 1, no new buffers will
1610	 * be allocated.
1611	 */
1612	while (1) {
1613		if (dm_bufio_cache_size_latch != 1) {
1614			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1615			if (b)
1616				return b;
1617		}
1618
1619		if (nf == NF_PREFETCH)
1620			return NULL;
1621
1622		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
1623			dm_bufio_unlock(c);
1624			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1625			dm_bufio_lock(c);
1626			if (b)
1627				return b;
1628			tried_noio_alloc = true;
1629		}
1630
1631		if (!list_empty(&c->reserved_buffers)) {
1632			b = list_to_buffer(c->reserved_buffers.next);
1633			list_del(&b->lru.list);
1634			c->need_reserved_buffers++;
1635
1636			return b;
1637		}
1638
1639		b = __get_unclaimed_buffer(c);
1640		if (b)
1641			return b;
1642
1643		__wait_for_free_buffer(c);
1644	}
1645}
1646
1647static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
1648{
1649	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
1650
1651	if (!b)
1652		return NULL;
1653
1654	if (c->alloc_callback)
1655		c->alloc_callback(b);
1656
1657	return b;
1658}
1659
1660/*
1661 * Free a buffer and wake other threads waiting for free buffers.
1662 */
1663static void __free_buffer_wake(struct dm_buffer *b)
1664{
1665	struct dm_bufio_client *c = b->c;
1666
1667	b->block = -1;
1668	if (!c->need_reserved_buffers)
1669		free_buffer(b);
1670	else {
1671		list_add(&b->lru.list, &c->reserved_buffers);
1672		c->need_reserved_buffers--;
1673	}
1674
1675	/*
1676	 * We hold the bufio lock here, so no one can add entries to the
1677	 * wait queue anyway.
1678	 */
1679	if (unlikely(waitqueue_active(&c->free_buffer_wait)))
1680		wake_up(&c->free_buffer_wait);
1681}
1682
1683static enum evict_result cleaned(struct dm_buffer *b, void *context)
1684{
1685	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1686		return ER_DONT_EVICT; /* should never happen */
1687
1688	if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
1689		return ER_DONT_EVICT;
1690	else
1691		return ER_EVICT;
1692}
1693
1694static void __move_clean_buffers(struct dm_bufio_client *c)
1695{
1696	cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
1697}
1698
1699struct write_context {
1700	int no_wait;
1701	struct list_head *write_list;
1702};
1703
1704static enum it_action write_one(struct dm_buffer *b, void *context)
1705{
1706	struct write_context *wc = context;
1707
1708	if (wc->no_wait && test_bit(B_WRITING, &b->state))
1709		return IT_COMPLETE;
1710
1711	__write_dirty_buffer(b, wc->write_list);
1712	return IT_NEXT;
1713}
1714
1715static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
1716					struct list_head *write_list)
1717{
1718	struct write_context wc = {.no_wait = no_wait, .write_list = write_list};
1719
1720	__move_clean_buffers(c);
1721	cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
1722}
1723
1724/*
 1725 * Check if we're over the writeback watermark.
 1726 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the
 1727 * number of clean buffers, start writing dirty buffers back asynchronously.
1728 */
1729static void __check_watermark(struct dm_bufio_client *c,
1730			      struct list_head *write_list)
1731{
1732	if (cache_count(&c->cache, LIST_DIRTY) >
1733	    cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
1734		__write_dirty_buffers_async(c, 1, write_list);
1735}
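
/*
 * Illustrative numbers (not from this file): with DM_BUFIO_WRITEBACK_RATIO
 * set to 3 and 100 clean buffers in the cache, asynchronous writeback kicks
 * in once the dirty count exceeds 300.
 */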
1736
1737/*
1738 *--------------------------------------------------------------
1739 * Getting a buffer
1740 *--------------------------------------------------------------
1741 */
1742
1743static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
1744{
1745	/*
 1746	 * Relying on waitqueue_active() is racy, but we sleep
1747	 * with schedule_timeout anyway.
1748	 */
1749	if (cache_put(&c->cache, b) &&
1750	    unlikely(waitqueue_active(&c->free_buffer_wait)))
1751		wake_up(&c->free_buffer_wait);
1752}
1753
1754/*
1755 * This assumes you have already checked the cache to see if the buffer
1756 * is already present (it will recheck after dropping the lock for allocation).
1757 */
1758static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1759				     enum new_flag nf, int *need_submit,
1760				     struct list_head *write_list)
1761{
1762	struct dm_buffer *b, *new_b = NULL;
1763
1764	*need_submit = 0;
1765
1766	/* This can't be called with NF_GET */
1767	if (WARN_ON_ONCE(nf == NF_GET))
1768		return NULL;
1769
1770	new_b = __alloc_buffer_wait(c, nf);
1771	if (!new_b)
1772		return NULL;
1773
1774	/*
1775	 * We've had a period where the mutex was unlocked, so need to
1776	 * recheck the buffer tree.
1777	 */
1778	b = cache_get(&c->cache, block);
1779	if (b) {
1780		__free_buffer_wake(new_b);
1781		goto found_buffer;
1782	}
1783
1784	__check_watermark(c, write_list);
1785
1786	b = new_b;
1787	atomic_set(&b->hold_count, 1);
1788	WRITE_ONCE(b->last_accessed, jiffies);
1789	b->block = block;
1790	b->read_error = 0;
1791	b->write_error = 0;
1792	b->list_mode = LIST_CLEAN;
1793
1794	if (nf == NF_FRESH)
1795		b->state = 0;
1796	else {
1797		b->state = 1 << B_READING;
1798		*need_submit = 1;
1799	}
1800
1801	/*
1802	 * We mustn't insert into the cache until the B_READING state
1803	 * is set.  Otherwise another thread could get it and use
1804	 * it before it had been read.
1805	 */
1806	cache_insert(&c->cache, b);
1807
1808	return b;
1809
1810found_buffer:
1811	if (nf == NF_PREFETCH) {
1812		cache_put_and_wake(c, b);
1813		return NULL;
1814	}
1815
1816	/*
1817	 * Note: it is essential that we don't wait for the buffer to be
1818	 * read if dm_bufio_get function is used. Both dm_bufio_get and
1819	 * dm_bufio_prefetch can be used in the driver request routine.
1820	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1821	 * the same buffer, it would deadlock if we waited.
1822	 */
1823	if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1824		cache_put_and_wake(c, b);
1825		return NULL;
1826	}
1827
 1827
1828	return b;
1829}
1830
1831/*
1832 * The endio routine for reading: set the error, clear the bit and wake up
1833 * anyone waiting on the buffer.
1834 */
1835static void read_endio(struct dm_buffer *b, blk_status_t status)
1836{
1837	b->read_error = status;
1838
1839	BUG_ON(!test_bit(B_READING, &b->state));
1840
1841	smp_mb__before_atomic();
1842	clear_bit(B_READING, &b->state);
1843	smp_mb__after_atomic();
1844
1845	wake_up_bit(&b->state, B_READING);
1846}
1847
1848/*
1849 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1850 * functions is similar except that dm_bufio_new doesn't read the
1851 * buffer from the disk (assuming that the caller overwrites all the data
1852 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1853 */
1854static void *new_read(struct dm_bufio_client *c, sector_t block,
1855		      enum new_flag nf, struct dm_buffer **bp)
1856{
1857	int need_submit = 0;
1858	struct dm_buffer *b;
1859
1860	LIST_HEAD(write_list);
1861
1862	*bp = NULL;
1863
1864	/*
1865	 * Fast path, hopefully the block is already in the cache.  No need
1866	 * to get the client lock for this.
1867	 */
1868	b = cache_get(&c->cache, block);
1869	if (b) {
1870		if (nf == NF_PREFETCH) {
1871			cache_put_and_wake(c, b);
1872			return NULL;
1873		}
1874
1875		/*
1876		 * Note: it is essential that we don't wait for the buffer to be
1877		 * read if dm_bufio_get function is used. Both dm_bufio_get and
1878		 * dm_bufio_prefetch can be used in the driver request routine.
1879		 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1880		 * the same buffer, it would deadlock if we waited.
1881		 */
1882		if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1883			cache_put_and_wake(c, b);
1884			return NULL;
1885		}
1886	}
1887
1888	if (!b) {
1889		if (nf == NF_GET)
1890			return NULL;
1891
1892		dm_bufio_lock(c);
1893		b = __bufio_new(c, block, nf, &need_submit, &write_list);
1894		dm_bufio_unlock(c);
1895	}
1896
1897#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1898	if (b && (atomic_read(&b->hold_count) == 1))
1899		buffer_record_stack(b);
1900#endif
1901
1902	__flush_write_list(&write_list);
1903
1904	if (!b)
1905		return NULL;
1906
1907	if (need_submit)
1908		submit_io(b, REQ_OP_READ, read_endio);
1909
1910	if (nf != NF_GET)	/* we already tested this condition above */
1911		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1912
1913	if (b->read_error) {
1914		int error = blk_status_to_errno(b->read_error);
1915
1916		dm_bufio_release(b);
1917
1918		return ERR_PTR(error);
1919	}
1920
1921	*bp = b;
1922
1923	return b->data;
1924}
1925
1926void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1927		   struct dm_buffer **bp)
1928{
1929	return new_read(c, block, NF_GET, bp);
1930}
1931EXPORT_SYMBOL_GPL(dm_bufio_get);
1932
1933void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1934		    struct dm_buffer **bp)
1935{
1936	if (WARN_ON_ONCE(dm_bufio_in_request()))
1937		return ERR_PTR(-EINVAL);
1938
1939	return new_read(c, block, NF_READ, bp);
1940}
1941EXPORT_SYMBOL_GPL(dm_bufio_read);
1942
1943void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1944		   struct dm_buffer **bp)
1945{
1946	if (WARN_ON_ONCE(dm_bufio_in_request()))
1947		return ERR_PTR(-EINVAL);
1948
1949	return new_read(c, block, NF_FRESH, bp);
1950}
1951EXPORT_SYMBOL_GPL(dm_bufio_new);
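
/*
 * Illustrative sketch (not part of this driver): a typical client
 * read-modify-write cycle using the exported calls above and below.
 * 'c' and 'block' are hypothetical.
 *
 *	struct dm_buffer *b;
 *	void *data = dm_bufio_read(c, block, &b);
 *
 *	if (!IS_ERR_OR_NULL(data)) {
 *		memset(data, 0, 16);			// modify the cached block
 *		dm_bufio_mark_buffer_dirty(b);		// defined further down
 *		dm_bufio_release(b);			// drop the hold count
 *	}
 */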
1952
1953void dm_bufio_prefetch(struct dm_bufio_client *c,
1954		       sector_t block, unsigned int n_blocks)
1955{
1956	struct blk_plug plug;
1957
1958	LIST_HEAD(write_list);
1959
1960	if (WARN_ON_ONCE(dm_bufio_in_request()))
1961		return; /* should never happen */
1962
1963	blk_start_plug(&plug);
1964
1965	for (; n_blocks--; block++) {
1966		int need_submit;
1967		struct dm_buffer *b;
1968
1969		b = cache_get(&c->cache, block);
1970		if (b) {
1971			/* already in cache */
1972			cache_put_and_wake(c, b);
1973			continue;
1974		}
1975
1976		dm_bufio_lock(c);
1977		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1978				&write_list);
1979		if (unlikely(!list_empty(&write_list))) {
1980			dm_bufio_unlock(c);
1981			blk_finish_plug(&plug);
1982			__flush_write_list(&write_list);
1983			blk_start_plug(&plug);
1984			dm_bufio_lock(c);
1985		}
1986		if (unlikely(b != NULL)) {
1987			dm_bufio_unlock(c);
1988
1989			if (need_submit)
1990				submit_io(b, REQ_OP_READ, read_endio);
1991			dm_bufio_release(b);
1992
1993			cond_resched();
1994
1995			if (!n_blocks)
1996				goto flush_plug;
1997			dm_bufio_lock(c);
1998		}
1999		dm_bufio_unlock(c);
2000	}
2001
2002flush_plug:
2003	blk_finish_plug(&plug);
2004}
2005EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
2006
2007void dm_bufio_release(struct dm_buffer *b)
2008{
2009	struct dm_bufio_client *c = b->c;
2010
2011	/*
2012	 * If there were errors on the buffer, and the buffer is not
 2013	 * to be written, free the buffer. There is no point in caching an
 2014	 * invalid buffer.
2015	 */
2016	if ((b->read_error || b->write_error) &&
2017	    !test_bit_acquire(B_READING, &b->state) &&
2018	    !test_bit(B_WRITING, &b->state) &&
2019	    !test_bit(B_DIRTY, &b->state)) {
2020		dm_bufio_lock(c);
2021
2022		/* cache remove can fail if there are other holders */
2023		if (cache_remove(&c->cache, b)) {
2024			__free_buffer_wake(b);
2025			dm_bufio_unlock(c);
2026			return;
2027		}
2028
2029		dm_bufio_unlock(c);
2030	}
2031
2032	cache_put_and_wake(c, b);
2033}
2034EXPORT_SYMBOL_GPL(dm_bufio_release);
2035
2036void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
2037					unsigned int start, unsigned int end)
2038{
2039	struct dm_bufio_client *c = b->c;
2040
2041	BUG_ON(start >= end);
2042	BUG_ON(end > b->c->block_size);
2043
2044	dm_bufio_lock(c);
2045
2046	BUG_ON(test_bit(B_READING, &b->state));
2047
2048	if (!test_and_set_bit(B_DIRTY, &b->state)) {
2049		b->dirty_start = start;
2050		b->dirty_end = end;
2051		cache_mark(&c->cache, b, LIST_DIRTY);
2052	} else {
2053		if (start < b->dirty_start)
2054			b->dirty_start = start;
2055		if (end > b->dirty_end)
2056			b->dirty_end = end;
2057	}
2058
2059	dm_bufio_unlock(c);
2060}
2061EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
2062
2063void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
2064{
2065	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
2066}
2067EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
2068
2069void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
2070{
2071	LIST_HEAD(write_list);
2072
2073	if (WARN_ON_ONCE(dm_bufio_in_request()))
2074		return; /* should never happen */
2075
2076	dm_bufio_lock(c);
2077	__write_dirty_buffers_async(c, 0, &write_list);
2078	dm_bufio_unlock(c);
2079	__flush_write_list(&write_list);
2080}
2081EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
2082
2083/*
2084 * For performance, it is essential that the buffers are written asynchronously
2085 * and simultaneously (so that the block layer can merge the writes) and then
2086 * waited upon.
2087 *
2088 * Finally, we flush the hardware disk cache.
2089 */
2090static bool is_writing(struct lru_entry *e, void *context)
2091{
2092	struct dm_buffer *b = le_to_buffer(e);
2093
2094	return test_bit(B_WRITING, &b->state);
2095}
2096
2097int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
2098{
2099	int a, f;
2100	unsigned long nr_buffers;
2101	struct lru_entry *e;
2102	struct lru_iter it;
2103
2104	LIST_HEAD(write_list);
2105
2106	dm_bufio_lock(c);
2107	__write_dirty_buffers_async(c, 0, &write_list);
2108	dm_bufio_unlock(c);
2109	__flush_write_list(&write_list);
2110	dm_bufio_lock(c);
2111
2112	nr_buffers = cache_count(&c->cache, LIST_DIRTY);
2113	lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
2114	while ((e = lru_iter_next(&it, is_writing, c))) {
2115		struct dm_buffer *b = le_to_buffer(e);
2116		__cache_inc_buffer(b);
2117
2118		BUG_ON(test_bit(B_READING, &b->state));
2119
2120		if (nr_buffers) {
2121			nr_buffers--;
2122			dm_bufio_unlock(c);
2123			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2124			dm_bufio_lock(c);
2125		} else {
2126			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2127		}
2128
2129		if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
2130			cache_mark(&c->cache, b, LIST_CLEAN);
2131
2132		cache_put_and_wake(c, b);
2133
2134		cond_resched();
2135	}
2136	lru_iter_end(&it);
2137
2138	wake_up(&c->free_buffer_wait);
2139	dm_bufio_unlock(c);
2140
2141	a = xchg(&c->async_write_error, 0);
2142	f = dm_bufio_issue_flush(c);
2143	if (a)
2144		return a;
2145
2146	return f;
2147}
2148EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
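/*
 * Illustrative commit sequence (example only, compiled out): mark all changed
 * buffers dirty, then issue a single dm_bufio_write_dirty_buffers() so the
 * writes can be merged and the disk cache is flushed once, as described in
 * the comment above.  The return value carries either a recorded asynchronous
 * write error or the result of the flush.  The helper name is hypothetical.
 */
#if 0
static int example_commit(struct dm_bufio_client *c,
			  struct dm_buffer **bufs, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		dm_bufio_mark_buffer_dirty(bufs[i]);
		dm_bufio_release(bufs[i]);
	}

	/* write everything out, wait for it and flush the disk cache */
	return dm_bufio_write_dirty_buffers(c);
}
#endif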
2149
2150/*
2151 * Use dm-io to send an empty barrier to flush the device.
2152 */
2153int dm_bufio_issue_flush(struct dm_bufio_client *c)
2154{
2155	struct dm_io_request io_req = {
2156		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
2157		.mem.type = DM_IO_KMEM,
2158		.mem.ptr.addr = NULL,
2159		.client = c->dm_io,
2160	};
2161	struct dm_io_region io_reg = {
2162		.bdev = c->bdev,
2163		.sector = 0,
2164		.count = 0,
2165	};
2166
2167	if (WARN_ON_ONCE(dm_bufio_in_request()))
2168		return -EINVAL;
2169
2170	return dm_io(&io_req, 1, &io_reg, NULL);
2171}
2172EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
2173
2174/*
2175 * Use dm-io to send a discard request to the device.
2176 */
2177int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
2178{
2179	struct dm_io_request io_req = {
2180		.bi_opf = REQ_OP_DISCARD | REQ_SYNC,
2181		.mem.type = DM_IO_KMEM,
2182		.mem.ptr.addr = NULL,
2183		.client = c->dm_io,
2184	};
2185	struct dm_io_region io_reg = {
2186		.bdev = c->bdev,
2187		.sector = block_to_sector(c, block),
2188		.count = block_to_sector(c, count),
2189	};
2190
2191	if (WARN_ON_ONCE(dm_bufio_in_request()))
2192		return -EINVAL; /* discards are optional */
2193
2194	return dm_io(&io_req, 1, &io_reg, NULL);
2195}
2196EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
2197
2198static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
2199{
2200	struct dm_buffer *b;
2201
2202	b = cache_get(&c->cache, block);
2203	if (b) {
2204		if (likely(!smp_load_acquire(&b->state))) {
2205			if (cache_remove(&c->cache, b))
2206				__free_buffer_wake(b);
2207			else
2208				cache_put_and_wake(c, b);
2209		} else {
2210			cache_put_and_wake(c, b);
2211		}
2212	}
2213
2214	return b ? true : false;
2215}
2216
2217/*
2218 * Free the given buffer.
2219 *
2220 * This is just a hint; if the buffer is in use or dirty, this function
2221 * does nothing.
2222 */
2223void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
2224{
2225	dm_bufio_lock(c);
2226	forget_buffer(c, block);
2227	dm_bufio_unlock(c);
2228}
2229EXPORT_SYMBOL_GPL(dm_bufio_forget);
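/*
 * Illustrative sketch (example only, compiled out): after discarding a range,
 * the cached copies are stale, so drop them.  dm_bufio_forget() is only a
 * hint, as noted above, so buffers that are still held or dirty stay cached.
 * The helper name is hypothetical.
 */
#if 0
static int example_discard_blocks(struct dm_bufio_client *c,
				  sector_t block, sector_t count)
{
	sector_t i;
	int r;

	r = dm_bufio_issue_discard(c, block, count);

	for (i = 0; i < count; i++)
		dm_bufio_forget(c, block + i);

	return r;
}
#endif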
2230
2231static enum evict_result idle(struct dm_buffer *b, void *context)
2232{
2233	return b->state ? ER_DONT_EVICT : ER_EVICT;
2234}
2235
2236void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
2237{
2238	dm_bufio_lock(c);
2239	cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
2240	dm_bufio_unlock(c);
2241}
2242EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
2243
2244void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
2245{
2246	c->minimum_buffers = n;
2247}
2248EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
2249
2250unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
2251{
2252	return c->block_size;
2253}
2254EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
2255
2256sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
2257{
2258	sector_t s = bdev_nr_sectors(c->bdev);
2259
2260	if (s >= c->start)
2261		s -= c->start;
2262	else
2263		s = 0;
2264	if (likely(c->sectors_per_block_bits >= 0))
2265		s >>= c->sectors_per_block_bits;
2266	else
2267		sector_div(s, c->block_size >> SECTOR_SHIFT);
2268	return s;
2269}
2270EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
2271
2272struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
2273{
2274	return c->dm_io;
2275}
2276EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
2277
2278sector_t dm_bufio_get_block_number(struct dm_buffer *b)
2279{
2280	return b->block;
2281}
2282EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
2283
2284void *dm_bufio_get_block_data(struct dm_buffer *b)
2285{
2286	return b->data;
2287}
2288EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
2289
2290void *dm_bufio_get_aux_data(struct dm_buffer *b)
2291{
2292	return b + 1;
2293}
2294EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
2295
2296struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
2297{
2298	return b->c;
2299}
2300EXPORT_SYMBOL_GPL(dm_bufio_get_client);
2301
2302static enum it_action warn_leak(struct dm_buffer *b, void *context)
2303{
2304	bool *warned = context;
2305
2306	WARN_ON(!(*warned));
2307	*warned = true;
2308	DMERR("leaked buffer %llx, hold count %u, list %d",
2309	      (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode);
2310#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2311	stack_trace_print(b->stack_entries, b->stack_len, 1);
2312	/* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
2313	atomic_set(&b->hold_count, 0);
2314#endif
2315	return IT_NEXT;
2316}
2317
2318static void drop_buffers(struct dm_bufio_client *c)
2319{
2320	int i;
2321	struct dm_buffer *b;
2322
2323	if (WARN_ON(dm_bufio_in_request()))
2324		return; /* should never happen */
2325
2326	/*
2327	 * An optimization so that the buffers are not written one-by-one.
2328	 */
2329	dm_bufio_write_dirty_buffers_async(c);
2330
2331	dm_bufio_lock(c);
2332
2333	while ((b = __get_unclaimed_buffer(c)))
2334		__free_buffer_wake(b);
2335
2336	for (i = 0; i < LIST_SIZE; i++) {
2337		bool warned = false;
2338
2339		cache_iterate(&c->cache, i, warn_leak, &warned);
2340	}
2341
2342#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2343	while ((b = __get_unclaimed_buffer(c)))
2344		__free_buffer_wake(b);
2345#endif
2346
2347	for (i = 0; i < LIST_SIZE; i++)
2348		WARN_ON(cache_count(&c->cache, i));
2349
2350	dm_bufio_unlock(c);
2351}
2352
2353static unsigned long get_retain_buffers(struct dm_bufio_client *c)
2354{
2355	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
2356
2357	if (likely(c->sectors_per_block_bits >= 0))
2358		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
2359	else
2360		retain_bytes /= c->block_size;
2361
2362	return retain_bytes;
2363}
2364
2365static void __scan(struct dm_bufio_client *c)
2366{
2367	int l;
2368	struct dm_buffer *b;
2369	unsigned long freed = 0;
2370	unsigned long retain_target = get_retain_buffers(c);
2371	unsigned long count = cache_total(&c->cache);
2372
2373	for (l = 0; l < LIST_SIZE; l++) {
2374		while (true) {
2375			if (count - freed <= retain_target)
2376				atomic_long_set(&c->need_shrink, 0);
2377			if (!atomic_long_read(&c->need_shrink))
2378				break;
2379
2380			b = cache_evict(&c->cache, l,
2381					l == LIST_CLEAN ? is_clean : is_dirty, c);
2382			if (!b)
2383				break;
2384
2385			__make_buffer_clean(b);
2386			__free_buffer_wake(b);
2387
2388			atomic_long_dec(&c->need_shrink);
2389			freed++;
2390			cond_resched();
2391		}
2392	}
2393}
2394
2395static void shrink_work(struct work_struct *w)
2396{
2397	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
2398
2399	dm_bufio_lock(c);
2400	__scan(c);
2401	dm_bufio_unlock(c);
2402}
2403
2404static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2405{
2406	struct dm_bufio_client *c;
2407
2408	c = shrink->private_data;
2409	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
2410	queue_work(dm_bufio_wq, &c->shrink_work);
2411
2412	return sc->nr_to_scan;
2413}
2414
2415static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
2416{
2417	struct dm_bufio_client *c = shrink->private_data;
2418	unsigned long count = cache_total(&c->cache);
2419	unsigned long retain_target = get_retain_buffers(c);
2420	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
2421
2422	if (unlikely(count < retain_target))
2423		count = 0;
2424	else
2425		count -= retain_target;
2426
2427	if (unlikely(count < queued_for_cleanup))
2428		count = 0;
2429	else
2430		count -= queued_for_cleanup;
2431
2432	return count;
2433}
2434
2435/*
2436 * Create the buffering interface
2437 */
2438struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
2439					       unsigned int reserved_buffers, unsigned int aux_size,
2440					       void (*alloc_callback)(struct dm_buffer *),
2441					       void (*write_callback)(struct dm_buffer *),
2442					       unsigned int flags)
2443{
2444	int r;
2445	unsigned int num_locks;
2446	struct dm_bufio_client *c;
2447	char slab_name[27];
2448
2449	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
2450		DMERR("%s: block size not specified or is not multiple of 512b", __func__);
2451		r = -EINVAL;
2452		goto bad_client;
2453	}
2454
2455	num_locks = dm_num_hash_locks();
2456	c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2457	if (!c) {
2458		r = -ENOMEM;
2459		goto bad_client;
2460	}
2461	cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
2462
2463	c->bdev = bdev;
2464	c->block_size = block_size;
2465	if (is_power_of_2(block_size))
2466		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
2467	else
2468		c->sectors_per_block_bits = -1;
2469
2470	c->alloc_callback = alloc_callback;
2471	c->write_callback = write_callback;
2472
2473	if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
2474		c->no_sleep = true;
2475		static_branch_inc(&no_sleep_enabled);
2476	}
2477
2478	mutex_init(&c->lock);
2479	spin_lock_init(&c->spinlock);
2480	INIT_LIST_HEAD(&c->reserved_buffers);
2481	c->need_reserved_buffers = reserved_buffers;
2482
2483	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
2484
2485	init_waitqueue_head(&c->free_buffer_wait);
2486	c->async_write_error = 0;
2487
2488	c->dm_io = dm_io_client_create();
2489	if (IS_ERR(c->dm_io)) {
2490		r = PTR_ERR(c->dm_io);
2491		goto bad_dm_io;
2492	}
2493
2494	if (block_size <= KMALLOC_MAX_SIZE &&
2495	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
2496		unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
2497
2498		snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size);
2499		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
2500						  SLAB_RECLAIM_ACCOUNT, NULL);
2501		if (!c->slab_cache) {
2502			r = -ENOMEM;
2503			goto bad;
2504		}
2505	}
2506	if (aux_size)
2507		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size);
2508	else
2509		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer");
2510	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
2511					   0, SLAB_RECLAIM_ACCOUNT, NULL);
2512	if (!c->slab_buffer) {
2513		r = -ENOMEM;
2514		goto bad;
2515	}
2516
2517	while (c->need_reserved_buffers) {
2518		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
2519
2520		if (!b) {
2521			r = -ENOMEM;
2522			goto bad;
2523		}
2524		__free_buffer_wake(b);
2525	}
2526
2527	INIT_WORK(&c->shrink_work, shrink_work);
2528	atomic_long_set(&c->need_shrink, 0);
2529
2530	c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
2531				     MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2532	if (!c->shrinker) {
2533		r = -ENOMEM;
2534		goto bad;
2535	}
2536
2537	c->shrinker->count_objects = dm_bufio_shrink_count;
2538	c->shrinker->scan_objects = dm_bufio_shrink_scan;
2539	c->shrinker->seeks = 1;
2540	c->shrinker->batch = 0;
2541	c->shrinker->private_data = c;
2542
2543	shrinker_register(c->shrinker);
2544
2545	mutex_lock(&dm_bufio_clients_lock);
2546	dm_bufio_client_count++;
2547	list_add(&c->client_list, &dm_bufio_all_clients);
2548	__cache_size_refresh();
2549	mutex_unlock(&dm_bufio_clients_lock);
2550
2551	return c;
2552
2553bad:
2554	while (!list_empty(&c->reserved_buffers)) {
2555		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2556
2557		list_del(&b->lru.list);
2558		free_buffer(b);
2559	}
2560	kmem_cache_destroy(c->slab_cache);
2561	kmem_cache_destroy(c->slab_buffer);
2562	dm_io_client_destroy(c->dm_io);
2563bad_dm_io:
2564	mutex_destroy(&c->lock);
2565	if (c->no_sleep)
2566		static_branch_dec(&no_sleep_enabled);
2567	kfree(c);
2568bad_client:
2569	return ERR_PTR(r);
2570}
2571EXPORT_SYMBOL_GPL(dm_bufio_client_create);
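/*
 * Illustrative sketch (example only, compiled out): a target typically creates
 * one client per metadata device in its constructor and destroys it in its
 * destructor.  The block size and helper names are hypothetical; the return
 * value of dm_bufio_client_create() must be checked with IS_ERR().
 */
#if 0
#define EXAMPLE_BLOCK_SIZE	4096	/* must be a multiple of 512 */

static int example_open_client(struct block_device *bdev,
			       struct dm_bufio_client **result)
{
	struct dm_bufio_client *c;

	/* one reserved buffer, no per-buffer aux data, no callbacks */
	c = dm_bufio_client_create(bdev, EXAMPLE_BLOCK_SIZE, 1, 0,
				   NULL, NULL, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	*result = c;
	return 0;
}

static void example_close_client(struct dm_bufio_client *c)
{
	/* all buffers must have been released before this point */
	dm_bufio_client_destroy(c);
}
#endif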
2572
2573/*
2574 * Free the buffering interface.
2575 * It is required that there are no references on any buffers.
2576 */
2577void dm_bufio_client_destroy(struct dm_bufio_client *c)
2578{
2579	unsigned int i;
2580
2581	drop_buffers(c);
2582
2583	shrinker_free(c->shrinker);
2584	flush_work(&c->shrink_work);
2585
2586	mutex_lock(&dm_bufio_clients_lock);
2587
2588	list_del(&c->client_list);
2589	dm_bufio_client_count--;
2590	__cache_size_refresh();
2591
2592	mutex_unlock(&dm_bufio_clients_lock);
2593
2594	WARN_ON(c->need_reserved_buffers);
2595
2596	while (!list_empty(&c->reserved_buffers)) {
2597		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2598
2599		list_del(&b->lru.list);
2600		free_buffer(b);
2601	}
2602
2603	for (i = 0; i < LIST_SIZE; i++)
2604		if (cache_count(&c->cache, i))
2605			DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
2606
2607	for (i = 0; i < LIST_SIZE; i++)
2608		WARN_ON(cache_count(&c->cache, i));
2609
2610	cache_destroy(&c->cache);
2611	kmem_cache_destroy(c->slab_cache);
2612	kmem_cache_destroy(c->slab_buffer);
2613	dm_io_client_destroy(c->dm_io);
2614	mutex_destroy(&c->lock);
2615	if (c->no_sleep)
2616		static_branch_dec(&no_sleep_enabled);
2617	kfree(c);
2618}
2619EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
2620
2621void dm_bufio_client_reset(struct dm_bufio_client *c)
2622{
2623	drop_buffers(c);
2624	flush_work(&c->shrink_work);
2625}
2626EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
2627
2628void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
2629{
2630	c->start = start;
2631}
2632EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
2633
2634/*--------------------------------------------------------------*/
2635
2636static unsigned int get_max_age_hz(void)
2637{
2638	unsigned int max_age = READ_ONCE(dm_bufio_max_age);
2639
2640	if (max_age > UINT_MAX / HZ)
2641		max_age = UINT_MAX / HZ;
2642
2643	return max_age * HZ;
2644}
2645
2646static bool older_than(struct dm_buffer *b, unsigned long age_hz)
2647{
2648	return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
2649}
2650
2651struct evict_params {
2652	gfp_t gfp;
2653	unsigned long age_hz;
2654
2655	/*
2656	 * This gets updated with the largest last_accessed (i.e. most
2657	 * recently used) of the evicted buffers.  It will not be reinitialised
2658	 * by __evict_many(), so you can use it across multiple invocations.
2659	 */
2660	unsigned long last_accessed;
2661};
2662
2663/*
2664 * We may not be able to evict this buffer if IO is pending or the client
2665 * is still using it.
2666 *
2667 * And if GFP_NOFS is used, we must not do any I/O because we hold
2668 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
2669 * rerouted to a different bufio client.
2670 */
2671static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2672{
2673	struct evict_params *params = context;
2674
2675	if (!(params->gfp & __GFP_FS) ||
2676	    (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
2677		if (test_bit_acquire(B_READING, &b->state) ||
2678		    test_bit(B_WRITING, &b->state) ||
2679		    test_bit(B_DIRTY, &b->state))
2680			return ER_DONT_EVICT;
2681	}
2682
2683	return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
2684}
2685
2686static unsigned long __evict_many(struct dm_bufio_client *c,
2687				  struct evict_params *params,
2688				  int list_mode, unsigned long max_count)
2689{
2690	unsigned long count;
2691	unsigned long last_accessed;
2692	struct dm_buffer *b;
2693
2694	for (count = 0; count < max_count; count++) {
2695		b = cache_evict(&c->cache, list_mode, select_for_evict, params);
2696		if (!b)
2697			break;
2698
2699		last_accessed = READ_ONCE(b->last_accessed);
2700		if (time_after_eq(params->last_accessed, last_accessed))
2701			params->last_accessed = last_accessed;
2702
2703		__make_buffer_clean(b);
2704		__free_buffer_wake(b);
2705
2706		cond_resched();
2707	}
2708
2709	return count;
2710}
2711
2712static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
2713{
2714	struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
2715	unsigned long retain = get_retain_buffers(c);
2716	unsigned long count;
2717	LIST_HEAD(write_list);
2718
2719	dm_bufio_lock(c);
2720
2721	__check_watermark(c, &write_list);
2722	if (unlikely(!list_empty(&write_list))) {
2723		dm_bufio_unlock(c);
2724		__flush_write_list(&write_list);
2725		dm_bufio_lock(c);
2726	}
2727
2728	count = cache_total(&c->cache);
2729	if (count > retain)
2730		__evict_many(c, &params, LIST_CLEAN, count - retain);
2731
2732	dm_bufio_unlock(c);
2733}
2734
2735static void cleanup_old_buffers(void)
2736{
2737	unsigned long max_age_hz = get_max_age_hz();
2738	struct dm_bufio_client *c;
2739
2740	mutex_lock(&dm_bufio_clients_lock);
2741
2742	__cache_size_refresh();
2743
2744	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2745		evict_old_buffers(c, max_age_hz);
2746
2747	mutex_unlock(&dm_bufio_clients_lock);
2748}
2749
2750static void work_fn(struct work_struct *w)
2751{
2752	cleanup_old_buffers();
2753
2754	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2755			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2756}
2757
2758/*--------------------------------------------------------------*/
2759
2760/*
2761 * Global cleanup tries to evict the oldest buffers from across _all_
2762 * the clients.  It does this by repeatedly evicting a few buffers from
2763 * the client that holds the oldest buffer.  It's approximate, but hopefully
2764 * good enough.
2765 */
2766static struct dm_bufio_client *__pop_client(void)
2767{
2768	struct list_head *h;
2769
2770	if (list_empty(&dm_bufio_all_clients))
2771		return NULL;
2772
2773	h = dm_bufio_all_clients.next;
2774	list_del(h);
2775	return container_of(h, struct dm_bufio_client, client_list);
2776}
2777
2778/*
2779 * Inserts the client in the global client list based on its
2780 * 'oldest_buffer' field.
2781 */
2782static void __insert_client(struct dm_bufio_client *new_client)
2783{
2784	struct dm_bufio_client *c;
2785	struct list_head *h = dm_bufio_all_clients.next;
2786
2787	while (h != &dm_bufio_all_clients) {
2788		c = container_of(h, struct dm_bufio_client, client_list);
2789		if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
2790			break;
2791		h = h->next;
2792	}
2793
2794	list_add_tail(&new_client->client_list, h);
2795}
2796
2797static unsigned long __evict_a_few(unsigned long nr_buffers)
2798{
2799	unsigned long count;
2800	struct dm_bufio_client *c;
2801	struct evict_params params = {
2802		.gfp = GFP_KERNEL,
2803		.age_hz = 0,
2804		/* set to jiffies in case there are no buffers in this client */
2805		.last_accessed = jiffies
2806	};
2807
2808	c = __pop_client();
2809	if (!c)
2810		return 0;
2811
2812	dm_bufio_lock(c);
2813	count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
2814	dm_bufio_unlock(c);
2815
2816	if (count)
2817		c->oldest_buffer = params.last_accessed;
2818	__insert_client(c);
2819
2820	return count;
2821}
2822
2823static void check_watermarks(void)
2824{
2825	LIST_HEAD(write_list);
2826	struct dm_bufio_client *c;
2827
2828	mutex_lock(&dm_bufio_clients_lock);
2829	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
2830		dm_bufio_lock(c);
2831		__check_watermark(c, &write_list);
2832		dm_bufio_unlock(c);
2833	}
2834	mutex_unlock(&dm_bufio_clients_lock);
2835
2836	__flush_write_list(&write_list);
2837}
2838
2839static void evict_old(void)
2840{
2841	unsigned long threshold = dm_bufio_cache_size -
2842		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
2843
2844	mutex_lock(&dm_bufio_clients_lock);
2845	while (dm_bufio_current_allocated > threshold) {
2846		if (!__evict_a_few(64))
2847			break;
2848		cond_resched();
2849	}
2850	mutex_unlock(&dm_bufio_clients_lock);
2851}
2852
2853static void do_global_cleanup(struct work_struct *w)
2854{
2855	check_watermarks();
2856	evict_old();
2857}
2858
2859/*
2860 *--------------------------------------------------------------
2861 * Module setup
2862 *--------------------------------------------------------------
2863 */
2864
2865/*
2866 * This is called only once for the whole dm_bufio module.
2867 * It initializes the memory limit.
2868 */
2869static int __init dm_bufio_init(void)
2870{
2871	__u64 mem;
2872
2873	dm_bufio_allocated_kmem_cache = 0;
2874	dm_bufio_allocated_get_free_pages = 0;
2875	dm_bufio_allocated_vmalloc = 0;
2876	dm_bufio_current_allocated = 0;
2877
2878	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2879			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2880
2881	if (mem > ULONG_MAX)
2882		mem = ULONG_MAX;
2883
2884#ifdef CONFIG_MMU
2885	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2886		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2887#endif
2888
2889	dm_bufio_default_cache_size = mem;
2890
2891	mutex_lock(&dm_bufio_clients_lock);
2892	__cache_size_refresh();
2893	mutex_unlock(&dm_bufio_clients_lock);
2894
2895	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2896	if (!dm_bufio_wq)
2897		return -ENOMEM;
2898
2899	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2900	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2901	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2902			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2903
2904	return 0;
2905}
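/*
 * Worked example of the limit computed above (illustrative numbers only):
 * assuming DM_BUFIO_MEMORY_PERCENT is 2, a 64-bit machine with 8 GiB of
 * non-highmem RAM gets a default cache size of roughly
 * 8192 MiB * 2 / 100 ~= 164 MiB.  On 32-bit systems the CONFIG_MMU branch
 * above can lower this further, because the cache is also capped to
 * DM_BUFIO_VMALLOC_PERCENT of the (much smaller) vmalloc area.
 */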
2906
2907/*
2908 * This is called once when unloading the dm_bufio module.
2909 */
2910static void __exit dm_bufio_exit(void)
2911{
2912	int bug = 0;
2913
2914	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2915	destroy_workqueue(dm_bufio_wq);
2916
2917	if (dm_bufio_client_count) {
2918		DMCRIT("%s: dm_bufio_client_count leaked: %d",
2919			__func__, dm_bufio_client_count);
2920		bug = 1;
2921	}
2922
2923	if (dm_bufio_current_allocated) {
2924		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2925			__func__, dm_bufio_current_allocated);
2926		bug = 1;
2927	}
2928
2929	if (dm_bufio_allocated_get_free_pages) {
2930		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2931		       __func__, dm_bufio_allocated_get_free_pages);
2932		bug = 1;
2933	}
2934
2935	if (dm_bufio_allocated_vmalloc) {
2936		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
2937		       __func__, dm_bufio_allocated_vmalloc);
2938		bug = 1;
2939	}
2940
2941	WARN_ON(bug); /* leaks are not worth crashing the system */
2942}
2943
2944module_init(dm_bufio_init)
2945module_exit(dm_bufio_exit)
2946
2947module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
2948MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2949
2950module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2951MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2952
2953module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
2954MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2955
2956module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
2957MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2958
2959module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
2960MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2961
2962module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
2963MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2964
2965module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
2966MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2967
2968module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
2969MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2970
2971MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2972MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2973MODULE_LICENSE("GPL");
v3.5.6
 
   1/*
   2 * Copyright (C) 2009-2011 Red Hat, Inc.
   3 *
   4 * Author: Mikulas Patocka <mpatocka@redhat.com>
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include "dm-bufio.h"
  10
  11#include <linux/device-mapper.h>
  12#include <linux/dm-io.h>
  13#include <linux/slab.h>
 
 
  14#include <linux/vmalloc.h>
  15#include <linux/shrinker.h>
  16#include <linux/module.h>
 
 
 
 
 
  17
  18#define DM_MSG_PREFIX "bufio"
  19
  20/*
  21 * Memory management policy:
  22 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
  23 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
  24 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
  25 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
  26 *	dirty buffers.
  27 */
  28#define DM_BUFIO_MIN_BUFFERS		8
  29
  30#define DM_BUFIO_MEMORY_PERCENT		2
  31#define DM_BUFIO_VMALLOC_PERCENT	25
  32#define DM_BUFIO_WRITEBACK_PERCENT	75
 
  33
  34/*
  35 * Check buffer ages in this interval (seconds)
  36 */
  37#define DM_BUFIO_WORK_TIMER_SECS	10
  38
  39/*
  40 * Free buffers when they are older than this (seconds)
  41 */
  42#define DM_BUFIO_DEFAULT_AGE_SECS	60
  43
  44/*
  45 * The number of bvec entries that are embedded directly in the buffer.
  46 * If the chunk size is larger, dm-io is used to do the io.
  47 */
  48#define DM_BUFIO_INLINE_VECS		16
  49
  50/*
  51 * Buffer hash
  52 */
  53#define DM_BUFIO_HASH_BITS	20
  54#define DM_BUFIO_HASH(block) \
  55	((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
  56	 ((1 << DM_BUFIO_HASH_BITS) - 1))
  57
  58/*
  59 * Don't try to use kmem_cache_alloc for blocks larger than this.
  60 * For explanation, see alloc_buffer_data below.
  61 */
  62#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
  63#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))
  64
  65/*
  66 * dm_buffer->list_mode
  67 */
  68#define LIST_CLEAN	0
  69#define LIST_DIRTY	1
  70#define LIST_SIZE	2
  71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  72/*
  73 * Linking of buffers:
  74 *	All buffers are linked to cache_hash with their hash_list field.
  75 *
  76 *	Clean buffers that are not being written (B_WRITING not set)
  77 *	are linked to lru[LIST_CLEAN] with their lru_list field.
  78 *
  79 *	Dirty and clean buffers that are being written are linked to
  80 *	lru[LIST_DIRTY] with their lru_list field. When the write
  81 *	finishes, the buffer cannot be relinked immediately (because we
  82 *	are in an interrupt context and relinking requires process
  83 *	context), so some clean-not-writing buffers can be held on
  84 *	dirty_lru too.  They are later added to lru in the process
  85 *	context.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  86 */
  87struct dm_bufio_client {
  88	struct mutex lock;
 
  89
  90	struct list_head lru[LIST_SIZE];
  91	unsigned long n_buffers[LIST_SIZE];
 
 
 
 
 
  92
  93	struct block_device *bdev;
  94	unsigned block_size;
  95	unsigned char sectors_per_block_bits;
  96	unsigned char pages_per_block_bits;
  97	unsigned char blocks_per_page_bits;
  98	unsigned aux_size;
  99	void (*alloc_callback)(struct dm_buffer *);
 100	void (*write_callback)(struct dm_buffer *);
 101
 102	struct dm_io_client *dm_io;
 103
 104	struct list_head reserved_buffers;
 105	unsigned need_reserved_buffers;
 
 
 
 
 
 
 
 
 
 
 
 
 
 106
 107	struct hlist_head *cache_hash;
 108	wait_queue_head_t free_buffer_wait;
 
 
 
 
 
 109
 110	int async_write_error;
 111
 112	struct list_head client_list;
 113	struct shrinker shrinker;
 
 
 
 
 
 
 114};
 115
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 116/*
 117 * Buffer state bits.
 118 */
 119#define B_READING	0
 120#define B_WRITING	1
 121#define B_DIRTY		2
 122
 123/*
 124 * Describes how the block was allocated:
 125 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 126 * See the comment at alloc_buffer_data.
 127 */
 128enum data_mode {
 129	DATA_MODE_SLAB = 0,
 130	DATA_MODE_GET_FREE_PAGES = 1,
 131	DATA_MODE_VMALLOC = 2,
 132	DATA_MODE_LIMIT = 3
 133};
 134
 135struct dm_buffer {
 136	struct hlist_node hash_list;
 137	struct list_head lru_list;
 
 
 138	sector_t block;
 139	void *data;
 140	enum data_mode data_mode;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 141	unsigned char list_mode;		/* LIST_* */
 142	unsigned hold_count;
 143	int read_error;
 144	int write_error;
 145	unsigned long state;
 146	unsigned long last_accessed;
 
 
 147	struct dm_bufio_client *c;
 148	struct bio bio;
 149	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 150};
 151
 152/*----------------------------------------------------------------*/
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 153
 154static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
 155static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
 156
 157static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
 
 158{
 159	unsigned ret = c->blocks_per_page_bits - 1;
 
 
 
 
 
 
 
 
 
 
 160
 161	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
 
 
 162
 163	return ret;
 
 
 
 
 
 
 164}
 165
 166#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
 167#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])
 
 
 168
 169#define dm_bufio_in_request()	(!!current->bio_list)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 170
 171static void dm_bufio_lock(struct dm_bufio_client *c)
 172{
 173	mutex_lock_nested(&c->lock, dm_bufio_in_request());
 
 
 
 
 
 
 
 
 
 
 
 
 174}
 175
 176static int dm_bufio_trylock(struct dm_bufio_client *c)
 
 
 
 
 
 
 
 
 177{
 178	return mutex_trylock(&c->lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 179}
 180
 181static void dm_bufio_unlock(struct dm_bufio_client *c)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 182{
 183	mutex_unlock(&c->lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 184}
 185
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 186/*
 187 * FIXME Move to sched.h?
 
 
 
 
 
 
 
 
 
 
 
 
 188 */
 189#ifdef CONFIG_PREEMPT_VOLUNTARY
 190#  define dm_bufio_cond_resched()		\
 191do {						\
 192	if (unlikely(need_resched()))		\
 193		_cond_resched();		\
 194} while (0)
 195#else
 196#  define dm_bufio_cond_resched()                do { } while (0)
 197#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 198
 199/*----------------------------------------------------------------*/
 200
 201/*
 202 * Default cache size: available memory divided by the ratio.
 203 */
 204static unsigned long dm_bufio_default_cache_size;
 205
 206/*
 207 * Total cache size set by the user.
 208 */
 209static unsigned long dm_bufio_cache_size;
 210
 211/*
 212 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 213 * at any time.  If it disagrees, the user has changed cache size.
 214 */
 215static unsigned long dm_bufio_cache_size_latch;
 216
 217static DEFINE_SPINLOCK(param_spinlock);
 218
 219/*
 220 * Buffers are freed after this timeout
 221 */
 222static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
 
 223
 224static unsigned long dm_bufio_peak_allocated;
 225static unsigned long dm_bufio_allocated_kmem_cache;
 226static unsigned long dm_bufio_allocated_get_free_pages;
 227static unsigned long dm_bufio_allocated_vmalloc;
 228static unsigned long dm_bufio_current_allocated;
 229
 230/*----------------------------------------------------------------*/
 231
 232/*
 233 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 234 */
 235static unsigned long dm_bufio_cache_size_per_client;
 236
 237/*
 238 * The current number of clients.
 239 */
 240static int dm_bufio_client_count;
 241
 242/*
 243 * The list of all clients.
 244 */
 245static LIST_HEAD(dm_bufio_all_clients);
 246
 247/*
 248 * This mutex protects dm_bufio_cache_size_latch,
 249 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 250 */
 251static DEFINE_MUTEX(dm_bufio_clients_lock);
 252
 
 
 
 
 
 
 
 
 
 
 
 
 253/*----------------------------------------------------------------*/
 254
 255static void adjust_total_allocated(enum data_mode data_mode, long diff)
 256{
 
 
 
 257	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
 258		&dm_bufio_allocated_kmem_cache,
 259		&dm_bufio_allocated_get_free_pages,
 260		&dm_bufio_allocated_vmalloc,
 261	};
 262
 263	spin_lock(&param_spinlock);
 
 
 
 
 
 264
 265	*class_ptr[data_mode] += diff;
 266
 267	dm_bufio_current_allocated += diff;
 268
 269	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
 270		dm_bufio_peak_allocated = dm_bufio_current_allocated;
 271
 272	spin_unlock(&param_spinlock);
 
 
 
 
 
 273}
 274
 275/*
 276 * Change the number of clients and recalculate per-client limit.
 277 */
 278static void __cache_size_refresh(void)
 279{
 280	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
 281	BUG_ON(dm_bufio_client_count < 0);
 
 
 282
 283	dm_bufio_cache_size_latch = dm_bufio_cache_size;
 284
 285	barrier();
 286
 287	/*
 288	 * Use default if set to 0 and report the actual cache size used.
 289	 */
 290	if (!dm_bufio_cache_size_latch) {
 291		(void)cmpxchg(&dm_bufio_cache_size, 0,
 292			      dm_bufio_default_cache_size);
 293		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
 294	}
 295
 296	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
 297					 (dm_bufio_client_count ? : 1);
 298}
 299
 300/*
 301 * Allocating buffer data.
 302 *
 303 * Small buffers are allocated with kmem_cache, to use space optimally.
 304 *
 305 * For large buffers, we choose between get_free_pages and vmalloc.
 306 * Each has advantages and disadvantages.
 307 *
 308 * __get_free_pages can randomly fail if the memory is fragmented.
 309 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 310 * as low as 128M) so using it for caching is not appropriate.
 311 *
 312 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 313 * won't have a fatal effect here, but it just causes flushes of some other
 314 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 315 * always fails (i.e. order >= MAX_ORDER).
 316 *
 317 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 318 * initial reserve allocation, so there's no risk of wasting all vmalloc
 319 * space.
 320 */
 321static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 322			       enum data_mode *data_mode)
 323{
 324	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
 325		*data_mode = DATA_MODE_SLAB;
 326		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
 327	}
 328
 329	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
 330	    gfp_mask & __GFP_NORETRY) {
 331		*data_mode = DATA_MODE_GET_FREE_PAGES;
 332		return (void *)__get_free_pages(gfp_mask,
 333						c->pages_per_block_bits);
 334	}
 335
 336	*data_mode = DATA_MODE_VMALLOC;
 337	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 
 338}
 339
 340/*
 341 * Free buffer's data.
 342 */
 343static void free_buffer_data(struct dm_bufio_client *c,
 344			     void *data, enum data_mode data_mode)
 345{
 346	switch (data_mode) {
 347	case DATA_MODE_SLAB:
 348		kmem_cache_free(DM_BUFIO_CACHE(c), data);
 349		break;
 350
 351	case DATA_MODE_GET_FREE_PAGES:
 352		free_pages((unsigned long)data, c->pages_per_block_bits);
 
 353		break;
 354
 355	case DATA_MODE_VMALLOC:
 356		vfree(data);
 357		break;
 358
 359	default:
 360		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
 361		       data_mode);
 362		BUG();
 363	}
 364}
 365
 366/*
 367 * Allocate buffer and its data.
 368 */
 369static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
 370{
 371	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
 372				      gfp_mask);
 373
 374	if (!b)
 375		return NULL;
 376
 377	b->c = c;
 378
 379	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
 380	if (!b->data) {
 381		kfree(b);
 382		return NULL;
 383	}
 
 384
 385	adjust_total_allocated(b->data_mode, (long)c->block_size);
 386
 
 387	return b;
 388}
 389
 390/*
 391 * Free buffer and its data.
 392 */
 393static void free_buffer(struct dm_buffer *b)
 394{
 395	struct dm_bufio_client *c = b->c;
 396
 397	adjust_total_allocated(b->data_mode, -(long)c->block_size);
 398
 399	free_buffer_data(c, b->data, b->data_mode);
 400	kfree(b);
 401}
 402
 403/*
 404 * Link buffer to the hash list and clean or dirty queue.
 405 */
 406static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
 407{
 408	struct dm_bufio_client *c = b->c;
 409
 410	c->n_buffers[dirty]++;
 411	b->block = block;
 412	b->list_mode = dirty;
 413	list_add(&b->lru_list, &c->lru[dirty]);
 414	hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
 415	b->last_accessed = jiffies;
 416}
 417
 418/*
 419 * Unlink buffer from the hash list and dirty or clean queue.
 420 */
 421static void __unlink_buffer(struct dm_buffer *b)
 422{
 423	struct dm_bufio_client *c = b->c;
 424
 425	BUG_ON(!c->n_buffers[b->list_mode]);
 426
 427	c->n_buffers[b->list_mode]--;
 428	hlist_del(&b->hash_list);
 429	list_del(&b->lru_list);
 430}
 431
 432/*
 433 * Place the buffer to the head of dirty or clean LRU queue.
 434 */
 435static void __relink_lru(struct dm_buffer *b, int dirty)
 436{
 437	struct dm_bufio_client *c = b->c;
 438
 439	BUG_ON(!c->n_buffers[b->list_mode]);
 440
 441	c->n_buffers[b->list_mode]--;
 442	c->n_buffers[dirty]++;
 443	b->list_mode = dirty;
 444	list_del(&b->lru_list);
 445	list_add(&b->lru_list, &c->lru[dirty]);
 446}
 447
 448/*----------------------------------------------------------------
 449 * Submit I/O on the buffer.
 450 *
 451 * Bio interface is faster but it has some problems:
 452 *	the vector list is limited (increasing this limit increases
 453 *	memory-consumption per buffer, so it is not viable);
 454 *
 455 *	the memory must be direct-mapped, not vmalloced;
 456 *
 457 *	the I/O driver can reject requests spuriously if it thinks that
 458 *	the requests are too big for the device or if they cross a
 459 *	controller-defined memory boundary.
 460 *
 461 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 462 * it is not vmalloced, try using the bio interface.
 463 *
 464 * If the buffer is big, if it is vmalloced or if the underlying device
 465 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 466 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 467 * shortcomings.
 468 *--------------------------------------------------------------*/
 
 469
 470/*
 471 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 472 * that the request was handled directly with bio interface.
 473 */
 474static void dmio_complete(unsigned long error, void *context)
 475{
 476	struct dm_buffer *b = context;
 477
 478	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
 479}
 480
 481static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
 482		     bio_end_io_t *end_io)
 483{
 484	int r;
 485	struct dm_io_request io_req = {
 486		.bi_rw = rw,
 487		.notify.fn = dmio_complete,
 488		.notify.context = b,
 489		.client = b->c->dm_io,
 490	};
 491	struct dm_io_region region = {
 492		.bdev = b->c->bdev,
 493		.sector = block << b->c->sectors_per_block_bits,
 494		.count = b->c->block_size >> SECTOR_SHIFT,
 495	};
 496
 497	if (b->data_mode != DATA_MODE_VMALLOC) {
 498		io_req.mem.type = DM_IO_KMEM;
 499		io_req.mem.ptr.addr = b->data;
 500	} else {
 501		io_req.mem.type = DM_IO_VMA;
 502		io_req.mem.ptr.vma = b->data;
 503	}
 504
 505	b->bio.bi_end_io = end_io;
 
 
 
 
 
 
 
 
 506
 507	r = dm_io(&io_req, 1, &region, NULL);
 508	if (r)
 509		end_io(&b->bio, r);
 510}
 511
 512static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 513			   bio_end_io_t *end_io)
 514{
 
 515	char *ptr;
 516	int len;
 517
 518	bio_init(&b->bio);
 519	b->bio.bi_io_vec = b->bio_vec;
 520	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
 521	b->bio.bi_sector = block << b->c->sectors_per_block_bits;
 522	b->bio.bi_bdev = b->c->bdev;
 523	b->bio.bi_end_io = end_io;
 
 
 
 524
 525	/*
 526	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
 527	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
 528	 */
 529	ptr = b->data;
 530	len = b->c->block_size;
 531
 532	if (len >= PAGE_SIZE)
 533		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
 534	else
 535		BUG_ON((unsigned long)ptr & (len - 1));
 536
 537	do {
 538		if (!bio_add_page(&b->bio, virt_to_page(ptr),
 539				  len < PAGE_SIZE ? len : PAGE_SIZE,
 540				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
 541			BUG_ON(b->c->block_size <= PAGE_SIZE);
 542			use_dmio(b, rw, block, end_io);
 543			return;
 544		}
 545
 546		len -= PAGE_SIZE;
 547		ptr += PAGE_SIZE;
 548	} while (len > 0);
 
 
 549
 550	submit_bio(rw, &b->bio);
 551}
 552
 553static void submit_io(struct dm_buffer *b, int rw, sector_t block,
 554		      bio_end_io_t *end_io)
 555{
 556	if (rw == WRITE && b->c->write_callback)
 557		b->c->write_callback(b);
 
 
 
 558
 559	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
 560	    b->data_mode != DATA_MODE_VMALLOC)
 561		use_inline_bio(b, rw, block, end_io);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 562	else
 563		use_dmio(b, rw, block, end_io);
 564}
 565
 566/*----------------------------------------------------------------
 
 567 * Writing dirty buffers
 568 *--------------------------------------------------------------*/
 
 569
 570/*
 571 * The endio routine for write.
 572 *
 573 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 574 * it.
 575 */
 576static void write_endio(struct bio *bio, int error)
 577{
 578	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
 
 579
 580	b->write_error = error;
 581	if (unlikely(error)) {
 582		struct dm_bufio_client *c = b->c;
 583		(void)cmpxchg(&c->async_write_error, 0, error);
 584	}
 585
 586	BUG_ON(!test_bit(B_WRITING, &b->state));
 587
 588	smp_mb__before_clear_bit();
 589	clear_bit(B_WRITING, &b->state);
 590	smp_mb__after_clear_bit();
 591
 592	wake_up_bit(&b->state, B_WRITING);
 593}
 594
 595/*
 596 * This function is called when wait_on_bit is actually waiting.
 597 */
 598static int do_io_schedule(void *word)
 599{
 600	io_schedule();
 601
 602	return 0;
 603}
 604
 605/*
 606 * Initiate a write on a dirty buffer, but don't wait for it.
 607 *
 608 * - If the buffer is not dirty, exit.
 609 * - If there some previous write going on, wait for it to finish (we can't
 610 *   have two writes on the same buffer simultaneously).
 611 * - Submit our write and don't wait on it. We set B_WRITING indicating
 612 *   that there is a write in progress.
 613 */
 614static void __write_dirty_buffer(struct dm_buffer *b)
 
 615{
 616	if (!test_bit(B_DIRTY, &b->state))
 617		return;
 618
 619	clear_bit(B_DIRTY, &b->state);
 620	wait_on_bit_lock(&b->state, B_WRITING,
 621			 do_io_schedule, TASK_UNINTERRUPTIBLE);
 
 
 
 
 
 
 
 
 
 
 
 
 622
 623	submit_io(b, WRITE, b->block, write_endio);
 
 
 
 
 
 
 
 
 624}
 625
 626/*
 627 * Wait until any activity on the buffer finishes.  Possibly write the
 628 * buffer if it is dirty.  When this function finishes, there is no I/O
 629 * running on the buffer and the buffer is not dirty.
 630 */
 631static void __make_buffer_clean(struct dm_buffer *b)
 632{
 633	BUG_ON(b->hold_count);
 634
 635	if (!b->state)	/* fast case */
 
 636		return;
 637
 638	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
 639	__write_dirty_buffer(b);
 640	wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 641}
 642
 643/*
 644 * Find some buffer that is not held by anybody, clean it, unlink it and
 645 * return it.
 646 */
 647static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
 648{
 649	struct dm_buffer *b;
 650
 651	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
 652		BUG_ON(test_bit(B_WRITING, &b->state));
 653		BUG_ON(test_bit(B_DIRTY, &b->state));
 654
 655		if (!b->hold_count) {
 656			__make_buffer_clean(b);
 657			__unlink_buffer(b);
 658			return b;
 659		}
 660		dm_bufio_cond_resched();
 661	}
 662
 663	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
 664		BUG_ON(test_bit(B_READING, &b->state));
 665
 666		if (!b->hold_count) {
 667			__make_buffer_clean(b);
 668			__unlink_buffer(b);
 669			return b;
 670		}
 671		dm_bufio_cond_resched();
 672	}
 673
 674	return NULL;
 675}
 676
 677/*
 678 * Wait until some other threads free some buffer or release hold count on
 679 * some buffer.
 680 *
 681 * This function is entered with c->lock held, drops it and regains it
 682 * before exiting.
 683 */
 684static void __wait_for_free_buffer(struct dm_bufio_client *c)
 685{
 686	DECLARE_WAITQUEUE(wait, current);
 687
 688	add_wait_queue(&c->free_buffer_wait, &wait);
 689	set_task_state(current, TASK_UNINTERRUPTIBLE);
 690	dm_bufio_unlock(c);
 691
 692	io_schedule();
 
 
 
 
 
 693
 694	set_task_state(current, TASK_RUNNING);
 695	remove_wait_queue(&c->free_buffer_wait, &wait);
 696
 697	dm_bufio_lock(c);
 698}
 699
 700enum new_flag {
 701	NF_FRESH = 0,
 702	NF_READ = 1,
 703	NF_GET = 2,
 704	NF_PREFETCH = 3
 705};
 706
 707/*
 708 * Allocate a new buffer. If the allocation is not possible, wait until
 709 * some other thread frees a buffer.
 710 *
 711 * May drop the lock and regain it.
 712 */
 713static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
 714{
 715	struct dm_buffer *b;
 
 716
 717	/*
 718	 * dm-bufio is resistant to allocation failures (it just keeps
 719	 * one buffer reserved in cases all the allocations fail).
 720	 * So set flags to not try too hard:
 721	 *	GFP_NOIO: don't recurse into the I/O layer
 
 722	 *	__GFP_NORETRY: don't retry and rather return failure
 723	 *	__GFP_NOMEMALLOC: don't use emergency reserves
 724	 *	__GFP_NOWARN: don't print a warning in case of failure
 725	 *
 726	 * For debugging, if we set the cache size to 1, no new buffers will
 727	 * be allocated.
 728	 */
 729	while (1) {
 730		if (dm_bufio_cache_size_latch != 1) {
 731			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 732			if (b)
 733				return b;
 734		}
 735
 736		if (nf == NF_PREFETCH)
 737			return NULL;
 738
 
 
 
 
 
 
 
 
 
 739		if (!list_empty(&c->reserved_buffers)) {
 740			b = list_entry(c->reserved_buffers.next,
 741				       struct dm_buffer, lru_list);
 742			list_del(&b->lru_list);
 743			c->need_reserved_buffers++;
 744
 745			return b;
 746		}
 747
 748		b = __get_unclaimed_buffer(c);
 749		if (b)
 750			return b;
 751
 752		__wait_for_free_buffer(c);
 753	}
 754}
 755
 756static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
 757{
 758	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
 759
 760	if (!b)
 761		return NULL;
 762
 763	if (c->alloc_callback)
 764		c->alloc_callback(b);
 765
 766	return b;
 767}
 768
 769/*
 770 * Free a buffer and wake other threads waiting for free buffers.
 771 */
 772static void __free_buffer_wake(struct dm_buffer *b)
 773{
 774	struct dm_bufio_client *c = b->c;
 775
 
 776	if (!c->need_reserved_buffers)
 777		free_buffer(b);
 778	else {
 779		list_add(&b->lru_list, &c->reserved_buffers);
 780		c->need_reserved_buffers--;
 781	}
 782
 783	wake_up(&c->free_buffer_wait);
 
 
 
 
 
 784}
 785
 786static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
 787{
 788	struct dm_buffer *b, *tmp;
 
 789
 790	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
 791		BUG_ON(test_bit(B_READING, &b->state));
 
 
 
 792
 793		if (!test_bit(B_DIRTY, &b->state) &&
 794		    !test_bit(B_WRITING, &b->state)) {
 795			__relink_lru(b, LIST_CLEAN);
 796			continue;
 797		}
 798
 799		if (no_wait && test_bit(B_WRITING, &b->state))
 800			return;
 801
 802		__write_dirty_buffer(b);
 803		dm_bufio_cond_resched();
 804	}
 805}
 806
 807/*
 808 * Get writeback threshold and buffer limit for a given client.
 809 */
 810static void __get_memory_limit(struct dm_bufio_client *c,
 811			       unsigned long *threshold_buffers,
 812			       unsigned long *limit_buffers)
 813{
 814	unsigned long buffers;
 815
 816	if (dm_bufio_cache_size != dm_bufio_cache_size_latch) {
 817		mutex_lock(&dm_bufio_clients_lock);
 818		__cache_size_refresh();
 819		mutex_unlock(&dm_bufio_clients_lock);
 820	}
 821
 822	buffers = dm_bufio_cache_size_per_client >>
 823		  (c->sectors_per_block_bits + SECTOR_SHIFT);
 
 824
 825	if (buffers < DM_BUFIO_MIN_BUFFERS)
 826		buffers = DM_BUFIO_MIN_BUFFERS;
 
 
 827
 828	*limit_buffers = buffers;
 829	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
 830}
 831
 832/*
 833 * Check if we're over watermark.
 834 * If we are over threshold_buffers, start freeing buffers.
 835 * If we're over "limit_buffers", block until we get under the limit.
 836 */
 837static void __check_watermark(struct dm_bufio_client *c)
 
 838{
 839	unsigned long threshold_buffers, limit_buffers;
 840
 841	__get_memory_limit(c, &threshold_buffers, &limit_buffers);
 842
 843	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
 844	       limit_buffers) {
 845
 846		struct dm_buffer *b = __get_unclaimed_buffer(c);
 847
 848		if (!b)
 849			return;
 850
 851		__free_buffer_wake(b);
 852		dm_bufio_cond_resched();
 853	}
 854
 855	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
 856		__write_dirty_buffers_async(c, 1);
 857}
 858
 859/*
 860 * Find a buffer in the hash.
 
 
 861 */
 862static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
 
 863{
 864	struct dm_buffer *b;
 865	struct hlist_node *hn;
 866
 867	hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
 868			     hash_list) {
 869		dm_bufio_cond_resched();
 870		if (b->block == block)
 871			return b;
 872	}
 873
 874	return NULL;
 875}
 876
 877/*----------------------------------------------------------------
 878 * Getting a buffer
 879 *--------------------------------------------------------------*/
 880
 881static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 882				     enum new_flag nf, int *need_submit)
 
 883{
 884	struct dm_buffer *b, *new_b = NULL;
 885
 886	*need_submit = 0;
 887
 888	b = __find(c, block);
 889	if (b)
 890		goto found_buffer;
 891
 892	if (nf == NF_GET)
 893		return NULL;
 894
 895	new_b = __alloc_buffer_wait(c, nf);
 896	if (!new_b)
 897		return NULL;
 898
 899	/*
 900	 * We've had a period where the mutex was unlocked, so need to
 901	 * recheck the hash table.
 902	 */
 903	b = __find(c, block);
 904	if (b) {
 905		__free_buffer_wake(new_b);
 906		goto found_buffer;
 907	}
 908
 909	__check_watermark(c);
 910
 911	b = new_b;
 912	b->hold_count = 1;
 
 
 913	b->read_error = 0;
 914	b->write_error = 0;
 915	__link_buffer(b, block, LIST_CLEAN);
 916
 917	if (nf == NF_FRESH) {
 918		b->state = 0;
 919		return b;
 
 
 920	}
 921
 922	b->state = 1 << B_READING;
 923	*need_submit = 1;
 
 
 
 
 924
 925	return b;
 926
 927found_buffer:
 928	if (nf == NF_PREFETCH)
 
 929		return NULL;
 
 
 930	/*
 931	 * Note: it is essential that we don't wait for the buffer to be
 932	 * read if dm_bufio_get function is used. Both dm_bufio_get and
 933	 * dm_bufio_prefetch can be used in the driver request routine.
 934	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
 935	 * the same buffer, it would deadlock if we waited.
 936	 */
 937	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
 
 938		return NULL;
 
 939
 940	b->hold_count++;
 941	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
 942		     test_bit(B_WRITING, &b->state));
 943	return b;
 944}
 945
 946/*
 947 * The endio routine for reading: set the error, clear the bit and wake up
 948 * anyone waiting on the buffer.
 949 */
 950static void read_endio(struct bio *bio, int error)
 951{
 952	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 953
 954	b->read_error = error;
 955
 956	BUG_ON(!test_bit(B_READING, &b->state));
 957
 958	smp_mb__before_clear_bit();
 959	clear_bit(B_READING, &b->state);
 960	smp_mb__after_clear_bit();
 961
 962	wake_up_bit(&b->state, B_READING);
 963}
 964
 965/*
 966 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 967 * functions is similar except that dm_bufio_new doesn't read the
 968 * buffer from the disk (assuming that the caller overwrites all the data
 969 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 970 */
 971static void *new_read(struct dm_bufio_client *c, sector_t block,
 972		      enum new_flag nf, struct dm_buffer **bp)
 973{
 974	int need_submit;
 975	struct dm_buffer *b;
 976
 977	dm_bufio_lock(c);
 978	b = __bufio_new(c, block, nf, &need_submit);
 979	dm_bufio_unlock(c);
 980
 981	if (!b)
 982		return b;
 983
 984	if (need_submit)
 985		submit_io(b, READ, b->block, read_endio);
 986
 987	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
 988
 989	if (b->read_error) {
 990		int error = b->read_error;
 991
 992		dm_bufio_release(b);
 993
 994		return ERR_PTR(error);
 995	}
 996
 997	*bp = b;
 998
 999	return b->data;
1000}
1001
1002void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1003		   struct dm_buffer **bp)
1004{
1005	return new_read(c, block, NF_GET, bp);
1006}
1007EXPORT_SYMBOL_GPL(dm_bufio_get);
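/*
 * Illustrative sketch, not part of the original source: dm_bufio_get() only
 * returns a buffer that is already cached and fully read; it does not allocate
 * memory or wait for I/O, which is why it may be called from a driver's
 * request routine.  "c" and "block" below are placeholders for the caller's
 * client and block number.
 *
 *	struct dm_buffer *b;
 *	void *data = dm_bufio_get(c, block, &b);
 *
 *	if (!IS_ERR_OR_NULL(data)) {
 *		... use the cached copy of the block ...
 *		dm_bufio_release(b);
 *	}
 */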
1008
1009void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1010		    struct dm_buffer **bp)
1011{
1012	BUG_ON(dm_bufio_in_request());
1013
1014	return new_read(c, block, NF_READ, bp);
1015}
1016EXPORT_SYMBOL_GPL(dm_bufio_read);
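/*
 * Illustrative sketch, not part of the original source: a typical blocking
 * read.  dm_bufio_read() may allocate a buffer and wait for the I/O, hence
 * the BUG_ON(dm_bufio_in_request()) above.  "c" and "block" are placeholders.
 *
 *	struct dm_buffer *b;
 *	void *data = dm_bufio_read(c, block, &b);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... examine or copy the block contents ...
 *	dm_bufio_release(b);
 */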
1017
1018void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1019		   struct dm_buffer **bp)
1020{
1021	BUG_ON(dm_bufio_in_request());
1022
1023	return new_read(c, block, NF_FRESH, bp);
1024}
1025EXPORT_SYMBOL_GPL(dm_bufio_new);
1026
1027void dm_bufio_prefetch(struct dm_bufio_client *c,
1028		       sector_t block, unsigned n_blocks)
1029{
1030	struct blk_plug plug;
1031
1032	blk_start_plug(&plug);
1033	dm_bufio_lock(c);
1034
1035	for (; n_blocks--; block++) {
1036		int need_submit;
1037		struct dm_buffer *b;
1038		b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
1039		if (unlikely(b != NULL)) {
1040			dm_bufio_unlock(c);
1041
1042			if (need_submit)
1043				submit_io(b, READ, b->block, read_endio);
1044			dm_bufio_release(b);
1045
1046			dm_bufio_cond_resched();
1047
1048			if (!n_blocks)
1049				goto flush_plug;
1050			dm_bufio_lock(c);
1051		}
1052
1053	}
1054
1055	dm_bufio_unlock(c);
1056
1057flush_plug:
1058	blk_finish_plug(&plug);
1059}
1060EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
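/*
 * Illustrative sketch, not part of the original source: prefetch followed by
 * later reads.  The prefetch only starts the I/O; the subsequent
 * dm_bufio_read() calls then find the blocks already cached or wait for the
 * in-flight reads.  "c", "first_block" and "count" are placeholders.
 *
 *	dm_bufio_prefetch(c, first_block, count);
 *	...
 *	for (i = 0; i < count; i++) {
 *		void *data = dm_bufio_read(c, first_block + i, &b);
 *
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *		...
 *		dm_bufio_release(b);
 *	}
 */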
1061
1062void dm_bufio_release(struct dm_buffer *b)
1063{
1064	struct dm_bufio_client *c = b->c;
1065
1066	dm_bufio_lock(c);
1067
1068	BUG_ON(!b->hold_count);
1069
1070	b->hold_count--;
1071	if (!b->hold_count) {
1072		wake_up(&c->free_buffer_wait);
1073
1074		/*
1075		 * If there were errors on the buffer, and the buffer is not
1076		 * to be written, free the buffer. There is no point in caching
1077		 * invalid buffer.
1078		 */
1079		if ((b->read_error || b->write_error) &&
1080		    !test_bit(B_READING, &b->state) &&
1081		    !test_bit(B_WRITING, &b->state) &&
1082		    !test_bit(B_DIRTY, &b->state)) {
1083			__unlink_buffer(b);
1084			__free_buffer_wake(b);
1085		}
1086	}
1087
1088	dm_bufio_unlock(c);
1089}
1090EXPORT_SYMBOL_GPL(dm_bufio_release);
1091
1092void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1093{
1094	struct dm_bufio_client *c = b->c;
1095
1096	dm_bufio_lock(c);
1097
1098	BUG_ON(test_bit(B_READING, &b->state));
1099
1100	if (!test_and_set_bit(B_DIRTY, &b->state))
1101		__relink_lru(b, LIST_DIRTY);
1102
1103	dm_bufio_unlock(c);
1104}
1105EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
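/*
 * Illustrative sketch, not part of the original source: overwriting a block.
 * Marking the buffer dirty only moves it to the dirty list; the data is
 * written back later, either by the watermark-driven writeback or by an
 * explicit dm_bufio_write_dirty_buffers() call.  "c" and "block" are
 * placeholders.
 *
 *	struct dm_buffer *b;
 *	void *data = dm_bufio_new(c, block, &b);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(b);
 *	dm_bufio_release(b);
 */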
1106
1107void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1108{
1109	BUG_ON(dm_bufio_in_request());
1110
1111	dm_bufio_lock(c);
1112	__write_dirty_buffers_async(c, 0);
1113	dm_bufio_unlock(c);
1114}
1115EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1116
1117/*
1118 * For performance, it is essential that the buffers are written asynchronously
1119 * and simultaneously (so that the block layer can merge the writes) and then
1120 * waited upon.
1121 *
 1122 * Finally, we flush the hardware disk cache.
1123 */
1124int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1125{
1126	int a, f;
1127	unsigned long buffers_processed = 0;
1128	struct dm_buffer *b, *tmp;
1129
1130	dm_bufio_lock(c);
1131	__write_dirty_buffers_async(c, 0);
1132
1133again:
1134	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1135		int dropped_lock = 0;
1136
1137		if (buffers_processed < c->n_buffers[LIST_DIRTY])
1138			buffers_processed++;
1139
1140		BUG_ON(test_bit(B_READING, &b->state));
1141
1142		if (test_bit(B_WRITING, &b->state)) {
1143			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1144				dropped_lock = 1;
1145				b->hold_count++;
1146				dm_bufio_unlock(c);
1147				wait_on_bit(&b->state, B_WRITING,
1148					    do_io_schedule,
1149					    TASK_UNINTERRUPTIBLE);
1150				dm_bufio_lock(c);
1151				b->hold_count--;
1152			} else
1153				wait_on_bit(&b->state, B_WRITING,
1154					    do_io_schedule,
1155					    TASK_UNINTERRUPTIBLE);
1156		}
1157
1158		if (!test_bit(B_DIRTY, &b->state) &&
1159		    !test_bit(B_WRITING, &b->state))
1160			__relink_lru(b, LIST_CLEAN);
1161
1162		dm_bufio_cond_resched();
1163
1164		/*
1165		 * If we dropped the lock, the list is no longer consistent,
1166		 * so we must restart the search.
1167		 *
1168		 * In the most common case, the buffer just processed is
1169		 * relinked to the clean list, so we won't loop scanning the
1170		 * same buffer again and again.
1171		 *
 1172		 * This may livelock if there is another thread simultaneously
 1173		 * dirtying buffers, so we count the number of buffers walked;
 1174		 * if it exceeds the total number of dirty buffers, it means that
 1175		 * someone is doing writes simultaneously with us, and in that case
 1176		 * we stop dropping the lock, so that the scan cannot restart forever.
 1177		 */
1178		if (dropped_lock)
1179			goto again;
1180	}
1181	wake_up(&c->free_buffer_wait);
1182	dm_bufio_unlock(c);
1183
1184	a = xchg(&c->async_write_error, 0);
1185	f = dm_bufio_issue_flush(c);
1186	if (a)
1187		return a;
1188
1189	return f;
1190}
1191EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
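/*
 * Note added for clarity: the return value above is any asynchronous write
 * error recorded so far (consumed via the xchg()), or, if there was none,
 * the result of the final cache flush.
 */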
1192
1193/*
 1194 * Use dm-io to send an empty barrier and flush the device.
1195 */
1196int dm_bufio_issue_flush(struct dm_bufio_client *c)
1197{
1198	struct dm_io_request io_req = {
1199		.bi_rw = REQ_FLUSH,
1200		.mem.type = DM_IO_KMEM,
1201		.mem.ptr.addr = NULL,
1202		.client = c->dm_io,
1203	};
1204	struct dm_io_region io_reg = {
1205		.bdev = c->bdev,
1206		.sector = 0,
1207		.count = 0,
1208	};
1209
1210	BUG_ON(dm_bufio_in_request());
1211
1212	return dm_io(&io_req, 1, &io_reg, NULL);
1213}
1214EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1215
1216/*
1217 * We first delete any other buffer that may be at that new location.
1218 *
1219 * Then, we write the buffer to the original location if it was dirty.
1220 *
1221 * Then, if we are the only one who is holding the buffer, relink the buffer
1222 * in the hash queue for the new location.
1223 *
 1224 * If there was someone else holding the buffer, we write it to the new
 1225 * location but do not relink it, because the other user needs to have the
 1226 * buffer at the same place.
1227 */
1228void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1229{
1230	struct dm_bufio_client *c = b->c;
1231	struct dm_buffer *new;
1232
1233	BUG_ON(dm_bufio_in_request());
1234
1235	dm_bufio_lock(c);
1236
1237retry:
1238	new = __find(c, new_block);
1239	if (new) {
1240		if (new->hold_count) {
1241			__wait_for_free_buffer(c);
1242			goto retry;
1243		}
1244
1245		/*
1246		 * FIXME: Is there any point waiting for a write that's going
1247		 * to be overwritten in a bit?
1248		 */
1249		__make_buffer_clean(new);
1250		__unlink_buffer(new);
1251		__free_buffer_wake(new);
1252	}
1253
1254	BUG_ON(!b->hold_count);
1255	BUG_ON(test_bit(B_READING, &b->state));
1256
1257	__write_dirty_buffer(b);
1258	if (b->hold_count == 1) {
1259		wait_on_bit(&b->state, B_WRITING,
1260			    do_io_schedule, TASK_UNINTERRUPTIBLE);
1261		set_bit(B_DIRTY, &b->state);
1262		__unlink_buffer(b);
1263		__link_buffer(b, new_block, LIST_DIRTY);
1264	} else {
1265		sector_t old_block;
1266		wait_on_bit_lock(&b->state, B_WRITING,
1267				 do_io_schedule, TASK_UNINTERRUPTIBLE);
1268		/*
1269		 * Relink buffer to "new_block" so that write_callback
1270		 * sees "new_block" as a block number.
1271		 * After the write, link the buffer back to old_block.
1272		 * All this must be done in bufio lock, so that block number
1273		 * change isn't visible to other threads.
1274		 */
1275		old_block = b->block;
1276		__unlink_buffer(b);
1277		__link_buffer(b, new_block, b->list_mode);
1278		submit_io(b, WRITE, new_block, write_endio);
1279		wait_on_bit(&b->state, B_WRITING,
1280			    do_io_schedule, TASK_UNINTERRUPTIBLE);
1281		__unlink_buffer(b);
1282		__link_buffer(b, old_block, b->list_mode);
1283	}
1284
1285	dm_bufio_unlock(c);
1286	dm_bufio_release(b);
1287}
1288EXPORT_SYMBOL_GPL(dm_bufio_release_move);
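/*
 * Illustrative sketch, not part of the original source: writing a held
 * buffer's current contents to a different on-disk location and dropping the
 * reference in one step.  "b" and "new_block" are placeholders.
 *
 *	... fill or modify dm_bufio_get_block_data(b) ...
 *	dm_bufio_release_move(b, new_block);
 *
 * This must not be called from a request routine (see the BUG_ON above).
 */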
1289
1290unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1291{
1292	return c->block_size;
1293}
1294EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1295
1296sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1297{
1298	return i_size_read(c->bdev->bd_inode) >>
1299			   (SECTOR_SHIFT + c->sectors_per_block_bits);
1300}
1301EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
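/*
 * Worked example, added for illustration: with a 4096-byte block size,
 * sectors_per_block_bits is 3, so the value above is
 * i_size >> (SECTOR_SHIFT + 3) = i_size >> 12, i.e. the size of the
 * underlying block device in bytes divided by 4096, rounded down.
 */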
1302
1303sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1304{
1305	return b->block;
1306}
1307EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1308
1309void *dm_bufio_get_block_data(struct dm_buffer *b)
1310{
1311	return b->data;
1312}
1313EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1314
1315void *dm_bufio_get_aux_data(struct dm_buffer *b)
1316{
1317	return b + 1;
1318}
1319EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1320
1321struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1322{
1323	return b->c;
1324}
1325EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1326
1327static void drop_buffers(struct dm_bufio_client *c)
1328{
1329	struct dm_buffer *b;
1330	int i;
1331
1332	BUG_ON(dm_bufio_in_request());
1333
1334	/*
1335	 * An optimization so that the buffers are not written one-by-one.
1336	 */
1337	dm_bufio_write_dirty_buffers_async(c);
1338
1339	dm_bufio_lock(c);
1340
1341	while ((b = __get_unclaimed_buffer(c)))
1342		__free_buffer_wake(b);
1343
1344	for (i = 0; i < LIST_SIZE; i++)
1345		list_for_each_entry(b, &c->lru[i], lru_list)
1346			DMERR("leaked buffer %llx, hold count %u, list %d",
1347			      (unsigned long long)b->block, b->hold_count, i);
1348
1349	for (i = 0; i < LIST_SIZE; i++)
1350		BUG_ON(!list_empty(&c->lru[i]));
1351
1352	dm_bufio_unlock(c);
1353}
1354
1355/*
 1356 * Test if the buffer is unused and too old; if so, write it back and free it.
 1357 * If __GFP_IO is not set, we must not do any I/O because we may hold
 1358 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted
 1359 * to a different bufio client.
1360 */
1361static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
1362				unsigned long max_jiffies)
1363{
1364	if (jiffies - b->last_accessed < max_jiffies)
1365		return 1;
1366
1367	if (!(gfp & __GFP_IO)) {
1368		if (test_bit(B_READING, &b->state) ||
1369		    test_bit(B_WRITING, &b->state) ||
1370		    test_bit(B_DIRTY, &b->state))
1371			return 1;
1372	}
1373
1374	if (b->hold_count)
1375		return 1;
1376
1377	__make_buffer_clean(b);
1378	__unlink_buffer(b);
1379	__free_buffer_wake(b);
1380
1381	return 0;
1382}
1383
1384static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1385		   struct shrink_control *sc)
1386{
1387	int l;
1388	struct dm_buffer *b, *tmp;
1389
1390	for (l = 0; l < LIST_SIZE; l++) {
1391		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
1392			if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
1393			    !--nr_to_scan)
1394				return;
1395		dm_bufio_cond_resched();
1396	}
1397}
1398
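/*
 * Note added for clarity: this uses the old single-callback shrinker
 * interface, where one function both scans (when sc->nr_to_scan is non-zero)
 * and reports the number of cached objects.  Returning -1 tells the caller
 * that the shrinker could not make progress right now, which happens here
 * when the trylock fails in a context that must not issue I/O.
 */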
1399static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
1400{
1401	struct dm_bufio_client *c =
1402	    container_of(shrinker, struct dm_bufio_client, shrinker);
1403	unsigned long r;
1404	unsigned long nr_to_scan = sc->nr_to_scan;
1405
1406	if (sc->gfp_mask & __GFP_IO)
1407		dm_bufio_lock(c);
1408	else if (!dm_bufio_trylock(c))
1409		return !nr_to_scan ? 0 : -1;
1410
1411	if (nr_to_scan)
1412		__scan(c, nr_to_scan, sc);
1413
1414	r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1415	if (r > INT_MAX)
1416		r = INT_MAX;
1417
1418	dm_bufio_unlock(c);
1419
1420	return r;
1421}
1422
1423/*
1424 * Create the buffering interface
1425 */
1426struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1427					       unsigned reserved_buffers, unsigned aux_size,
1428					       void (*alloc_callback)(struct dm_buffer *),
1429					       void (*write_callback)(struct dm_buffer *))
1430{
1431	int r;
1432	struct dm_bufio_client *c;
1433	unsigned i;
1434
1435	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1436	       (block_size & (block_size - 1)));
1437
1438	c = kmalloc(sizeof(*c), GFP_KERNEL);
1439	if (!c) {
1440		r = -ENOMEM;
1441		goto bad_client;
1442	}
1443	c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
1444	if (!c->cache_hash) {
1445		r = -ENOMEM;
1446		goto bad_hash;
1447	}
1448
1449	c->bdev = bdev;
1450	c->block_size = block_size;
1451	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
1452	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
1453				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
1454	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
1455				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);
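	/*
	 * Example added for illustration: for block_size = 4096 on a system
	 * with 4 KiB pages, ffs(block_size) - 1 is 12, so sectors_per_block_bits
	 * is 12 - SECTOR_SHIFT = 3 and both pages_per_block_bits and
	 * blocks_per_page_bits are 0.
	 */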
1456
1457	c->aux_size = aux_size;
1458	c->alloc_callback = alloc_callback;
1459	c->write_callback = write_callback;
1460
1461	for (i = 0; i < LIST_SIZE; i++) {
1462		INIT_LIST_HEAD(&c->lru[i]);
1463		c->n_buffers[i] = 0;
1464	}
1465
1466	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1467		INIT_HLIST_HEAD(&c->cache_hash[i]);
1468
1469	mutex_init(&c->lock);
1470	INIT_LIST_HEAD(&c->reserved_buffers);
1471	c->need_reserved_buffers = reserved_buffers;
1472
1473	init_waitqueue_head(&c->free_buffer_wait);
1474	c->async_write_error = 0;
1475
1476	c->dm_io = dm_io_client_create();
1477	if (IS_ERR(c->dm_io)) {
1478		r = PTR_ERR(c->dm_io);
1479		goto bad_dm_io;
1480	}
1481
1482	mutex_lock(&dm_bufio_clients_lock);
1483	if (c->blocks_per_page_bits) {
1484		if (!DM_BUFIO_CACHE_NAME(c)) {
1485			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1486			if (!DM_BUFIO_CACHE_NAME(c)) {
1487				r = -ENOMEM;
1488				mutex_unlock(&dm_bufio_clients_lock);
1489				goto bad_cache;
1490			}
1491		}
1492
1493		if (!DM_BUFIO_CACHE(c)) {
1494			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1495							      c->block_size,
1496							      c->block_size, 0, NULL);
1497			if (!DM_BUFIO_CACHE(c)) {
1498				r = -ENOMEM;
1499				mutex_unlock(&dm_bufio_clients_lock);
1500				goto bad_cache;
1501			}
1502		}
1503	}
1504	mutex_unlock(&dm_bufio_clients_lock);
1505
1506	while (c->need_reserved_buffers) {
1507		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1508
1509		if (!b) {
1510			r = -ENOMEM;
1511			goto bad_buffer;
1512		}
1513		__free_buffer_wake(b);
1514	}
1515
1516	mutex_lock(&dm_bufio_clients_lock);
1517	dm_bufio_client_count++;
1518	list_add(&c->client_list, &dm_bufio_all_clients);
1519	__cache_size_refresh();
1520	mutex_unlock(&dm_bufio_clients_lock);
1521
1522	c->shrinker.shrink = shrink;
1523	c->shrinker.seeks = 1;
1524	c->shrinker.batch = 0;
1525	register_shrinker(&c->shrinker);
1526
1527	return c;
1528
1529bad_buffer:
1530bad_cache:
1531	while (!list_empty(&c->reserved_buffers)) {
1532		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1533						 struct dm_buffer, lru_list);
1534		list_del(&b->lru_list);
1535		free_buffer(b);
1536	}
1537	dm_io_client_destroy(c->dm_io);
1538bad_dm_io:
1539	vfree(c->cache_hash);
1540bad_hash:
1541	kfree(c);
1542bad_client:
1543	return ERR_PTR(r);
1544}
1545EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1546
1547/*
1548 * Free the buffering interface.
1549 * It is required that there are no references on any buffers.
1550 */
1551void dm_bufio_client_destroy(struct dm_bufio_client *c)
1552{
1553	unsigned i;
1554
1555	drop_buffers(c);
1556
1557	unregister_shrinker(&c->shrinker);
1558
1559	mutex_lock(&dm_bufio_clients_lock);
1560
1561	list_del(&c->client_list);
1562	dm_bufio_client_count--;
1563	__cache_size_refresh();
1564
1565	mutex_unlock(&dm_bufio_clients_lock);
1566
1567	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1568		BUG_ON(!hlist_empty(&c->cache_hash[i]));
1569
1570	BUG_ON(c->need_reserved_buffers);
1571
1572	while (!list_empty(&c->reserved_buffers)) {
1573		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1574						 struct dm_buffer, lru_list);
1575		list_del(&b->lru_list);
1576		free_buffer(b);
1577	}
1578
1579	for (i = 0; i < LIST_SIZE; i++)
1580		if (c->n_buffers[i])
1581			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1582
1583	for (i = 0; i < LIST_SIZE; i++)
1584		BUG_ON(c->n_buffers[i]);
1585
1586	dm_io_client_destroy(c->dm_io);
1587	vfree(c->cache_hash);
1588	kfree(c);
1589}
1590EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1591
1592static void cleanup_old_buffers(void)
1593{
1594	unsigned long max_age = dm_bufio_max_age;
1595	struct dm_bufio_client *c;
1596
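	/*
	 * Comment added for clarity: the barrier() below keeps the compiler
	 * from re-reading the dm_bufio_max_age module parameter after the
	 * snapshot above, so a concurrent update of max_age_seconds cannot
	 * change max_age in the middle of the scan.
	 */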
1597	barrier();
1598
1599	if (max_age > ULONG_MAX / HZ)
1600		max_age = ULONG_MAX / HZ;
1601
1602	mutex_lock(&dm_bufio_clients_lock);
1603	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
1604		if (!dm_bufio_trylock(c))
1605			continue;
1606
1607		while (!list_empty(&c->lru[LIST_CLEAN])) {
1608			struct dm_buffer *b;
1609			b = list_entry(c->lru[LIST_CLEAN].prev,
1610				       struct dm_buffer, lru_list);
1611			if (__cleanup_old_buffer(b, 0, max_age * HZ))
1612				break;
1613			dm_bufio_cond_resched();
1614		}
1615
1616		dm_bufio_unlock(c);
1617		dm_bufio_cond_resched();
1618	}
1619	mutex_unlock(&dm_bufio_clients_lock);
1620}
1621
1622static struct workqueue_struct *dm_bufio_wq;
1623static struct delayed_work dm_bufio_work;
1624
1625static void work_fn(struct work_struct *w)
1626{
1627	cleanup_old_buffers();
1628
1629	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1630			   DM_BUFIO_WORK_TIMER_SECS * HZ);
1631}
1632
1633/*----------------------------------------------------------------
1634 * Module setup
1635 *--------------------------------------------------------------*/
1636
1637/*
1638 * This is called only once for the whole dm_bufio module.
 1639 * It initializes the memory limit.
1640 */
1641static int __init dm_bufio_init(void)
1642{
1643	__u64 mem;
1644
1645	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1646	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1647
1648	mem = (__u64)((totalram_pages - totalhigh_pages) *
1649		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1650
1651	if (mem > ULONG_MAX)
1652		mem = ULONG_MAX;
1653
1654#ifdef CONFIG_MMU
1655	/*
1656	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1657	 * in fs/proc/internal.h
1658	 */
1659	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1660		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
1661#endif
1662
1663	dm_bufio_default_cache_size = mem;
1664
1665	mutex_lock(&dm_bufio_clients_lock);
1666	__cache_size_refresh();
1667	mutex_unlock(&dm_bufio_clients_lock);
1668
1669	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
1670	if (!dm_bufio_wq)
1671		return -ENOMEM;
1672
1673	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1674	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1675			   DM_BUFIO_WORK_TIMER_SECS * HZ);
1676
1677	return 0;
1678}
1679
1680/*
1681 * This is called once when unloading the dm_bufio module.
1682 */
1683static void __exit dm_bufio_exit(void)
1684{
1685	int bug = 0;
1686	int i;
1687
1688	cancel_delayed_work_sync(&dm_bufio_work);
1689	destroy_workqueue(dm_bufio_wq);
1690
1691	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
1692		struct kmem_cache *kc = dm_bufio_caches[i];
1693
1694		if (kc)
1695			kmem_cache_destroy(kc);
1696	}
1697
1698	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1699		kfree(dm_bufio_cache_names[i]);
1700
1701	if (dm_bufio_client_count) {
1702		DMCRIT("%s: dm_bufio_client_count leaked: %d",
1703			__func__, dm_bufio_client_count);
1704		bug = 1;
1705	}
1706
1707	if (dm_bufio_current_allocated) {
1708		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1709			__func__, dm_bufio_current_allocated);
1710		bug = 1;
1711	}
1712
1713	if (dm_bufio_allocated_get_free_pages) {
1714		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1715		       __func__, dm_bufio_allocated_get_free_pages);
1716		bug = 1;
1717	}
1718
1719	if (dm_bufio_allocated_vmalloc) {
 1720		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
1721		       __func__, dm_bufio_allocated_vmalloc);
1722		bug = 1;
1723	}
1724
1725	if (bug)
1726		BUG();
1727}
1728
1729module_init(dm_bufio_init)
1730module_exit(dm_bufio_exit)
1731
1732module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1733MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1734
1735module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1736MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1737
1738module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1739MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1740
1741module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1742MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1743
1744module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1745MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1746
1747module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1748MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1749
1750module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1751MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
1752
1753MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1754MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1755MODULE_LICENSE("GPL");