drivers/md/dm-bufio.c (Linux v5.9)
   1/*
   2 * Copyright (C) 2009-2011 Red Hat, Inc.
   3 *
   4 * Author: Mikulas Patocka <mpatocka@redhat.com>
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include <linux/dm-bufio.h>
  10
  11#include <linux/device-mapper.h>
  12#include <linux/dm-io.h>
  13#include <linux/slab.h>
  14#include <linux/sched/mm.h>
  15#include <linux/jiffies.h>
  16#include <linux/vmalloc.h>
  17#include <linux/shrinker.h>
  18#include <linux/module.h>
  19#include <linux/rbtree.h>
  20#include <linux/stacktrace.h>
  21
  22#define DM_MSG_PREFIX "bufio"
  23
  24/*
  25 * Memory management policy:
  26 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
  27 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
  28 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
   29 *	Start background writeback when the number of dirty buffers exceeds
   30 *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
  31 */
  32#define DM_BUFIO_MIN_BUFFERS		8
  33
  34#define DM_BUFIO_MEMORY_PERCENT		2
  35#define DM_BUFIO_VMALLOC_PERCENT	25
  36#define DM_BUFIO_WRITEBACK_RATIO	3
  37#define DM_BUFIO_LOW_WATERMARK_RATIO	16
  38
  39/*
  40 * Check buffer ages in this interval (seconds)
  41 */
  42#define DM_BUFIO_WORK_TIMER_SECS	30
  43
  44/*
  45 * Free buffers when they are older than this (seconds)
  46 */
  47#define DM_BUFIO_DEFAULT_AGE_SECS	300
  48
  49/*
  50 * The nr of bytes of cached data to keep around.
  51 */
  52#define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
  53
  54/*
  55 * Align buffer writes to this boundary.
  56 * Tests show that SSDs have the highest IOPS when using 4k writes.
  57 */
  58#define DM_BUFIO_WRITE_ALIGN		4096
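/*
 * Worked example of the defaults above (illustrative only, assuming a
 * 64-bit machine with 16 GiB of RAM and 4 KiB metadata blocks):
 *
 *	default cache size  = min(2% of 16 GiB, 25% of vmalloc space)
 *			    = min(~328 MiB, several TiB) = ~328 MiB
 *	retained on shrink  = 256 KiB / 4 KiB = 64 buffers per client
 *	background writeback starts once dirty buffers outnumber clean
 *	buffers by more than DM_BUFIO_WRITEBACK_RATIO (3:1), and the global
 *	cleanup work shrinks the cache back below
 *	cache_size - cache_size / DM_BUFIO_LOW_WATERMARK_RATIO (~93.75%).
 */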
  59
  60/*
  61 * dm_buffer->list_mode
  62 */
  63#define LIST_CLEAN	0
  64#define LIST_DIRTY	1
  65#define LIST_SIZE	2
  66
  67/*
  68 * Linking of buffers:
  69 *	All buffers are linked to buffer_tree with their node field.
  70 *
  71 *	Clean buffers that are not being written (B_WRITING not set)
  72 *	are linked to lru[LIST_CLEAN] with their lru_list field.
  73 *
  74 *	Dirty and clean buffers that are being written are linked to
  75 *	lru[LIST_DIRTY] with their lru_list field. When the write
  76 *	finishes, the buffer cannot be relinked immediately (because we
  77 *	are in an interrupt context and relinking requires process
  78 *	context), so some clean-not-writing buffers can be held on
  79 *	dirty_lru too.  They are later added to lru in the process
  80 *	context.
  81 */
  82struct dm_bufio_client {
  83	struct mutex lock;
  84
  85	struct list_head lru[LIST_SIZE];
  86	unsigned long n_buffers[LIST_SIZE];
  87
  88	struct block_device *bdev;
  89	unsigned block_size;
  90	s8 sectors_per_block_bits;
  91	void (*alloc_callback)(struct dm_buffer *);
  92	void (*write_callback)(struct dm_buffer *);
  93
  94	struct kmem_cache *slab_buffer;
  95	struct kmem_cache *slab_cache;
  96	struct dm_io_client *dm_io;
  97
  98	struct list_head reserved_buffers;
  99	unsigned need_reserved_buffers;
 100
 101	unsigned minimum_buffers;
 102
 103	struct rb_root buffer_tree;
 104	wait_queue_head_t free_buffer_wait;
 105
 106	sector_t start;
 107
 108	int async_write_error;
 109
 110	struct list_head client_list;
 111
 112	struct shrinker shrinker;
 113	struct work_struct shrink_work;
 114	atomic_long_t need_shrink;
 115};
 116
 117/*
 118 * Buffer state bits.
 119 */
 120#define B_READING	0
 121#define B_WRITING	1
 122#define B_DIRTY		2
 123
 124/*
 125 * Describes how the block was allocated:
 126 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 127 * See the comment at alloc_buffer_data.
 128 */
 129enum data_mode {
 130	DATA_MODE_SLAB = 0,
 131	DATA_MODE_GET_FREE_PAGES = 1,
 132	DATA_MODE_VMALLOC = 2,
 133	DATA_MODE_LIMIT = 3
 134};
 135
 136struct dm_buffer {
 137	struct rb_node node;
 138	struct list_head lru_list;
 139	struct list_head global_list;
 140	sector_t block;
 141	void *data;
 142	unsigned char data_mode;		/* DATA_MODE_* */
 143	unsigned char list_mode;		/* LIST_* */
 144	blk_status_t read_error;
 145	blk_status_t write_error;
 146	unsigned accessed;
 147	unsigned hold_count;
 148	unsigned long state;
 149	unsigned long last_accessed;
 150	unsigned dirty_start;
 151	unsigned dirty_end;
 152	unsigned write_start;
 153	unsigned write_end;
 154	struct dm_bufio_client *c;
 155	struct list_head write_list;
 156	void (*end_io)(struct dm_buffer *, blk_status_t);
 157#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 158#define MAX_STACK 10
 159	unsigned int stack_len;
 160	unsigned long stack_entries[MAX_STACK];
 161#endif
 162};
 163
 164/*----------------------------------------------------------------*/
 165
 166#define dm_bufio_in_request()	(!!current->bio_list)
 167
 168static void dm_bufio_lock(struct dm_bufio_client *c)
 169{
 170	mutex_lock_nested(&c->lock, dm_bufio_in_request());
 171}
 172
 173static int dm_bufio_trylock(struct dm_bufio_client *c)
 174{
 175	return mutex_trylock(&c->lock);
 176}
 177
 178static void dm_bufio_unlock(struct dm_bufio_client *c)
 179{
 180	mutex_unlock(&c->lock);
 181}
 182
 183/*----------------------------------------------------------------*/
 184
 185/*
 186 * Default cache size: available memory divided by the ratio.
 187 */
 188static unsigned long dm_bufio_default_cache_size;
 189
 190/*
 191 * Total cache size set by the user.
 192 */
 193static unsigned long dm_bufio_cache_size;
 194
 195/*
 196 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 197 * at any time.  If it disagrees, the user has changed cache size.
 198 */
 199static unsigned long dm_bufio_cache_size_latch;
 200
 201static DEFINE_SPINLOCK(global_spinlock);
 202
 203static LIST_HEAD(global_queue);
 204
 205static unsigned long global_num = 0;
 206
 207/*
 208 * Buffers are freed after this timeout
 209 */
 210static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
 211static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
 212
 213static unsigned long dm_bufio_peak_allocated;
 214static unsigned long dm_bufio_allocated_kmem_cache;
 215static unsigned long dm_bufio_allocated_get_free_pages;
 216static unsigned long dm_bufio_allocated_vmalloc;
 217static unsigned long dm_bufio_current_allocated;
 218
 219/*----------------------------------------------------------------*/
 220
 221/*
 222 * The current number of clients.
 223 */
 224static int dm_bufio_client_count;
 225
 226/*
 227 * The list of all clients.
 228 */
 229static LIST_HEAD(dm_bufio_all_clients);
 230
 231/*
 232 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 233 */
 234static DEFINE_MUTEX(dm_bufio_clients_lock);
 235
 236static struct workqueue_struct *dm_bufio_wq;
 237static struct delayed_work dm_bufio_cleanup_old_work;
 238static struct work_struct dm_bufio_replacement_work;
 239
 240
 241#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 242static void buffer_record_stack(struct dm_buffer *b)
 243{
 244	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
 245}
 246#endif
 247
 248/*----------------------------------------------------------------
 249 * A red/black tree acts as an index for all the buffers.
 250 *--------------------------------------------------------------*/
 251static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
 252{
 253	struct rb_node *n = c->buffer_tree.rb_node;
 254	struct dm_buffer *b;
 255
 256	while (n) {
 257		b = container_of(n, struct dm_buffer, node);
 258
 259		if (b->block == block)
 260			return b;
 261
 262		n = block < b->block ? n->rb_left : n->rb_right;
 263	}
 264
 265	return NULL;
 266}
 267
 268static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
 269{
 270	struct rb_node *n = c->buffer_tree.rb_node;
 271	struct dm_buffer *b;
 272	struct dm_buffer *best = NULL;
 273
 274	while (n) {
 275		b = container_of(n, struct dm_buffer, node);
 276
 277		if (b->block == block)
 278			return b;
 279
 280		if (block <= b->block) {
 281			n = n->rb_left;
 282			best = b;
 283		} else {
 284			n = n->rb_right;
 285		}
 286	}
 287
 288	return best;
 289}
 290
 291static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
 292{
 293	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
 294	struct dm_buffer *found;
 295
 296	while (*new) {
 297		found = container_of(*new, struct dm_buffer, node);
 298
 299		if (found->block == b->block) {
 300			BUG_ON(found != b);
 301			return;
 302		}
 303
 304		parent = *new;
 305		new = b->block < found->block ?
 306			&found->node.rb_left : &found->node.rb_right;
 307	}
 308
 309	rb_link_node(&b->node, parent, new);
 310	rb_insert_color(&b->node, &c->buffer_tree);
 311}
 312
 313static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
 314{
 315	rb_erase(&b->node, &c->buffer_tree);
 316}
 317
 318/*----------------------------------------------------------------*/
 319
 320static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
 321{
 322	unsigned char data_mode;
 323	long diff;
 324
 325	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
 326		&dm_bufio_allocated_kmem_cache,
 327		&dm_bufio_allocated_get_free_pages,
 328		&dm_bufio_allocated_vmalloc,
 329	};
 330
 331	data_mode = b->data_mode;
 332	diff = (long)b->c->block_size;
 333	if (unlink)
 334		diff = -diff;
 335
 336	spin_lock(&global_spinlock);
 337
 338	*class_ptr[data_mode] += diff;
 339
 340	dm_bufio_current_allocated += diff;
 341
 342	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
 343		dm_bufio_peak_allocated = dm_bufio_current_allocated;
 344
 345	b->accessed = 1;
 346
 347	if (!unlink) {
 348		list_add(&b->global_list, &global_queue);
 349		global_num++;
 350		if (dm_bufio_current_allocated > dm_bufio_cache_size)
 351			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
 352	} else {
 353		list_del(&b->global_list);
 354		global_num--;
 355	}
 356
 357	spin_unlock(&global_spinlock);
 358}
 359
 360/*
 361 * Change the number of clients and recalculate per-client limit.
 362 */
 363static void __cache_size_refresh(void)
 364{
 365	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
 366	BUG_ON(dm_bufio_client_count < 0);
 367
 368	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
 369
 370	/*
 371	 * Use default if set to 0 and report the actual cache size used.
 372	 */
 373	if (!dm_bufio_cache_size_latch) {
 374		(void)cmpxchg(&dm_bufio_cache_size, 0,
 375			      dm_bufio_default_cache_size);
 376		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
 377	}
 378}
 379
 380/*
 381 * Allocating buffer data.
 382 *
 383 * Small buffers are allocated with kmem_cache, to use space optimally.
 384 *
 385 * For large buffers, we choose between get_free_pages and vmalloc.
 386 * Each has advantages and disadvantages.
 387 *
 388 * __get_free_pages can randomly fail if the memory is fragmented.
 389 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 390 * as low as 128M) so using it for caching is not appropriate.
 391 *
 392 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 393 * won't have a fatal effect here, but it just causes flushes of some other
 394 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 395 * always fails (i.e. order >= MAX_ORDER).
 396 *
 397 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 398 * initial reserve allocation, so there's no risk of wasting all vmalloc
 399 * space.
 400 */
 401static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 402			       unsigned char *data_mode)
 403{
 404	if (unlikely(c->slab_cache != NULL)) {
 405		*data_mode = DATA_MODE_SLAB;
 406		return kmem_cache_alloc(c->slab_cache, gfp_mask);
 407	}
 408
 409	if (c->block_size <= KMALLOC_MAX_SIZE &&
 410	    gfp_mask & __GFP_NORETRY) {
 411		*data_mode = DATA_MODE_GET_FREE_PAGES;
 412		return (void *)__get_free_pages(gfp_mask,
 413						c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
 414	}
 415
 416	*data_mode = DATA_MODE_VMALLOC;
 417
 418	/*
 419	 * __vmalloc allocates the data pages and auxiliary structures with
 420	 * gfp_flags that were specified, but pagetables are always allocated
 421	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
 422	 *
 423	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
 424	 * all allocations done by this process (including pagetables) are done
 425	 * as if GFP_NOIO was specified.
 426	 */
 427	if (gfp_mask & __GFP_NORETRY) {
 428		unsigned noio_flag = memalloc_noio_save();
 429		void *ptr = __vmalloc(c->block_size, gfp_mask);
 430
 431		memalloc_noio_restore(noio_flag);
 432		return ptr;
 433	}
 434
 435	return __vmalloc(c->block_size, gfp_mask);
 436}
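/*
 * Worked example of the mode selection above (illustrative only, assuming
 * 4 KiB pages):
 *
 *	512 B blocks:	c->slab_cache exists (block size < PAGE_SIZE), so
 *			every allocation is DATA_MODE_SLAB.
 *	4 KiB blocks:	no slab cache; __GFP_NORETRY cache allocations use
 *			__get_free_pages() of order 0, while the reserve
 *			buffers allocated with GFP_KERNEL at client creation
 *			fall through to __vmalloc() (DATA_MODE_VMALLOC).
 */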
 437
 438/*
 439 * Free buffer's data.
 440 */
 441static void free_buffer_data(struct dm_bufio_client *c,
 442			     void *data, unsigned char data_mode)
 443{
 444	switch (data_mode) {
 445	case DATA_MODE_SLAB:
 446		kmem_cache_free(c->slab_cache, data);
 447		break;
 448
 449	case DATA_MODE_GET_FREE_PAGES:
 450		free_pages((unsigned long)data,
 451			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
 452		break;
 453
 454	case DATA_MODE_VMALLOC:
 455		vfree(data);
 456		break;
 457
 458	default:
 459		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
 460		       data_mode);
 461		BUG();
 462	}
 463}
 464
 465/*
 466 * Allocate buffer and its data.
 467 */
 468static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
 469{
 470	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
 471
 472	if (!b)
 473		return NULL;
 474
 475	b->c = c;
 476
 477	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
 478	if (!b->data) {
 479		kmem_cache_free(c->slab_buffer, b);
 480		return NULL;
 481	}
 482
 483#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 484	b->stack_len = 0;
 485#endif
 486	return b;
 487}
 488
 489/*
 490 * Free buffer and its data.
 491 */
 492static void free_buffer(struct dm_buffer *b)
 493{
 494	struct dm_bufio_client *c = b->c;
 495
 496	free_buffer_data(c, b->data, b->data_mode);
 497	kmem_cache_free(c->slab_buffer, b);
 498}
 499
 500/*
 501 * Link buffer to the buffer tree and clean or dirty queue.
 502 */
 503static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
 504{
 505	struct dm_bufio_client *c = b->c;
 506
 507	c->n_buffers[dirty]++;
 508	b->block = block;
 509	b->list_mode = dirty;
 510	list_add(&b->lru_list, &c->lru[dirty]);
 511	__insert(b->c, b);
 512	b->last_accessed = jiffies;
 513
 514	adjust_total_allocated(b, false);
 515}
 516
 517/*
 518 * Unlink buffer from the buffer tree and dirty or clean queue.
 519 */
 520static void __unlink_buffer(struct dm_buffer *b)
 521{
 522	struct dm_bufio_client *c = b->c;
 523
 524	BUG_ON(!c->n_buffers[b->list_mode]);
 525
 526	c->n_buffers[b->list_mode]--;
 527	__remove(b->c, b);
 528	list_del(&b->lru_list);
 529
 530	adjust_total_allocated(b, true);
 531}
 532
 533/*
 534 * Place the buffer to the head of dirty or clean LRU queue.
 535 */
 536static void __relink_lru(struct dm_buffer *b, int dirty)
 537{
 538	struct dm_bufio_client *c = b->c;
 539
 540	b->accessed = 1;
 541
 542	BUG_ON(!c->n_buffers[b->list_mode]);
 543
 544	c->n_buffers[b->list_mode]--;
 545	c->n_buffers[dirty]++;
 546	b->list_mode = dirty;
 547	list_move(&b->lru_list, &c->lru[dirty]);
 548	b->last_accessed = jiffies;
 549}
 550
 551/*----------------------------------------------------------------
 552 * Submit I/O on the buffer.
 553 *
 554 * Bio interface is faster but it has some problems:
 555 *	the vector list is limited (increasing this limit increases
 556 *	memory-consumption per buffer, so it is not viable);
 557 *
 558 *	the memory must be direct-mapped, not vmalloced;
 559 *
  560 * If the buffer is not vmalloced and a large enough bio can be
  561 * allocated, try using the bio interface.
 562 *
 563 * If the buffer is big, if it is vmalloced or if the underlying device
 564 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 565 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 566 * shortcomings.
 567 *--------------------------------------------------------------*/
 568
 569/*
  570 * dm-io completion routine. It just calls b->end_io, pretending that
  571 * the request was handled directly with the bio interface.
 572 */
 573static void dmio_complete(unsigned long error, void *context)
 574{
 575	struct dm_buffer *b = context;
 576
 577	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
 578}
 579
 580static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 581		     unsigned n_sectors, unsigned offset)
 582{
 583	int r;
 584	struct dm_io_request io_req = {
 585		.bi_op = rw,
 586		.bi_op_flags = 0,
 587		.notify.fn = dmio_complete,
 588		.notify.context = b,
 589		.client = b->c->dm_io,
 590	};
 591	struct dm_io_region region = {
 592		.bdev = b->c->bdev,
 593		.sector = sector,
 594		.count = n_sectors,
 595	};
 596
 597	if (b->data_mode != DATA_MODE_VMALLOC) {
 598		io_req.mem.type = DM_IO_KMEM;
 599		io_req.mem.ptr.addr = (char *)b->data + offset;
 600	} else {
 601		io_req.mem.type = DM_IO_VMA;
 602		io_req.mem.ptr.vma = (char *)b->data + offset;
 603	}
 604
 605	r = dm_io(&io_req, 1, &region, NULL);
 606	if (unlikely(r))
 607		b->end_io(b, errno_to_blk_status(r));
 608}
 609
 610static void bio_complete(struct bio *bio)
 611{
 612	struct dm_buffer *b = bio->bi_private;
 613	blk_status_t status = bio->bi_status;
 614	bio_put(bio);
 615	b->end_io(b, status);
 616}
 617
 618static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
 619		    unsigned n_sectors, unsigned offset)
 620{
 621	struct bio *bio;
 622	char *ptr;
 623	unsigned vec_size, len;
 624
 625	vec_size = b->c->block_size >> PAGE_SHIFT;
 626	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
 627		vec_size += 2;
 628
 629	bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
 630	if (!bio) {
 631dmio:
 632		use_dmio(b, rw, sector, n_sectors, offset);
 633		return;
 634	}
 635
 636	bio->bi_iter.bi_sector = sector;
 637	bio_set_dev(bio, b->c->bdev);
 638	bio_set_op_attrs(bio, rw, 0);
 639	bio->bi_end_io = bio_complete;
 640	bio->bi_private = b;
 641
 642	ptr = (char *)b->data + offset;
 643	len = n_sectors << SECTOR_SHIFT;
 644
 645	do {
 646		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
 647		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
 648				  offset_in_page(ptr))) {
 649			bio_put(bio);
 650			goto dmio;
 651		}
 652
 653		len -= this_step;
 654		ptr += this_step;
 655	} while (len > 0);
 656
 657	submit_bio(bio);
 658}
 659
 660static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
 661{
 662	sector_t sector;
 663
 664	if (likely(c->sectors_per_block_bits >= 0))
 665		sector = block << c->sectors_per_block_bits;
 666	else
 667		sector = block * (c->block_size >> SECTOR_SHIFT);
 668	sector += c->start;
 669
 670	return sector;
 671}
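/*
 * Worked example (illustrative only): with 4 KiB blocks,
 * sectors_per_block_bits is 3, so block 10 maps to sector (10 << 3) = 80,
 * plus the c->start offset set by dm_bufio_set_sector_offset().
 */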
 672
 673static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
 674{
 675	unsigned n_sectors;
 676	sector_t sector;
 677	unsigned offset, end;
 678
 679	b->end_io = end_io;
 680
 681	sector = block_to_sector(b->c, b->block);
 682
 683	if (rw != REQ_OP_WRITE) {
 684		n_sectors = b->c->block_size >> SECTOR_SHIFT;
 685		offset = 0;
 686	} else {
 687		if (b->c->write_callback)
 688			b->c->write_callback(b);
 689		offset = b->write_start;
 690		end = b->write_end;
 691		offset &= -DM_BUFIO_WRITE_ALIGN;
 692		end += DM_BUFIO_WRITE_ALIGN - 1;
 693		end &= -DM_BUFIO_WRITE_ALIGN;
 694		if (unlikely(end > b->c->block_size))
 695			end = b->c->block_size;
 696
 697		sector += offset >> SECTOR_SHIFT;
 698		n_sectors = (end - offset) >> SECTOR_SHIFT;
 699	}
 700
 701	if (b->data_mode != DATA_MODE_VMALLOC)
 702		use_bio(b, rw, sector, n_sectors, offset);
 703	else
 704		use_dmio(b, rw, sector, n_sectors, offset);
 705}
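/*
 * Worked example of the write rounding above (illustrative only): with a
 * 4096-byte block and a dirty byte range of [100, 700), the start is
 * rounded down to 0 and the end up to 4096, so the write covers sectors
 * [0, 8) of the block instead of an unaligned partial range.
 */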
 706
 707/*----------------------------------------------------------------
 708 * Writing dirty buffers
 709 *--------------------------------------------------------------*/
 710
 711/*
 712 * The endio routine for write.
 713 *
 714 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 715 * it.
 716 */
 717static void write_endio(struct dm_buffer *b, blk_status_t status)
 718{
 719	b->write_error = status;
 720	if (unlikely(status)) {
 721		struct dm_bufio_client *c = b->c;
 722
 723		(void)cmpxchg(&c->async_write_error, 0,
 724				blk_status_to_errno(status));
 725	}
 726
 727	BUG_ON(!test_bit(B_WRITING, &b->state));
 728
 729	smp_mb__before_atomic();
 730	clear_bit(B_WRITING, &b->state);
 731	smp_mb__after_atomic();
 732
 733	wake_up_bit(&b->state, B_WRITING);
 734}
 735
 736/*
 737 * Initiate a write on a dirty buffer, but don't wait for it.
 738 *
 739 * - If the buffer is not dirty, exit.
  740 * - If there is some previous write going on, wait for it to finish (we can't
 741 *   have two writes on the same buffer simultaneously).
 742 * - Submit our write and don't wait on it. We set B_WRITING indicating
 743 *   that there is a write in progress.
 744 */
 745static void __write_dirty_buffer(struct dm_buffer *b,
 746				 struct list_head *write_list)
 747{
 748	if (!test_bit(B_DIRTY, &b->state))
 749		return;
 750
 751	clear_bit(B_DIRTY, &b->state);
 752	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 753
 754	b->write_start = b->dirty_start;
 755	b->write_end = b->dirty_end;
 756
 757	if (!write_list)
 758		submit_io(b, REQ_OP_WRITE, write_endio);
 759	else
 760		list_add_tail(&b->write_list, write_list);
 761}
 762
 763static void __flush_write_list(struct list_head *write_list)
 764{
 765	struct blk_plug plug;
 766	blk_start_plug(&plug);
 767	while (!list_empty(write_list)) {
 768		struct dm_buffer *b =
 769			list_entry(write_list->next, struct dm_buffer, write_list);
 770		list_del(&b->write_list);
 771		submit_io(b, REQ_OP_WRITE, write_endio);
 772		cond_resched();
 773	}
 774	blk_finish_plug(&plug);
 775}
 776
 777/*
 778 * Wait until any activity on the buffer finishes.  Possibly write the
 779 * buffer if it is dirty.  When this function finishes, there is no I/O
 780 * running on the buffer and the buffer is not dirty.
 781 */
 782static void __make_buffer_clean(struct dm_buffer *b)
 783{
 784	BUG_ON(b->hold_count);
 785
 786	if (!b->state)	/* fast case */
 787		return;
 788
 789	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 790	__write_dirty_buffer(b, NULL);
 791	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 792}
 793
 794/*
 795 * Find some buffer that is not held by anybody, clean it, unlink it and
 796 * return it.
 797 */
 798static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
 799{
 800	struct dm_buffer *b;
 801
 802	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
 803		BUG_ON(test_bit(B_WRITING, &b->state));
 804		BUG_ON(test_bit(B_DIRTY, &b->state));
 805
 806		if (!b->hold_count) {
 807			__make_buffer_clean(b);
 808			__unlink_buffer(b);
 809			return b;
 810		}
 811		cond_resched();
 812	}
 813
 814	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
 815		BUG_ON(test_bit(B_READING, &b->state));
 816
 817		if (!b->hold_count) {
 818			__make_buffer_clean(b);
 819			__unlink_buffer(b);
 820			return b;
 821		}
 822		cond_resched();
 823	}
 824
 825	return NULL;
 826}
 827
 828/*
 829 * Wait until some other threads free some buffer or release hold count on
 830 * some buffer.
 831 *
 832 * This function is entered with c->lock held, drops it and regains it
 833 * before exiting.
 834 */
 835static void __wait_for_free_buffer(struct dm_bufio_client *c)
 836{
 837	DECLARE_WAITQUEUE(wait, current);
 838
 839	add_wait_queue(&c->free_buffer_wait, &wait);
 840	set_current_state(TASK_UNINTERRUPTIBLE);
 841	dm_bufio_unlock(c);
 842
 843	io_schedule();
 844
 845	remove_wait_queue(&c->free_buffer_wait, &wait);
 846
 847	dm_bufio_lock(c);
 848}
 849
 850enum new_flag {
 851	NF_FRESH = 0,
 852	NF_READ = 1,
 853	NF_GET = 2,
 854	NF_PREFETCH = 3
 855};
 856
 857/*
 858 * Allocate a new buffer. If the allocation is not possible, wait until
 859 * some other thread frees a buffer.
 860 *
 861 * May drop the lock and regain it.
 862 */
 863static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
 864{
 865	struct dm_buffer *b;
 866	bool tried_noio_alloc = false;
 867
 868	/*
 869	 * dm-bufio is resistant to allocation failures (it just keeps
 870	 * one buffer reserved in cases all the allocations fail).
 871	 * So set flags to not try too hard:
 872	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
 873	 *		    mutex and wait ourselves.
 874	 *	__GFP_NORETRY: don't retry and rather return failure
 875	 *	__GFP_NOMEMALLOC: don't use emergency reserves
 876	 *	__GFP_NOWARN: don't print a warning in case of failure
 877	 *
 878	 * For debugging, if we set the cache size to 1, no new buffers will
 879	 * be allocated.
 880	 */
 881	while (1) {
 882		if (dm_bufio_cache_size_latch != 1) {
 883			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 884			if (b)
 885				return b;
 886		}
 887
 888		if (nf == NF_PREFETCH)
 889			return NULL;
 890
 891		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
 892			dm_bufio_unlock(c);
 893			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 894			dm_bufio_lock(c);
 895			if (b)
 896				return b;
 897			tried_noio_alloc = true;
 898		}
 899
 900		if (!list_empty(&c->reserved_buffers)) {
 901			b = list_entry(c->reserved_buffers.next,
 902				       struct dm_buffer, lru_list);
 903			list_del(&b->lru_list);
 904			c->need_reserved_buffers++;
 905
 906			return b;
 907		}
 908
 909		b = __get_unclaimed_buffer(c);
 910		if (b)
 911			return b;
 912
 913		__wait_for_free_buffer(c);
 914	}
 915}
 916
 917static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
 918{
 919	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
 920
 921	if (!b)
 922		return NULL;
 923
 924	if (c->alloc_callback)
 925		c->alloc_callback(b);
 926
 927	return b;
 928}
 929
 930/*
 931 * Free a buffer and wake other threads waiting for free buffers.
 932 */
 933static void __free_buffer_wake(struct dm_buffer *b)
 934{
 935	struct dm_bufio_client *c = b->c;
 936
 937	if (!c->need_reserved_buffers)
 938		free_buffer(b);
 939	else {
 940		list_add(&b->lru_list, &c->reserved_buffers);
 941		c->need_reserved_buffers--;
 942	}
 943
 944	wake_up(&c->free_buffer_wait);
 945}
 946
 947static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
 948					struct list_head *write_list)
 949{
 950	struct dm_buffer *b, *tmp;
 951
 952	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
 953		BUG_ON(test_bit(B_READING, &b->state));
 954
 955		if (!test_bit(B_DIRTY, &b->state) &&
 956		    !test_bit(B_WRITING, &b->state)) {
 957			__relink_lru(b, LIST_CLEAN);
 958			continue;
 959		}
 960
 961		if (no_wait && test_bit(B_WRITING, &b->state))
 962			return;
 963
 964		__write_dirty_buffer(b, write_list);
 965		cond_resched();
 966	}
 967}
 968
 969/*
  970 * Check if we're over the writeback watermark.
  971 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times
  972 * the number of clean buffers, start writing them back asynchronously.
 973 */
 974static void __check_watermark(struct dm_bufio_client *c,
 975			      struct list_head *write_list)
 976{
 977	if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
 978		__write_dirty_buffers_async(c, 1, write_list);
 979}
 980
 981/*----------------------------------------------------------------
 982 * Getting a buffer
 983 *--------------------------------------------------------------*/
 984
 985static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 986				     enum new_flag nf, int *need_submit,
 987				     struct list_head *write_list)
 988{
 989	struct dm_buffer *b, *new_b = NULL;
 990
 991	*need_submit = 0;
 992
 993	b = __find(c, block);
 994	if (b)
 995		goto found_buffer;
 996
 997	if (nf == NF_GET)
 998		return NULL;
 999
1000	new_b = __alloc_buffer_wait(c, nf);
1001	if (!new_b)
1002		return NULL;
1003
1004	/*
1005	 * We've had a period where the mutex was unlocked, so need to
1006	 * recheck the buffer tree.
1007	 */
1008	b = __find(c, block);
1009	if (b) {
1010		__free_buffer_wake(new_b);
1011		goto found_buffer;
1012	}
1013
1014	__check_watermark(c, write_list);
1015
1016	b = new_b;
1017	b->hold_count = 1;
1018	b->read_error = 0;
1019	b->write_error = 0;
1020	__link_buffer(b, block, LIST_CLEAN);
1021
1022	if (nf == NF_FRESH) {
1023		b->state = 0;
1024		return b;
1025	}
1026
1027	b->state = 1 << B_READING;
1028	*need_submit = 1;
1029
1030	return b;
1031
1032found_buffer:
1033	if (nf == NF_PREFETCH)
1034		return NULL;
1035	/*
1036	 * Note: it is essential that we don't wait for the buffer to be
1037	 * read if dm_bufio_get function is used. Both dm_bufio_get and
1038	 * dm_bufio_prefetch can be used in the driver request routine.
1039	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1040	 * the same buffer, it would deadlock if we waited.
1041	 */
1042	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1043		return NULL;
1044
1045	b->hold_count++;
1046	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1047		     test_bit(B_WRITING, &b->state));
1048	return b;
1049}
1050
1051/*
1052 * The endio routine for reading: set the error, clear the bit and wake up
1053 * anyone waiting on the buffer.
1054 */
1055static void read_endio(struct dm_buffer *b, blk_status_t status)
1056{
1057	b->read_error = status;
1058
1059	BUG_ON(!test_bit(B_READING, &b->state));
1060
1061	smp_mb__before_atomic();
1062	clear_bit(B_READING, &b->state);
1063	smp_mb__after_atomic();
1064
1065	wake_up_bit(&b->state, B_READING);
1066}
1067
1068/*
1069 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1070 * functions is similar except that dm_bufio_new doesn't read the
1071 * buffer from the disk (assuming that the caller overwrites all the data
1072 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1073 */
1074static void *new_read(struct dm_bufio_client *c, sector_t block,
1075		      enum new_flag nf, struct dm_buffer **bp)
1076{
1077	int need_submit;
1078	struct dm_buffer *b;
1079
1080	LIST_HEAD(write_list);
1081
1082	dm_bufio_lock(c);
1083	b = __bufio_new(c, block, nf, &need_submit, &write_list);
1084#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1085	if (b && b->hold_count == 1)
1086		buffer_record_stack(b);
1087#endif
1088	dm_bufio_unlock(c);
1089
1090	__flush_write_list(&write_list);
1091
1092	if (!b)
1093		return NULL;
1094
1095	if (need_submit)
1096		submit_io(b, REQ_OP_READ, read_endio);
1097
1098	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1099
1100	if (b->read_error) {
1101		int error = blk_status_to_errno(b->read_error);
1102
1103		dm_bufio_release(b);
1104
1105		return ERR_PTR(error);
1106	}
1107
1108	*bp = b;
1109
1110	return b->data;
1111}
1112
1113void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1114		   struct dm_buffer **bp)
1115{
1116	return new_read(c, block, NF_GET, bp);
1117}
1118EXPORT_SYMBOL_GPL(dm_bufio_get);
1119
1120void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1121		    struct dm_buffer **bp)
1122{
1123	BUG_ON(dm_bufio_in_request());
1124
1125	return new_read(c, block, NF_READ, bp);
1126}
1127EXPORT_SYMBOL_GPL(dm_bufio_read);
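/*
 * Illustrative sketch (not part of dm-bufio): how a hypothetical caller
 * might read a single block through this client.  The function name and
 * error handling policy are assumptions made for the example only; real
 * users are dm targets such as dm-verity and the persistent-data code.
 */
#if 0
static int example_read_one_block(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *bp;
	void *data;

	/*
	 * May sleep and perform I/O, so it must not be called from a
	 * target's request routine (see the BUG_ON above).
	 */
	data = dm_bufio_read(c, block, &bp);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* dm_bufio_get_block_size(c) bytes are valid at "data" here. */

	dm_bufio_release(bp);
	return 0;
}
#endif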
1128
1129void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1130		   struct dm_buffer **bp)
1131{
1132	BUG_ON(dm_bufio_in_request());
1133
1134	return new_read(c, block, NF_FRESH, bp);
1135}
1136EXPORT_SYMBOL_GPL(dm_bufio_new);
1137
1138void dm_bufio_prefetch(struct dm_bufio_client *c,
1139		       sector_t block, unsigned n_blocks)
1140{
1141	struct blk_plug plug;
1142
1143	LIST_HEAD(write_list);
1144
1145	BUG_ON(dm_bufio_in_request());
1146
1147	blk_start_plug(&plug);
1148	dm_bufio_lock(c);
1149
1150	for (; n_blocks--; block++) {
1151		int need_submit;
1152		struct dm_buffer *b;
1153		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1154				&write_list);
1155		if (unlikely(!list_empty(&write_list))) {
1156			dm_bufio_unlock(c);
1157			blk_finish_plug(&plug);
1158			__flush_write_list(&write_list);
1159			blk_start_plug(&plug);
1160			dm_bufio_lock(c);
1161		}
1162		if (unlikely(b != NULL)) {
1163			dm_bufio_unlock(c);
1164
1165			if (need_submit)
1166				submit_io(b, REQ_OP_READ, read_endio);
1167			dm_bufio_release(b);
1168
1169			cond_resched();
1170
1171			if (!n_blocks)
1172				goto flush_plug;
1173			dm_bufio_lock(c);
1174		}
1175	}
1176
1177	dm_bufio_unlock(c);
1178
1179flush_plug:
1180	blk_finish_plug(&plug);
1181}
1182EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
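/*
 * Illustrative sketch (not part of dm-bufio): issuing a prefetch hint for
 * a range of blocks before reading them, so that the reads below mostly
 * hit the cache.  The names and the skip-on-error policy are assumptions
 * made for the example only.
 */
#if 0
static void example_prefetch_then_read(struct dm_bufio_client *c,
				       sector_t first, unsigned n)
{
	sector_t block;

	/* Hint only: starts the reads asynchronously and returns. */
	dm_bufio_prefetch(c, first, n);

	for (block = first; block < first + n; block++) {
		struct dm_buffer *bp;
		void *data = dm_bufio_read(c, block, &bp);

		if (IS_ERR(data))
			continue;
		/* ... use the block data ... */
		dm_bufio_release(bp);
	}
}
#endif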
1183
1184void dm_bufio_release(struct dm_buffer *b)
1185{
1186	struct dm_bufio_client *c = b->c;
1187
1188	dm_bufio_lock(c);
1189
1190	BUG_ON(!b->hold_count);
1191
1192	b->hold_count--;
1193	if (!b->hold_count) {
1194		wake_up(&c->free_buffer_wait);
1195
1196		/*
1197		 * If there were errors on the buffer, and the buffer is not
1198		 * to be written, free the buffer. There is no point in caching
 1199		 * an invalid buffer.
1200		 */
1201		if ((b->read_error || b->write_error) &&
1202		    !test_bit(B_READING, &b->state) &&
1203		    !test_bit(B_WRITING, &b->state) &&
1204		    !test_bit(B_DIRTY, &b->state)) {
1205			__unlink_buffer(b);
1206			__free_buffer_wake(b);
1207		}
1208	}
1209
1210	dm_bufio_unlock(c);
1211}
1212EXPORT_SYMBOL_GPL(dm_bufio_release);
1213
1214void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
1215					unsigned start, unsigned end)
1216{
1217	struct dm_bufio_client *c = b->c;
1218
1219	BUG_ON(start >= end);
1220	BUG_ON(end > b->c->block_size);
1221
1222	dm_bufio_lock(c);
1223
1224	BUG_ON(test_bit(B_READING, &b->state));
1225
1226	if (!test_and_set_bit(B_DIRTY, &b->state)) {
1227		b->dirty_start = start;
1228		b->dirty_end = end;
1229		__relink_lru(b, LIST_DIRTY);
1230	} else {
1231		if (start < b->dirty_start)
1232			b->dirty_start = start;
1233		if (end > b->dirty_end)
1234			b->dirty_end = end;
1235	}
1236
1237	dm_bufio_unlock(c);
1238}
1239EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
1240
1241void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1242{
1243	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
1244}
1245EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1246
1247void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1248{
1249	LIST_HEAD(write_list);
1250
1251	BUG_ON(dm_bufio_in_request());
1252
1253	dm_bufio_lock(c);
1254	__write_dirty_buffers_async(c, 0, &write_list);
1255	dm_bufio_unlock(c);
1256	__flush_write_list(&write_list);
1257}
1258EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1259
1260/*
1261 * For performance, it is essential that the buffers are written asynchronously
1262 * and simultaneously (so that the block layer can merge the writes) and then
1263 * waited upon.
1264 *
1265 * Finally, we flush hardware disk cache.
1266 */
1267int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1268{
1269	int a, f;
1270	unsigned long buffers_processed = 0;
1271	struct dm_buffer *b, *tmp;
1272
1273	LIST_HEAD(write_list);
1274
1275	dm_bufio_lock(c);
1276	__write_dirty_buffers_async(c, 0, &write_list);
1277	dm_bufio_unlock(c);
1278	__flush_write_list(&write_list);
1279	dm_bufio_lock(c);
1280
1281again:
1282	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1283		int dropped_lock = 0;
1284
1285		if (buffers_processed < c->n_buffers[LIST_DIRTY])
1286			buffers_processed++;
1287
1288		BUG_ON(test_bit(B_READING, &b->state));
1289
1290		if (test_bit(B_WRITING, &b->state)) {
1291			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1292				dropped_lock = 1;
1293				b->hold_count++;
1294				dm_bufio_unlock(c);
1295				wait_on_bit_io(&b->state, B_WRITING,
1296					       TASK_UNINTERRUPTIBLE);
1297				dm_bufio_lock(c);
1298				b->hold_count--;
1299			} else
1300				wait_on_bit_io(&b->state, B_WRITING,
1301					       TASK_UNINTERRUPTIBLE);
1302		}
1303
1304		if (!test_bit(B_DIRTY, &b->state) &&
1305		    !test_bit(B_WRITING, &b->state))
1306			__relink_lru(b, LIST_CLEAN);
1307
1308		cond_resched();
1309
1310		/*
1311		 * If we dropped the lock, the list is no longer consistent,
1312		 * so we must restart the search.
1313		 *
1314		 * In the most common case, the buffer just processed is
1315		 * relinked to the clean list, so we won't loop scanning the
1316		 * same buffer again and again.
1317		 *
1318		 * This may livelock if there is another thread simultaneously
1319		 * dirtying buffers, so we count the number of buffers walked
1320		 * and if it exceeds the total number of buffers, it means that
1321		 * someone is doing some writes simultaneously with us.  In
1322		 * this case, stop, dropping the lock.
1323		 */
1324		if (dropped_lock)
1325			goto again;
1326	}
1327	wake_up(&c->free_buffer_wait);
1328	dm_bufio_unlock(c);
1329
1330	a = xchg(&c->async_write_error, 0);
1331	f = dm_bufio_issue_flush(c);
1332	if (a)
1333		return a;
1334
1335	return f;
1336}
1337EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
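/*
 * Illustrative sketch (not part of dm-bufio): overwriting a whole block
 * and committing it.  dm_bufio_new() is used because the block is fully
 * rewritten, so there is no need to read the old contents first.  The
 * helper name and the requirement that "payload" holds a full block are
 * assumptions made for the example only.
 */
#if 0
static int example_rewrite_block(struct dm_bufio_client *c, sector_t block,
				 const void *payload)
{
	struct dm_buffer *bp;
	void *data;

	data = dm_bufio_new(c, block, &bp);
	if (IS_ERR(data))
		return PTR_ERR(data);

	memcpy(data, payload, dm_bufio_get_block_size(c));
	dm_bufio_mark_buffer_dirty(bp);
	dm_bufio_release(bp);

	/* Write all dirty buffers of this client and flush the disk cache. */
	return dm_bufio_write_dirty_buffers(c);
}
#endif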
1338
1339/*
1340 * Use dm-io to send an empty barrier to flush the device.
1341 */
1342int dm_bufio_issue_flush(struct dm_bufio_client *c)
1343{
1344	struct dm_io_request io_req = {
1345		.bi_op = REQ_OP_WRITE,
1346		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
1347		.mem.type = DM_IO_KMEM,
1348		.mem.ptr.addr = NULL,
1349		.client = c->dm_io,
1350	};
1351	struct dm_io_region io_reg = {
1352		.bdev = c->bdev,
1353		.sector = 0,
1354		.count = 0,
1355	};
1356
1357	BUG_ON(dm_bufio_in_request());
1358
1359	return dm_io(&io_req, 1, &io_reg, NULL);
1360}
1361EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1362
1363/*
1364 * Use dm-io to send a discard request to flush the device.
1365 */
1366int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
1367{
1368	struct dm_io_request io_req = {
1369		.bi_op = REQ_OP_DISCARD,
1370		.bi_op_flags = REQ_SYNC,
1371		.mem.type = DM_IO_KMEM,
1372		.mem.ptr.addr = NULL,
1373		.client = c->dm_io,
1374	};
1375	struct dm_io_region io_reg = {
1376		.bdev = c->bdev,
1377		.sector = block_to_sector(c, block),
1378		.count = block_to_sector(c, count),
1379	};
1380
1381	BUG_ON(dm_bufio_in_request());
1382
1383	return dm_io(&io_req, 1, &io_reg, NULL);
1384}
1385EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
1386
1387/*
1388 * We first delete any other buffer that may be at that new location.
1389 *
1390 * Then, we write the buffer to the original location if it was dirty.
1391 *
1392 * Then, if we are the only one who is holding the buffer, relink the buffer
1393 * in the buffer tree for the new location.
1394 *
1395 * If there was someone else holding the buffer, we write it to the new
1396 * location but not relink it, because that other user needs to have the buffer
1397 * at the same place.
1398 */
1399void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1400{
1401	struct dm_bufio_client *c = b->c;
1402	struct dm_buffer *new;
1403
1404	BUG_ON(dm_bufio_in_request());
1405
1406	dm_bufio_lock(c);
1407
1408retry:
1409	new = __find(c, new_block);
1410	if (new) {
1411		if (new->hold_count) {
1412			__wait_for_free_buffer(c);
1413			goto retry;
1414		}
1415
1416		/*
1417		 * FIXME: Is there any point waiting for a write that's going
1418		 * to be overwritten in a bit?
1419		 */
1420		__make_buffer_clean(new);
1421		__unlink_buffer(new);
1422		__free_buffer_wake(new);
1423	}
1424
1425	BUG_ON(!b->hold_count);
1426	BUG_ON(test_bit(B_READING, &b->state));
1427
1428	__write_dirty_buffer(b, NULL);
1429	if (b->hold_count == 1) {
1430		wait_on_bit_io(&b->state, B_WRITING,
1431			       TASK_UNINTERRUPTIBLE);
1432		set_bit(B_DIRTY, &b->state);
1433		b->dirty_start = 0;
1434		b->dirty_end = c->block_size;
1435		__unlink_buffer(b);
1436		__link_buffer(b, new_block, LIST_DIRTY);
1437	} else {
1438		sector_t old_block;
1439		wait_on_bit_lock_io(&b->state, B_WRITING,
1440				    TASK_UNINTERRUPTIBLE);
1441		/*
1442		 * Relink buffer to "new_block" so that write_callback
1443		 * sees "new_block" as a block number.
1444		 * After the write, link the buffer back to old_block.
1445		 * All this must be done in bufio lock, so that block number
1446		 * change isn't visible to other threads.
1447		 */
1448		old_block = b->block;
1449		__unlink_buffer(b);
1450		__link_buffer(b, new_block, b->list_mode);
1451		submit_io(b, REQ_OP_WRITE, write_endio);
1452		wait_on_bit_io(&b->state, B_WRITING,
1453			       TASK_UNINTERRUPTIBLE);
1454		__unlink_buffer(b);
1455		__link_buffer(b, old_block, b->list_mode);
1456	}
1457
1458	dm_bufio_unlock(c);
1459	dm_bufio_release(b);
1460}
1461EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1462
1463static void forget_buffer_locked(struct dm_buffer *b)
1464{
1465	if (likely(!b->hold_count) && likely(!b->state)) {
1466		__unlink_buffer(b);
1467		__free_buffer_wake(b);
1468	}
1469}
1470
1471/*
1472 * Free the given buffer.
1473 *
1474 * This is just a hint, if the buffer is in use or dirty, this function
1475 * does nothing.
1476 */
1477void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1478{
1479	struct dm_buffer *b;
1480
1481	dm_bufio_lock(c);
1482
1483	b = __find(c, block);
1484	if (b)
1485		forget_buffer_locked(b);
1486
1487	dm_bufio_unlock(c);
1488}
1489EXPORT_SYMBOL_GPL(dm_bufio_forget);
1490
1491void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
1492{
1493	struct dm_buffer *b;
1494	sector_t end_block = block + n_blocks;
1495
1496	while (block < end_block) {
1497		dm_bufio_lock(c);
1498
1499		b = __find_next(c, block);
1500		if (b) {
1501			block = b->block + 1;
1502			forget_buffer_locked(b);
1503		}
1504
1505		dm_bufio_unlock(c);
1506
1507		if (!b)
1508			break;
1509	}
1510
1511}
1512EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
1513
1514void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1515{
1516	c->minimum_buffers = n;
1517}
1518EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
1519
1520unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1521{
1522	return c->block_size;
1523}
1524EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1525
1526sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1527{
1528	sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
1529	if (likely(c->sectors_per_block_bits >= 0))
1530		s >>= c->sectors_per_block_bits;
1531	else
1532		sector_div(s, c->block_size >> SECTOR_SHIFT);
1533	return s;
1534}
1535EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1536
1537sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1538{
1539	return b->block;
1540}
1541EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1542
1543void *dm_bufio_get_block_data(struct dm_buffer *b)
1544{
1545	return b->data;
1546}
1547EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1548
1549void *dm_bufio_get_aux_data(struct dm_buffer *b)
1550{
1551	return b + 1;
1552}
1553EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1554
1555struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1556{
1557	return b->c;
1558}
1559EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1560
1561static void drop_buffers(struct dm_bufio_client *c)
1562{
1563	struct dm_buffer *b;
1564	int i;
1565	bool warned = false;
1566
1567	BUG_ON(dm_bufio_in_request());
1568
1569	/*
1570	 * An optimization so that the buffers are not written one-by-one.
1571	 */
1572	dm_bufio_write_dirty_buffers_async(c);
1573
1574	dm_bufio_lock(c);
1575
1576	while ((b = __get_unclaimed_buffer(c)))
1577		__free_buffer_wake(b);
1578
1579	for (i = 0; i < LIST_SIZE; i++)
1580		list_for_each_entry(b, &c->lru[i], lru_list) {
1581			WARN_ON(!warned);
1582			warned = true;
1583			DMERR("leaked buffer %llx, hold count %u, list %d",
1584			      (unsigned long long)b->block, b->hold_count, i);
1585#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1586			stack_trace_print(b->stack_entries, b->stack_len, 1);
1587			/* mark unclaimed to avoid BUG_ON below */
1588			b->hold_count = 0;
1589#endif
1590		}
1591
1592#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1593	while ((b = __get_unclaimed_buffer(c)))
1594		__free_buffer_wake(b);
1595#endif
1596
1597	for (i = 0; i < LIST_SIZE; i++)
1598		BUG_ON(!list_empty(&c->lru[i]));
1599
1600	dm_bufio_unlock(c);
1601}
1602
1603/*
1604 * We may not be able to evict this buffer if IO pending or the client
1605 * is still using it.  Caller is expected to know buffer is too old.
1606 *
1607 * And if GFP_NOFS is used, we must not do any I/O because we hold
1608 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1609 * rerouted to different bufio client.
1610 */
1611static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1612{
1613	if (!(gfp & __GFP_FS)) {
1614		if (test_bit(B_READING, &b->state) ||
1615		    test_bit(B_WRITING, &b->state) ||
1616		    test_bit(B_DIRTY, &b->state))
1617			return false;
1618	}
1619
1620	if (b->hold_count)
1621		return false;
1622
1623	__make_buffer_clean(b);
1624	__unlink_buffer(b);
1625	__free_buffer_wake(b);
1626
1627	return true;
1628}
1629
1630static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1631{
1632	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
1633	if (likely(c->sectors_per_block_bits >= 0))
1634		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
1635	else
1636		retain_bytes /= c->block_size;
1637	return retain_bytes;
1638}
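/*
 * Worked example (illustrative only): with the default retain_bytes of
 * 256 KiB and 4 KiB blocks, get_retain_buffers() returns 64, so the
 * shrinker and the aging timer keep at least 64 buffers per client.
 */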
1639
1640static void __scan(struct dm_bufio_client *c)
1641{
1642	int l;
1643	struct dm_buffer *b, *tmp;
1644	unsigned long freed = 0;
1645	unsigned long count = c->n_buffers[LIST_CLEAN] +
1646			      c->n_buffers[LIST_DIRTY];
1647	unsigned long retain_target = get_retain_buffers(c);
1648
1649	for (l = 0; l < LIST_SIZE; l++) {
1650		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1651			if (count - freed <= retain_target)
1652				atomic_long_set(&c->need_shrink, 0);
1653			if (!atomic_long_read(&c->need_shrink))
1654				return;
1655			if (__try_evict_buffer(b, GFP_KERNEL)) {
1656				atomic_long_dec(&c->need_shrink);
1657				freed++;
1658			}
1659			cond_resched();
1660		}
1661	}
1662}
1663
1664static void shrink_work(struct work_struct *w)
1665{
1666	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
1667
1668	dm_bufio_lock(c);
1669	__scan(c);
1670	dm_bufio_unlock(c);
1671}
1672
1673static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1674{
1675	struct dm_bufio_client *c;
1676
1677	c = container_of(shrink, struct dm_bufio_client, shrinker);
1678	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
1679	queue_work(dm_bufio_wq, &c->shrink_work);
1680
1681	return sc->nr_to_scan;
1682}
1683
1684static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1685{
1686	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1687	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1688			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
1689	unsigned long retain_target = get_retain_buffers(c);
1690	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
1691
1692	if (unlikely(count < retain_target))
1693		count = 0;
1694	else
1695		count -= retain_target;
1696
1697	if (unlikely(count < queued_for_cleanup))
1698		count = 0;
1699	else
1700		count -= queued_for_cleanup;
1701
1702	return count;
1703}
1704
1705/*
1706 * Create the buffering interface
1707 */
1708struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1709					       unsigned reserved_buffers, unsigned aux_size,
1710					       void (*alloc_callback)(struct dm_buffer *),
1711					       void (*write_callback)(struct dm_buffer *))
1712{
1713	int r;
1714	struct dm_bufio_client *c;
1715	unsigned i;
1716	char slab_name[27];
1717
1718	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
1719		DMERR("%s: block size not specified or is not multiple of 512b", __func__);
1720		r = -EINVAL;
1721		goto bad_client;
1722	}
1723
1724	c = kzalloc(sizeof(*c), GFP_KERNEL);
1725	if (!c) {
1726		r = -ENOMEM;
1727		goto bad_client;
1728	}
1729	c->buffer_tree = RB_ROOT;
1730
1731	c->bdev = bdev;
1732	c->block_size = block_size;
1733	if (is_power_of_2(block_size))
1734		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1735	else
1736		c->sectors_per_block_bits = -1;
1737
1738	c->alloc_callback = alloc_callback;
1739	c->write_callback = write_callback;
1740
1741	for (i = 0; i < LIST_SIZE; i++) {
1742		INIT_LIST_HEAD(&c->lru[i]);
1743		c->n_buffers[i] = 0;
1744	}
1745
1746	mutex_init(&c->lock);
1747	INIT_LIST_HEAD(&c->reserved_buffers);
1748	c->need_reserved_buffers = reserved_buffers;
1749
1750	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
1751
1752	init_waitqueue_head(&c->free_buffer_wait);
1753	c->async_write_error = 0;
1754
1755	c->dm_io = dm_io_client_create();
1756	if (IS_ERR(c->dm_io)) {
1757		r = PTR_ERR(c->dm_io);
1758		goto bad_dm_io;
1759	}
1760
1761	if (block_size <= KMALLOC_MAX_SIZE &&
1762	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
1763		unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
1764		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
1765		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
1766						  SLAB_RECLAIM_ACCOUNT, NULL);
1767		if (!c->slab_cache) {
1768			r = -ENOMEM;
1769			goto bad;
1770		}
1771	}
1772	if (aux_size)
1773		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
1774	else
1775		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
1776	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
1777					   0, SLAB_RECLAIM_ACCOUNT, NULL);
1778	if (!c->slab_buffer) {
1779		r = -ENOMEM;
1780		goto bad;
1781	}
1782
1783	while (c->need_reserved_buffers) {
1784		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1785
1786		if (!b) {
1787			r = -ENOMEM;
1788			goto bad;
1789		}
1790		__free_buffer_wake(b);
1791	}
1792
1793	INIT_WORK(&c->shrink_work, shrink_work);
1794	atomic_long_set(&c->need_shrink, 0);
1795
1796	c->shrinker.count_objects = dm_bufio_shrink_count;
1797	c->shrinker.scan_objects = dm_bufio_shrink_scan;
1798	c->shrinker.seeks = 1;
1799	c->shrinker.batch = 0;
1800	r = register_shrinker(&c->shrinker);
1801	if (r)
1802		goto bad;
1803
1804	mutex_lock(&dm_bufio_clients_lock);
1805	dm_bufio_client_count++;
1806	list_add(&c->client_list, &dm_bufio_all_clients);
1807	__cache_size_refresh();
1808	mutex_unlock(&dm_bufio_clients_lock);
1809
1810	return c;
1811
1812bad:
1813	while (!list_empty(&c->reserved_buffers)) {
1814		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1815						 struct dm_buffer, lru_list);
1816		list_del(&b->lru_list);
1817		free_buffer(b);
1818	}
1819	kmem_cache_destroy(c->slab_cache);
1820	kmem_cache_destroy(c->slab_buffer);
1821	dm_io_client_destroy(c->dm_io);
1822bad_dm_io:
1823	mutex_destroy(&c->lock);
1824	kfree(c);
1825bad_client:
1826	return ERR_PTR(r);
1827}
1828EXPORT_SYMBOL_GPL(dm_bufio_client_create);
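/*
 * Illustrative sketch (not part of dm-bufio): a client's lifetime as a
 * hypothetical dm target might manage it from its ctr/dtr.  The 4096-byte
 * block size, single reserved buffer, 16 bytes of per-buffer aux data and
 * the "data_start" offset are assumptions made for the example only.
 */
#if 0
static struct dm_bufio_client *example_client_setup(struct block_device *bdev,
						    sector_t data_start)
{
	struct dm_bufio_client *c;

	c = dm_bufio_client_create(bdev, 4096, 1, 16, NULL, NULL);
	if (IS_ERR(c))
		return c;

	/* Block numbers passed to dm_bufio_* are now relative to data_start. */
	dm_bufio_set_sector_offset(c, data_start);
	return c;
}

static void example_client_teardown(struct dm_bufio_client *c)
{
	/* Every buffer must have been released before this point. */
	dm_bufio_client_destroy(c);
}
#endif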
1829
1830/*
1831 * Free the buffering interface.
1832 * It is required that there are no references on any buffers.
1833 */
1834void dm_bufio_client_destroy(struct dm_bufio_client *c)
1835{
1836	unsigned i;
1837
1838	drop_buffers(c);
1839
1840	unregister_shrinker(&c->shrinker);
1841	flush_work(&c->shrink_work);
1842
1843	mutex_lock(&dm_bufio_clients_lock);
1844
1845	list_del(&c->client_list);
1846	dm_bufio_client_count--;
1847	__cache_size_refresh();
1848
1849	mutex_unlock(&dm_bufio_clients_lock);
1850
1851	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1852	BUG_ON(c->need_reserved_buffers);
1853
1854	while (!list_empty(&c->reserved_buffers)) {
1855		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1856						 struct dm_buffer, lru_list);
1857		list_del(&b->lru_list);
1858		free_buffer(b);
1859	}
1860
1861	for (i = 0; i < LIST_SIZE; i++)
1862		if (c->n_buffers[i])
1863			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1864
1865	for (i = 0; i < LIST_SIZE; i++)
1866		BUG_ON(c->n_buffers[i]);
1867
1868	kmem_cache_destroy(c->slab_cache);
1869	kmem_cache_destroy(c->slab_buffer);
1870	dm_io_client_destroy(c->dm_io);
1871	mutex_destroy(&c->lock);
1872	kfree(c);
1873}
1874EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1875
1876void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1877{
1878	c->start = start;
1879}
1880EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
1881
1882static unsigned get_max_age_hz(void)
1883{
1884	unsigned max_age = READ_ONCE(dm_bufio_max_age);
1885
1886	if (max_age > UINT_MAX / HZ)
1887		max_age = UINT_MAX / HZ;
1888
1889	return max_age * HZ;
1890}
1891
1892static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1893{
1894	return time_after_eq(jiffies, b->last_accessed + age_hz);
1895}
1896
1897static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1898{
1899	struct dm_buffer *b, *tmp;
1900	unsigned long retain_target = get_retain_buffers(c);
1901	unsigned long count;
1902	LIST_HEAD(write_list);
1903
1904	dm_bufio_lock(c);
1905
1906	__check_watermark(c, &write_list);
1907	if (unlikely(!list_empty(&write_list))) {
1908		dm_bufio_unlock(c);
1909		__flush_write_list(&write_list);
1910		dm_bufio_lock(c);
1911	}
1912
1913	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1914	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1915		if (count <= retain_target)
1916			break;
1917
1918		if (!older_than(b, age_hz))
1919			break;
1920
1921		if (__try_evict_buffer(b, 0))
1922			count--;
1923
1924		cond_resched();
1925	}
1926
1927	dm_bufio_unlock(c);
1928}
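/*
 * Worked example of the aging policy (illustrative only): work_fn() runs
 * every DM_BUFIO_WORK_TIMER_SECS (30 s); on each run, clean buffers that
 * have not been accessed for max_age_seconds (300 s by default) are
 * freed, but never below the per-client retain target.
 */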
1929
1930static void do_global_cleanup(struct work_struct *w)
1931{
1932	struct dm_bufio_client *locked_client = NULL;
1933	struct dm_bufio_client *current_client;
1934	struct dm_buffer *b;
1935	unsigned spinlock_hold_count;
1936	unsigned long threshold = dm_bufio_cache_size -
1937		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
1938	unsigned long loops = global_num * 2;
1939
1940	mutex_lock(&dm_bufio_clients_lock);
1941
1942	while (1) {
1943		cond_resched();
1944
1945		spin_lock(&global_spinlock);
1946		if (unlikely(dm_bufio_current_allocated <= threshold))
1947			break;
1948
1949		spinlock_hold_count = 0;
1950get_next:
1951		if (!loops--)
1952			break;
1953		if (unlikely(list_empty(&global_queue)))
1954			break;
1955		b = list_entry(global_queue.prev, struct dm_buffer, global_list);
1956
1957		if (b->accessed) {
1958			b->accessed = 0;
1959			list_move(&b->global_list, &global_queue);
1960			if (likely(++spinlock_hold_count < 16))
1961				goto get_next;
1962			spin_unlock(&global_spinlock);
1963			continue;
1964		}
1965
1966		current_client = b->c;
1967		if (unlikely(current_client != locked_client)) {
1968			if (locked_client)
1969				dm_bufio_unlock(locked_client);
1970
1971			if (!dm_bufio_trylock(current_client)) {
1972				spin_unlock(&global_spinlock);
1973				dm_bufio_lock(current_client);
1974				locked_client = current_client;
1975				continue;
1976			}
1977
1978			locked_client = current_client;
1979		}
1980
1981		spin_unlock(&global_spinlock);
1982
1983		if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
1984			spin_lock(&global_spinlock);
1985			list_move(&b->global_list, &global_queue);
1986			spin_unlock(&global_spinlock);
1987		}
1988	}
1989
1990	spin_unlock(&global_spinlock);
1991
1992	if (locked_client)
1993		dm_bufio_unlock(locked_client);
1994
1995	mutex_unlock(&dm_bufio_clients_lock);
1996}
1997
1998static void cleanup_old_buffers(void)
1999{
2000	unsigned long max_age_hz = get_max_age_hz();
2001	struct dm_bufio_client *c;
2002
2003	mutex_lock(&dm_bufio_clients_lock);
2004
2005	__cache_size_refresh();
2006
2007	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2008		__evict_old_buffers(c, max_age_hz);
2009
2010	mutex_unlock(&dm_bufio_clients_lock);
2011}
2012
2013static void work_fn(struct work_struct *w)
2014{
2015	cleanup_old_buffers();
2016
2017	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2018			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2019}
2020
2021/*----------------------------------------------------------------
2022 * Module setup
2023 *--------------------------------------------------------------*/
2024
2025/*
2026 * This is called only once for the whole dm_bufio module.
2026 * It initializes the memory limit.
2028 */
2029static int __init dm_bufio_init(void)
2030{
2031	__u64 mem;
2032
2033	dm_bufio_allocated_kmem_cache = 0;
2034	dm_bufio_allocated_get_free_pages = 0;
2035	dm_bufio_allocated_vmalloc = 0;
2036	dm_bufio_current_allocated = 0;
2037
2038	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2039			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2040
2041	if (mem > ULONG_MAX)
2042		mem = ULONG_MAX;
2043
2044#ifdef CONFIG_MMU
2045	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2046		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2047#endif
2048
2049	dm_bufio_default_cache_size = mem;
2050
2051	mutex_lock(&dm_bufio_clients_lock);
2052	__cache_size_refresh();
2053	mutex_unlock(&dm_bufio_clients_lock);
2054
2055	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2056	if (!dm_bufio_wq)
2057		return -ENOMEM;
2058
2059	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2060	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2061	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2062			   DM_BUFIO_WORK_TIMER_SECS * HZ);
2063
2064	return 0;
2065}
2066
2067/*
2068 * This is called once when unloading the dm_bufio module.
2069 */
2070static void __exit dm_bufio_exit(void)
2071{
2072	int bug = 0;
2073
2074	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2075	flush_workqueue(dm_bufio_wq);
2076	destroy_workqueue(dm_bufio_wq);
2077
2078	if (dm_bufio_client_count) {
2079		DMCRIT("%s: dm_bufio_client_count leaked: %d",
2080			__func__, dm_bufio_client_count);
2081		bug = 1;
2082	}
2083
2084	if (dm_bufio_current_allocated) {
2085		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2086			__func__, dm_bufio_current_allocated);
2087		bug = 1;
2088	}
2089
2090	if (dm_bufio_allocated_get_free_pages) {
2091		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2092		       __func__, dm_bufio_allocated_get_free_pages);
2093		bug = 1;
2094	}
2095
2096	if (dm_bufio_allocated_vmalloc) {
2097		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
2098		       __func__, dm_bufio_allocated_vmalloc);
2099		bug = 1;
2100	}
2101
2102	BUG_ON(bug);
2103}
2104
2105module_init(dm_bufio_init)
2106module_exit(dm_bufio_exit)
2107
2108module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
2109MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2110
2111module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
2112MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2113
2114module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
2115MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2116
2117module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
2118MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2119
2120module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
2121MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2122
2123module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
2124MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2125
2126module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
2127MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2128
2129module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
2130MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2131
2132MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2133MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2134MODULE_LICENSE("GPL");
v4.6
   1/*
   2 * Copyright (C) 2009-2011 Red Hat, Inc.
   3 *
   4 * Author: Mikulas Patocka <mpatocka@redhat.com>
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include "dm-bufio.h"
  10
  11#include <linux/device-mapper.h>
  12#include <linux/dm-io.h>
  13#include <linux/slab.h>
 
  14#include <linux/jiffies.h>
  15#include <linux/vmalloc.h>
  16#include <linux/shrinker.h>
  17#include <linux/module.h>
  18#include <linux/rbtree.h>
  19#include <linux/stacktrace.h>
  20
  21#define DM_MSG_PREFIX "bufio"
  22
  23/*
  24 * Memory management policy:
  25 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
  26 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
  27 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
  28 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
  29 *	dirty buffers.
  30 */
  31#define DM_BUFIO_MIN_BUFFERS		8
  32
  33#define DM_BUFIO_MEMORY_PERCENT		2
  34#define DM_BUFIO_VMALLOC_PERCENT	25
  35#define DM_BUFIO_WRITEBACK_PERCENT	75
 
  36
  37/*
  38 * Check buffer ages in this interval (seconds)
  39 */
  40#define DM_BUFIO_WORK_TIMER_SECS	30
  41
  42/*
  43 * Free buffers when they are older than this (seconds)
  44 */
  45#define DM_BUFIO_DEFAULT_AGE_SECS	300
  46
  47/*
  48 * The nr of bytes of cached data to keep around.
  49 */
  50#define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
  51
  52/*
  53 * The number of bvec entries that are embedded directly in the buffer.
  54 * If the chunk size is larger, dm-io is used to do the io.
  55 */
  56#define DM_BUFIO_INLINE_VECS		16
  57
  58/*
  59 * Don't try to use kmem_cache_alloc for blocks larger than this.
  60 * For explanation, see alloc_buffer_data below.
  61 */
  62#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
  63#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))
  64
  65/*
  66 * dm_buffer->list_mode
  67 */
  68#define LIST_CLEAN	0
  69#define LIST_DIRTY	1
  70#define LIST_SIZE	2
  71
  72/*
  73 * Linking of buffers:
  74 *	All buffers are linked to cache_hash with their hash_list field.
  75 *
  76 *	Clean buffers that are not being written (B_WRITING not set)
  77 *	are linked to lru[LIST_CLEAN] with their lru_list field.
  78 *
  79 *	Dirty and clean buffers that are being written are linked to
  80 *	lru[LIST_DIRTY] with their lru_list field. When the write
  81 *	finishes, the buffer cannot be relinked immediately (because we
  82 *	are in an interrupt context and relinking requires process
  83 *	context), so some clean-not-writing buffers can be held on
  84 *	dirty_lru too.  They are later added to lru in the process
  85 *	context.
  86 */
  87struct dm_bufio_client {
  88	struct mutex lock;
  89
  90	struct list_head lru[LIST_SIZE];
  91	unsigned long n_buffers[LIST_SIZE];
  92
  93	struct block_device *bdev;
  94	unsigned block_size;
  95	unsigned char sectors_per_block_bits;
  96	unsigned char pages_per_block_bits;
  97	unsigned char blocks_per_page_bits;
  98	unsigned aux_size;
  99	void (*alloc_callback)(struct dm_buffer *);
 100	void (*write_callback)(struct dm_buffer *);
 101
 
 
 102	struct dm_io_client *dm_io;
 103
 104	struct list_head reserved_buffers;
 105	unsigned need_reserved_buffers;
 106
 107	unsigned minimum_buffers;
 108
 109	struct rb_root buffer_tree;
 110	wait_queue_head_t free_buffer_wait;
 111
 
 
 112	int async_write_error;
 113
 114	struct list_head client_list;
 
 115	struct shrinker shrinker;
 
 
 116};
 117
 118/*
 119 * Buffer state bits.
 120 */
 121#define B_READING	0
 122#define B_WRITING	1
 123#define B_DIRTY		2
 124
 125/*
 126 * Describes how the block was allocated:
 127 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 128 * See the comment at alloc_buffer_data.
 129 */
 130enum data_mode {
 131	DATA_MODE_SLAB = 0,
 132	DATA_MODE_GET_FREE_PAGES = 1,
 133	DATA_MODE_VMALLOC = 2,
 134	DATA_MODE_LIMIT = 3
 135};
 136
 137struct dm_buffer {
 138	struct rb_node node;
 139	struct list_head lru_list;
 
 140	sector_t block;
 141	void *data;
 142	enum data_mode data_mode;
 143	unsigned char list_mode;		/* LIST_* */
 
 
 
 144	unsigned hold_count;
 145	int read_error;
 146	int write_error;
 147	unsigned long state;
 148	unsigned long last_accessed;
 
 
 
 
 149	struct dm_bufio_client *c;
 150	struct list_head write_list;
 151	struct bio bio;
 152	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
 153#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 154#define MAX_STACK 10
 155	struct stack_trace stack_trace;
 156	unsigned long stack_entries[MAX_STACK];
 157#endif
 158};
 159
 160/*----------------------------------------------------------------*/
 161
 162static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
 163static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
 164
 165static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
 166{
 167	unsigned ret = c->blocks_per_page_bits - 1;
 168
 169	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
 170
 171	return ret;
 172}
 173
 174#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
 175#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])
 176
 177#define dm_bufio_in_request()	(!!current->bio_list)
 178
 179static void dm_bufio_lock(struct dm_bufio_client *c)
 180{
 181	mutex_lock_nested(&c->lock, dm_bufio_in_request());
 182}
 183
 184static int dm_bufio_trylock(struct dm_bufio_client *c)
 185{
 186	return mutex_trylock(&c->lock);
 187}
 188
 189static void dm_bufio_unlock(struct dm_bufio_client *c)
 190{
 191	mutex_unlock(&c->lock);
 192}
 193
 194/*
 195 * FIXME Move to sched.h?
 196 */
 197#ifdef CONFIG_PREEMPT_VOLUNTARY
 198#  define dm_bufio_cond_resched()		\
 199do {						\
 200	if (unlikely(need_resched()))		\
 201		_cond_resched();		\
 202} while (0)
 203#else
 204#  define dm_bufio_cond_resched()                do { } while (0)
 205#endif
 206
 207/*----------------------------------------------------------------*/
 208
 209/*
 210 * Default cache size: available memory divided by the ratio.
 211 */
 212static unsigned long dm_bufio_default_cache_size;
 213
 214/*
 215 * Total cache size set by the user.
 216 */
 217static unsigned long dm_bufio_cache_size;
 218
 219/*
 220 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 221 * at any time.  If it disagrees, the user has changed cache size.
 222 */
 223static unsigned long dm_bufio_cache_size_latch;
 224
 225static DEFINE_SPINLOCK(param_spinlock);
 
 
 
 
 226
 227/*
 228 * Buffers are freed after this timeout
 229 */
 230static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
 231static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
 232
 233static unsigned long dm_bufio_peak_allocated;
 234static unsigned long dm_bufio_allocated_kmem_cache;
 235static unsigned long dm_bufio_allocated_get_free_pages;
 236static unsigned long dm_bufio_allocated_vmalloc;
 237static unsigned long dm_bufio_current_allocated;
 238
 239/*----------------------------------------------------------------*/
 240
 241/*
 242 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 243 */
 244static unsigned long dm_bufio_cache_size_per_client;
 245
 246/*
 247 * The current number of clients.
 248 */
 249static int dm_bufio_client_count;
 250
 251/*
 252 * The list of all clients.
 253 */
 254static LIST_HEAD(dm_bufio_all_clients);
 255
 256/*
 257 * This mutex protects dm_bufio_cache_size_latch,
 258 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 259 */
 260static DEFINE_MUTEX(dm_bufio_clients_lock);
 261
 
 
 
 
 
 262#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 263static void buffer_record_stack(struct dm_buffer *b)
 264{
 265	b->stack_trace.nr_entries = 0;
 266	b->stack_trace.max_entries = MAX_STACK;
 267	b->stack_trace.entries = b->stack_entries;
 268	b->stack_trace.skip = 2;
 269	save_stack_trace(&b->stack_trace);
 270}
 271#endif
 272
 273/*----------------------------------------------------------------
 274 * A red/black tree acts as an index for all the buffers.
 275 *--------------------------------------------------------------*/
 276static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
 277{
 278	struct rb_node *n = c->buffer_tree.rb_node;
 279	struct dm_buffer *b;
 280
 281	while (n) {
 282		b = container_of(n, struct dm_buffer, node);
 283
 284		if (b->block == block)
 285			return b;
 286
 287		n = (b->block < block) ? n->rb_left : n->rb_right;
 288	}
 289
 290	return NULL;
 291}
 292
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 293static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
 294{
 295	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
 296	struct dm_buffer *found;
 297
 298	while (*new) {
 299		found = container_of(*new, struct dm_buffer, node);
 300
 301		if (found->block == b->block) {
 302			BUG_ON(found != b);
 303			return;
 304		}
 305
 306		parent = *new;
 307		new = (found->block < b->block) ?
 308			&((*new)->rb_left) : &((*new)->rb_right);
 309	}
 310
 311	rb_link_node(&b->node, parent, new);
 312	rb_insert_color(&b->node, &c->buffer_tree);
 313}
 314
 315static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
 316{
 317	rb_erase(&b->node, &c->buffer_tree);
 318}
 319
 320/*----------------------------------------------------------------*/
 321
 322static void adjust_total_allocated(enum data_mode data_mode, long diff)
 323{
 
 
 
 324	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
 325		&dm_bufio_allocated_kmem_cache,
 326		&dm_bufio_allocated_get_free_pages,
 327		&dm_bufio_allocated_vmalloc,
 328	};
 329
 330	spin_lock(&param_spinlock);
 
 
 
 
 
 331
 332	*class_ptr[data_mode] += diff;
 333
 334	dm_bufio_current_allocated += diff;
 335
 336	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
 337		dm_bufio_peak_allocated = dm_bufio_current_allocated;
 338
 339	spin_unlock(&param_spinlock);
 
 
 
 
 
 
 
 
 
 
 
 
 340}
 341
 342/*
 343 * Change the number of clients and recalculate per-client limit.
 344 */
 345static void __cache_size_refresh(void)
 346{
 347	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
 348	BUG_ON(dm_bufio_client_count < 0);
 349
 350	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
 351
 352	/*
 353	 * Use default if set to 0 and report the actual cache size used.
 354	 */
 355	if (!dm_bufio_cache_size_latch) {
 356		(void)cmpxchg(&dm_bufio_cache_size, 0,
 357			      dm_bufio_default_cache_size);
 358		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
 359	}
 360
 361	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
 362					 (dm_bufio_client_count ? : 1);
 363}
 364
 365/*
 366 * Allocating buffer data.
 367 *
 368 * Small buffers are allocated with kmem_cache, to use space optimally.
 369 *
 370 * For large buffers, we choose between get_free_pages and vmalloc.
 371 * Each has advantages and disadvantages.
 372 *
 373 * __get_free_pages can randomly fail if the memory is fragmented.
 374 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 375 * as low as 128M) so using it for caching is not appropriate.
 376 *
 377 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 378 * won't have a fatal effect here, but it just causes flushes of some other
 379 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 380 * always fails (i.e. order >= MAX_ORDER).
 381 *
 382 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 383 * initial reserve allocation, so there's no risk of wasting all vmalloc
 384 * space.
 385 */
 386static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 387			       enum data_mode *data_mode)
 388{
 389	unsigned noio_flag;
 390	void *ptr;
 391
 392	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
 393		*data_mode = DATA_MODE_SLAB;
 394		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
 395	}
 396
 397	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
 398	    gfp_mask & __GFP_NORETRY) {
 399		*data_mode = DATA_MODE_GET_FREE_PAGES;
 400		return (void *)__get_free_pages(gfp_mask,
 401						c->pages_per_block_bits);
 402	}
 403
 404	*data_mode = DATA_MODE_VMALLOC;
 405
 406	/*
 407	 * __vmalloc allocates the data pages and auxiliary structures with
 408	 * gfp_flags that were specified, but pagetables are always allocated
 409	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
 410	 *
 411	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
 412	 * all allocations done by this process (including pagetables) are done
 413	 * as if GFP_NOIO was specified.
 414	 */
 
 
 
 415
 416	if (gfp_mask & __GFP_NORETRY)
 417		noio_flag = memalloc_noio_save();
 418
 419	ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
 420
 421	if (gfp_mask & __GFP_NORETRY)
 422		memalloc_noio_restore(noio_flag);
 
 
 423
 424	return ptr;
 425}
 426
 427/*
 428 * Free buffer's data.
 429 */
 430static void free_buffer_data(struct dm_bufio_client *c,
 431			     void *data, enum data_mode data_mode)
 432{
 433	switch (data_mode) {
 434	case DATA_MODE_SLAB:
 435		kmem_cache_free(DM_BUFIO_CACHE(c), data);
 436		break;
 437
 438	case DATA_MODE_GET_FREE_PAGES:
 439		free_pages((unsigned long)data, c->pages_per_block_bits);
 
 440		break;
 441
 442	case DATA_MODE_VMALLOC:
 443		vfree(data);
 444		break;
 445
 446	default:
 447		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
 448		       data_mode);
 449		BUG();
 450	}
 451}
 452
 453/*
 454 * Allocate buffer and its data.
 455 */
 456static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
 457{
 458	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
 459				      gfp_mask);
 460
 461	if (!b)
 462		return NULL;
 463
 464	b->c = c;
 465
 466	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
 467	if (!b->data) {
 468		kfree(b);
 469		return NULL;
 470	}
 471
 472	adjust_total_allocated(b->data_mode, (long)c->block_size);
 473
 474#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 475	memset(&b->stack_trace, 0, sizeof(b->stack_trace));
 476#endif
 477	return b;
 478}
 479
 480/*
 481 * Free buffer and its data.
 482 */
 483static void free_buffer(struct dm_buffer *b)
 484{
 485	struct dm_bufio_client *c = b->c;
 486
 487	adjust_total_allocated(b->data_mode, -(long)c->block_size);
 488
 489	free_buffer_data(c, b->data, b->data_mode);
 490	kfree(b);
 491}
 492
 493/*
 494 * Link buffer to the hash list and clean or dirty queue.
 495 */
 496static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
 497{
 498	struct dm_bufio_client *c = b->c;
 499
 500	c->n_buffers[dirty]++;
 501	b->block = block;
 502	b->list_mode = dirty;
 503	list_add(&b->lru_list, &c->lru[dirty]);
 504	__insert(b->c, b);
 505	b->last_accessed = jiffies;
 
 
 506}
 507
 508/*
 509 * Unlink buffer from the hash list and dirty or clean queue.
 510 */
 511static void __unlink_buffer(struct dm_buffer *b)
 512{
 513	struct dm_bufio_client *c = b->c;
 514
 515	BUG_ON(!c->n_buffers[b->list_mode]);
 516
 517	c->n_buffers[b->list_mode]--;
 518	__remove(b->c, b);
 519	list_del(&b->lru_list);
 
 
 520}
 521
 522/*
 523 * Place the buffer to the head of dirty or clean LRU queue.
 524 */
 525static void __relink_lru(struct dm_buffer *b, int dirty)
 526{
 527	struct dm_bufio_client *c = b->c;
 528
 
 
 529	BUG_ON(!c->n_buffers[b->list_mode]);
 530
 531	c->n_buffers[b->list_mode]--;
 532	c->n_buffers[dirty]++;
 533	b->list_mode = dirty;
 534	list_move(&b->lru_list, &c->lru[dirty]);
 535	b->last_accessed = jiffies;
 536}
 537
 538/*----------------------------------------------------------------
 539 * Submit I/O on the buffer.
 540 *
 541 * Bio interface is faster but it has some problems:
 542 *	the vector list is limited (increasing this limit increases
 543 *	memory-consumption per buffer, so it is not viable);
 544 *
 545 *	the memory must be direct-mapped, not vmalloced;
 546 *
 547 *	the I/O driver can reject requests spuriously if it thinks that
 548 *	the requests are too big for the device or if they cross a
 549 *	controller-defined memory boundary.
 550 *
 551 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 552 * it is not vmalloced, try using the bio interface.
 553 *
 554 * If the buffer is big, if it is vmalloced or if the underlying device
 555 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 556 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 557 * shortcomings.
 558 *--------------------------------------------------------------*/
 559
 560/*
 561 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 562 * that the request was handled directly with bio interface.
 563 */
 564static void dmio_complete(unsigned long error, void *context)
 565{
 566	struct dm_buffer *b = context;
 567
 568	b->bio.bi_error = error ? -EIO : 0;
 569	b->bio.bi_end_io(&b->bio);
 570}
 571
 572static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
 573		     bio_end_io_t *end_io)
 574{
 575	int r;
 576	struct dm_io_request io_req = {
 577		.bi_rw = rw,
 
 578		.notify.fn = dmio_complete,
 579		.notify.context = b,
 580		.client = b->c->dm_io,
 581	};
 582	struct dm_io_region region = {
 583		.bdev = b->c->bdev,
 584		.sector = block << b->c->sectors_per_block_bits,
 585		.count = b->c->block_size >> SECTOR_SHIFT,
 586	};
 587
 588	if (b->data_mode != DATA_MODE_VMALLOC) {
 589		io_req.mem.type = DM_IO_KMEM;
 590		io_req.mem.ptr.addr = b->data;
 591	} else {
 592		io_req.mem.type = DM_IO_VMA;
 593		io_req.mem.ptr.vma = b->data;
 594	}
 595
 596	b->bio.bi_end_io = end_io;
 597
 598	r = dm_io(&io_req, 1, &region, NULL);
 599	if (r) {
 600		b->bio.bi_error = r;
 601		end_io(&b->bio);
 602	}
 603}
 604
 605static void inline_endio(struct bio *bio)
 606{
 607	bio_end_io_t *end_fn = bio->bi_private;
 608	int error = bio->bi_error;
 609
 610	/*
 611	 * Reset the bio to free any attached resources
 612	 * (e.g. bio integrity profiles).
 613	 */
 614	bio_reset(bio);
 615
 616	bio->bi_error = error;
 617	end_fn(bio);
 618}
 619
 620static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 621			   bio_end_io_t *end_io)
 622{
 
 623	char *ptr;
 624	int len;
 625
 626	bio_init(&b->bio);
 627	b->bio.bi_io_vec = b->bio_vec;
 628	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
 629	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
 630	b->bio.bi_bdev = b->c->bdev;
 631	b->bio.bi_end_io = inline_endio;
 632	/*
 633	 * Use of .bi_private isn't a problem here because
 634	 * the dm_buffer's inline bio is local to bufio.
 635	 */
 636	b->bio.bi_private = end_io;
 637
 638	/*
 639	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
 640	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
 641	 */
 642	ptr = b->data;
 643	len = b->c->block_size;
 644
 645	if (len >= PAGE_SIZE)
 646		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
 647	else
 648		BUG_ON((unsigned long)ptr & (len - 1));
 649
 650	do {
 651		if (!bio_add_page(&b->bio, virt_to_page(ptr),
 652				  len < PAGE_SIZE ? len : PAGE_SIZE,
 653				  offset_in_page(ptr))) {
 654			BUG_ON(b->c->block_size <= PAGE_SIZE);
 655			use_dmio(b, rw, block, end_io);
 656			return;
 657		}
 658
 659		len -= PAGE_SIZE;
 660		ptr += PAGE_SIZE;
 661	} while (len > 0);
 662
 663	submit_bio(rw, &b->bio);
 
 
 
 
 
 
 
 
 
 
 
 
 
 664}
 665
 666static void submit_io(struct dm_buffer *b, int rw, sector_t block,
 667		      bio_end_io_t *end_io)
 668{
 669	if (rw == WRITE && b->c->write_callback)
 670		b->c->write_callback(b);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 671
 672	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
 673	    b->data_mode != DATA_MODE_VMALLOC)
 674		use_inline_bio(b, rw, block, end_io);
 
 
 
 675	else
 676		use_dmio(b, rw, block, end_io);
 677}
 678
 679/*----------------------------------------------------------------
 680 * Writing dirty buffers
 681 *--------------------------------------------------------------*/
 682
 683/*
 684 * The endio routine for write.
 685 *
 686 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 687 * it.
 688 */
 689static void write_endio(struct bio *bio)
 690{
 691	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
 
 692
 693	b->write_error = bio->bi_error;
 694	if (unlikely(bio->bi_error)) {
 695		struct dm_bufio_client *c = b->c;
 696		int error = bio->bi_error;
 697		(void)cmpxchg(&c->async_write_error, 0, error);
 698	}
 699
 700	BUG_ON(!test_bit(B_WRITING, &b->state));
 701
 702	smp_mb__before_atomic();
 703	clear_bit(B_WRITING, &b->state);
 704	smp_mb__after_atomic();
 705
 706	wake_up_bit(&b->state, B_WRITING);
 707}
 708
 709/*
 710 * Initiate a write on a dirty buffer, but don't wait for it.
 711 *
 712 * - If the buffer is not dirty, exit.
 713 * - If there some previous write going on, wait for it to finish (we can't
 714 *   have two writes on the same buffer simultaneously).
 715 * - Submit our write and don't wait on it. We set B_WRITING indicating
 716 *   that there is a write in progress.
 717 */
 718static void __write_dirty_buffer(struct dm_buffer *b,
 719				 struct list_head *write_list)
 720{
 721	if (!test_bit(B_DIRTY, &b->state))
 722		return;
 723
 724	clear_bit(B_DIRTY, &b->state);
 725	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 726
 
 
 
 727	if (!write_list)
 728		submit_io(b, WRITE, b->block, write_endio);
 729	else
 730		list_add_tail(&b->write_list, write_list);
 731}
 732
 733static void __flush_write_list(struct list_head *write_list)
 734{
 735	struct blk_plug plug;
 736	blk_start_plug(&plug);
 737	while (!list_empty(write_list)) {
 738		struct dm_buffer *b =
 739			list_entry(write_list->next, struct dm_buffer, write_list);
 740		list_del(&b->write_list);
 741		submit_io(b, WRITE, b->block, write_endio);
 742		dm_bufio_cond_resched();
 743	}
 744	blk_finish_plug(&plug);
 745}
 746
 747/*
 748 * Wait until any activity on the buffer finishes.  Possibly write the
 749 * buffer if it is dirty.  When this function finishes, there is no I/O
 750 * running on the buffer and the buffer is not dirty.
 751 */
 752static void __make_buffer_clean(struct dm_buffer *b)
 753{
 754	BUG_ON(b->hold_count);
 755
 756	if (!b->state)	/* fast case */
 757		return;
 758
 759	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 760	__write_dirty_buffer(b, NULL);
 761	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 762}
 763
 764/*
 765 * Find some buffer that is not held by anybody, clean it, unlink it and
 766 * return it.
 767 */
 768static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
 769{
 770	struct dm_buffer *b;
 771
 772	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
 773		BUG_ON(test_bit(B_WRITING, &b->state));
 774		BUG_ON(test_bit(B_DIRTY, &b->state));
 775
 776		if (!b->hold_count) {
 777			__make_buffer_clean(b);
 778			__unlink_buffer(b);
 779			return b;
 780		}
 781		dm_bufio_cond_resched();
 782	}
 783
 784	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
 785		BUG_ON(test_bit(B_READING, &b->state));
 786
 787		if (!b->hold_count) {
 788			__make_buffer_clean(b);
 789			__unlink_buffer(b);
 790			return b;
 791		}
 792		dm_bufio_cond_resched();
 793	}
 794
 795	return NULL;
 796}
 797
 798/*
 799 * Wait until some other threads free some buffer or release hold count on
 800 * some buffer.
 801 *
 802 * This function is entered with c->lock held, drops it and regains it
 803 * before exiting.
 804 */
 805static void __wait_for_free_buffer(struct dm_bufio_client *c)
 806{
 807	DECLARE_WAITQUEUE(wait, current);
 808
 809	add_wait_queue(&c->free_buffer_wait, &wait);
 810	set_task_state(current, TASK_UNINTERRUPTIBLE);
 811	dm_bufio_unlock(c);
 812
 813	io_schedule();
 814
 815	remove_wait_queue(&c->free_buffer_wait, &wait);
 816
 817	dm_bufio_lock(c);
 818}
 819
 820enum new_flag {
 821	NF_FRESH = 0,
 822	NF_READ = 1,
 823	NF_GET = 2,
 824	NF_PREFETCH = 3
 825};
 826
 827/*
 828 * Allocate a new buffer. If the allocation is not possible, wait until
 829 * some other thread frees a buffer.
 830 *
 831 * May drop the lock and regain it.
 832 */
 833static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
 834{
 835	struct dm_buffer *b;
 
 836
 837	/*
 838	 * dm-bufio is resistant to allocation failures (it just keeps
 839	 * one buffer reserved in cases all the allocations fail).
 840	 * So set flags to not try too hard:
 841	 *	GFP_NOIO: don't recurse into the I/O layer
 
 842	 *	__GFP_NORETRY: don't retry and rather return failure
 843	 *	__GFP_NOMEMALLOC: don't use emergency reserves
 844	 *	__GFP_NOWARN: don't print a warning in case of failure
 845	 *
 846	 * For debugging, if we set the cache size to 1, no new buffers will
 847	 * be allocated.
 848	 */
 849	while (1) {
 850		if (dm_bufio_cache_size_latch != 1) {
 851			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 852			if (b)
 853				return b;
 854		}
 855
 856		if (nf == NF_PREFETCH)
 857			return NULL;
 858
 
 
 
 
 
 
 
 
 
 859		if (!list_empty(&c->reserved_buffers)) {
 860			b = list_entry(c->reserved_buffers.next,
 861				       struct dm_buffer, lru_list);
 862			list_del(&b->lru_list);
 863			c->need_reserved_buffers++;
 864
 865			return b;
 866		}
 867
 868		b = __get_unclaimed_buffer(c);
 869		if (b)
 870			return b;
 871
 872		__wait_for_free_buffer(c);
 873	}
 874}
 875
 876static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
 877{
 878	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
 879
 880	if (!b)
 881		return NULL;
 882
 883	if (c->alloc_callback)
 884		c->alloc_callback(b);
 885
 886	return b;
 887}
 888
 889/*
 890 * Free a buffer and wake other threads waiting for free buffers.
 891 */
 892static void __free_buffer_wake(struct dm_buffer *b)
 893{
 894	struct dm_bufio_client *c = b->c;
 895
 896	if (!c->need_reserved_buffers)
 897		free_buffer(b);
 898	else {
 899		list_add(&b->lru_list, &c->reserved_buffers);
 900		c->need_reserved_buffers--;
 901	}
 902
 903	wake_up(&c->free_buffer_wait);
 904}
 905
 906static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
 907					struct list_head *write_list)
 908{
 909	struct dm_buffer *b, *tmp;
 910
 911	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
 912		BUG_ON(test_bit(B_READING, &b->state));
 913
 914		if (!test_bit(B_DIRTY, &b->state) &&
 915		    !test_bit(B_WRITING, &b->state)) {
 916			__relink_lru(b, LIST_CLEAN);
 917			continue;
 918		}
 919
 920		if (no_wait && test_bit(B_WRITING, &b->state))
 921			return;
 922
 923		__write_dirty_buffer(b, write_list);
 924		dm_bufio_cond_resched();
 925	}
 926}
 927
 928/*
 929 * Get writeback threshold and buffer limit for a given client.
 930 */
 931static void __get_memory_limit(struct dm_bufio_client *c,
 932			       unsigned long *threshold_buffers,
 933			       unsigned long *limit_buffers)
 934{
 935	unsigned long buffers;
 936
 937	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
 938		mutex_lock(&dm_bufio_clients_lock);
 939		__cache_size_refresh();
 940		mutex_unlock(&dm_bufio_clients_lock);
 941	}
 942
 943	buffers = dm_bufio_cache_size_per_client >>
 944		  (c->sectors_per_block_bits + SECTOR_SHIFT);
 945
 946	if (buffers < c->minimum_buffers)
 947		buffers = c->minimum_buffers;
 948
 949	*limit_buffers = buffers;
 950	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
 951}
 952
 953/*
 954 * Check if we're over watermark.
 955 * If we are over threshold_buffers, start freeing buffers.
 956 * If we're over "limit_buffers", block until we get under the limit.
 957 */
 958static void __check_watermark(struct dm_bufio_client *c,
 959			      struct list_head *write_list)
 960{
 961	unsigned long threshold_buffers, limit_buffers;
 962
 963	__get_memory_limit(c, &threshold_buffers, &limit_buffers);
 964
 965	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
 966	       limit_buffers) {
 967
 968		struct dm_buffer *b = __get_unclaimed_buffer(c);
 969
 970		if (!b)
 971			return;
 972
 973		__free_buffer_wake(b);
 974		dm_bufio_cond_resched();
 975	}
 976
 977	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
 978		__write_dirty_buffers_async(c, 1, write_list);
 979}
 980
 981/*----------------------------------------------------------------
 982 * Getting a buffer
 983 *--------------------------------------------------------------*/
 984
 985static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 986				     enum new_flag nf, int *need_submit,
 987				     struct list_head *write_list)
 988{
 989	struct dm_buffer *b, *new_b = NULL;
 990
 991	*need_submit = 0;
 992
 993	b = __find(c, block);
 994	if (b)
 995		goto found_buffer;
 996
 997	if (nf == NF_GET)
 998		return NULL;
 999
1000	new_b = __alloc_buffer_wait(c, nf);
1001	if (!new_b)
1002		return NULL;
1003
1004	/*
1005	 * We've had a period where the mutex was unlocked, so need to
1006	 * recheck the hash table.
1007	 */
1008	b = __find(c, block);
1009	if (b) {
1010		__free_buffer_wake(new_b);
1011		goto found_buffer;
1012	}
1013
1014	__check_watermark(c, write_list);
1015
1016	b = new_b;
1017	b->hold_count = 1;
1018	b->read_error = 0;
1019	b->write_error = 0;
1020	__link_buffer(b, block, LIST_CLEAN);
1021
1022	if (nf == NF_FRESH) {
1023		b->state = 0;
1024		return b;
1025	}
1026
1027	b->state = 1 << B_READING;
1028	*need_submit = 1;
1029
1030	return b;
1031
1032found_buffer:
1033	if (nf == NF_PREFETCH)
1034		return NULL;
1035	/*
1036	 * Note: it is essential that we don't wait for the buffer to be
1037	 * read if dm_bufio_get function is used. Both dm_bufio_get and
1038	 * dm_bufio_prefetch can be used in the driver request routine.
1039	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1040	 * the same buffer, it would deadlock if we waited.
1041	 */
1042	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1043		return NULL;
1044
1045	b->hold_count++;
1046	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1047		     test_bit(B_WRITING, &b->state));
1048	return b;
1049}
1050
1051/*
1052 * The endio routine for reading: set the error, clear the bit and wake up
1053 * anyone waiting on the buffer.
1054 */
1055static void read_endio(struct bio *bio)
1056{
1057	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
1058
1059	b->read_error = bio->bi_error;
1060
1061	BUG_ON(!test_bit(B_READING, &b->state));
1062
1063	smp_mb__before_atomic();
1064	clear_bit(B_READING, &b->state);
1065	smp_mb__after_atomic();
1066
1067	wake_up_bit(&b->state, B_READING);
1068}
1069
1070/*
1071 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1072 * functions is similar except that dm_bufio_new doesn't read the
1073 * buffer from the disk (assuming that the caller overwrites all the data
1074 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1075 */
1076static void *new_read(struct dm_bufio_client *c, sector_t block,
1077		      enum new_flag nf, struct dm_buffer **bp)
1078{
1079	int need_submit;
1080	struct dm_buffer *b;
1081
1082	LIST_HEAD(write_list);
1083
1084	dm_bufio_lock(c);
1085	b = __bufio_new(c, block, nf, &need_submit, &write_list);
1086#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1087	if (b && b->hold_count == 1)
1088		buffer_record_stack(b);
1089#endif
1090	dm_bufio_unlock(c);
1091
1092	__flush_write_list(&write_list);
1093
1094	if (!b)
1095		return NULL;
1096
1097	if (need_submit)
1098		submit_io(b, READ, b->block, read_endio);
1099
1100	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1101
1102	if (b->read_error) {
1103		int error = b->read_error;
1104
1105		dm_bufio_release(b);
1106
1107		return ERR_PTR(error);
1108	}
1109
1110	*bp = b;
1111
1112	return b->data;
1113}
1114
1115void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1116		   struct dm_buffer **bp)
1117{
1118	return new_read(c, block, NF_GET, bp);
1119}
1120EXPORT_SYMBOL_GPL(dm_bufio_get);
1121
1122void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1123		    struct dm_buffer **bp)
1124{
1125	BUG_ON(dm_bufio_in_request());
1126
1127	return new_read(c, block, NF_READ, bp);
1128}
1129EXPORT_SYMBOL_GPL(dm_bufio_read);
1130
1131void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1132		   struct dm_buffer **bp)
1133{
1134	BUG_ON(dm_bufio_in_request());
1135
1136	return new_read(c, block, NF_FRESH, bp);
1137}
1138EXPORT_SYMBOL_GPL(dm_bufio_new);
1139
1140void dm_bufio_prefetch(struct dm_bufio_client *c,
1141		       sector_t block, unsigned n_blocks)
1142{
1143	struct blk_plug plug;
1144
1145	LIST_HEAD(write_list);
1146
1147	BUG_ON(dm_bufio_in_request());
1148
1149	blk_start_plug(&plug);
1150	dm_bufio_lock(c);
1151
1152	for (; n_blocks--; block++) {
1153		int need_submit;
1154		struct dm_buffer *b;
1155		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1156				&write_list);
1157		if (unlikely(!list_empty(&write_list))) {
1158			dm_bufio_unlock(c);
1159			blk_finish_plug(&plug);
1160			__flush_write_list(&write_list);
1161			blk_start_plug(&plug);
1162			dm_bufio_lock(c);
1163		}
1164		if (unlikely(b != NULL)) {
1165			dm_bufio_unlock(c);
1166
1167			if (need_submit)
1168				submit_io(b, READ, b->block, read_endio);
1169			dm_bufio_release(b);
1170
1171			dm_bufio_cond_resched();
1172
1173			if (!n_blocks)
1174				goto flush_plug;
1175			dm_bufio_lock(c);
1176		}
1177	}
1178
1179	dm_bufio_unlock(c);
1180
1181flush_plug:
1182	blk_finish_plug(&plug);
1183}
1184EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1185
1186void dm_bufio_release(struct dm_buffer *b)
1187{
1188	struct dm_bufio_client *c = b->c;
1189
1190	dm_bufio_lock(c);
1191
1192	BUG_ON(!b->hold_count);
1193
1194	b->hold_count--;
1195	if (!b->hold_count) {
1196		wake_up(&c->free_buffer_wait);
1197
1198		/*
1199		 * If there were errors on the buffer, and the buffer is not
1200		 * to be written, free the buffer. There is no point in caching
1201		 * invalid buffer.
1202		 */
1203		if ((b->read_error || b->write_error) &&
1204		    !test_bit(B_READING, &b->state) &&
1205		    !test_bit(B_WRITING, &b->state) &&
1206		    !test_bit(B_DIRTY, &b->state)) {
1207			__unlink_buffer(b);
1208			__free_buffer_wake(b);
1209		}
1210	}
1211
1212	dm_bufio_unlock(c);
1213}
1214EXPORT_SYMBOL_GPL(dm_bufio_release);
1215
1216void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
 
1217{
1218	struct dm_bufio_client *c = b->c;
1219
 
 
 
1220	dm_bufio_lock(c);
1221
1222	BUG_ON(test_bit(B_READING, &b->state));
1223
1224	if (!test_and_set_bit(B_DIRTY, &b->state))
 
 
1225		__relink_lru(b, LIST_DIRTY);
 
 
 
 
 
 
1226
1227	dm_bufio_unlock(c);
1228}
 
 
 
 
 
 
1229EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1230
1231void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1232{
1233	LIST_HEAD(write_list);
1234
1235	BUG_ON(dm_bufio_in_request());
1236
1237	dm_bufio_lock(c);
1238	__write_dirty_buffers_async(c, 0, &write_list);
1239	dm_bufio_unlock(c);
1240	__flush_write_list(&write_list);
1241}
1242EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1243
1244/*
1245 * For performance, it is essential that the buffers are written asynchronously
1246 * and simultaneously (so that the block layer can merge the writes) and then
1247 * waited upon.
1248 *
1249 * Finally, we flush hardware disk cache.
1250 */
1251int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1252{
1253	int a, f;
1254	unsigned long buffers_processed = 0;
1255	struct dm_buffer *b, *tmp;
1256
1257	LIST_HEAD(write_list);
1258
1259	dm_bufio_lock(c);
1260	__write_dirty_buffers_async(c, 0, &write_list);
1261	dm_bufio_unlock(c);
1262	__flush_write_list(&write_list);
1263	dm_bufio_lock(c);
1264
1265again:
1266	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1267		int dropped_lock = 0;
1268
1269		if (buffers_processed < c->n_buffers[LIST_DIRTY])
1270			buffers_processed++;
1271
1272		BUG_ON(test_bit(B_READING, &b->state));
1273
1274		if (test_bit(B_WRITING, &b->state)) {
1275			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1276				dropped_lock = 1;
1277				b->hold_count++;
1278				dm_bufio_unlock(c);
1279				wait_on_bit_io(&b->state, B_WRITING,
1280					       TASK_UNINTERRUPTIBLE);
1281				dm_bufio_lock(c);
1282				b->hold_count--;
1283			} else
1284				wait_on_bit_io(&b->state, B_WRITING,
1285					       TASK_UNINTERRUPTIBLE);
1286		}
1287
1288		if (!test_bit(B_DIRTY, &b->state) &&
1289		    !test_bit(B_WRITING, &b->state))
1290			__relink_lru(b, LIST_CLEAN);
1291
1292		dm_bufio_cond_resched();
1293
1294		/*
1295		 * If we dropped the lock, the list is no longer consistent,
1296		 * so we must restart the search.
1297		 *
1298		 * In the most common case, the buffer just processed is
1299		 * relinked to the clean list, so we won't loop scanning the
1300		 * same buffer again and again.
1301		 *
1302		 * This may livelock if there is another thread simultaneously
1303		 * dirtying buffers, so we count the number of buffers walked
1304		 * and if it exceeds the total number of buffers, it means that
1305		 * someone is doing some writes simultaneously with us.  In
1306		 * this case, stop, dropping the lock.
1307		 */
1308		if (dropped_lock)
1309			goto again;
1310	}
1311	wake_up(&c->free_buffer_wait);
1312	dm_bufio_unlock(c);
1313
1314	a = xchg(&c->async_write_error, 0);
1315	f = dm_bufio_issue_flush(c);
1316	if (a)
1317		return a;
1318
1319	return f;
1320}
1321EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1322
1323/*
1324 * Use dm-io to send and empty barrier flush the device.
1325 */
1326int dm_bufio_issue_flush(struct dm_bufio_client *c)
1327{
1328	struct dm_io_request io_req = {
1329		.bi_rw = WRITE_FLUSH,
 
1330		.mem.type = DM_IO_KMEM,
1331		.mem.ptr.addr = NULL,
1332		.client = c->dm_io,
1333	};
1334	struct dm_io_region io_reg = {
1335		.bdev = c->bdev,
1336		.sector = 0,
1337		.count = 0,
1338	};
1339
1340	BUG_ON(dm_bufio_in_request());
1341
1342	return dm_io(&io_req, 1, &io_reg, NULL);
1343}
1344EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1345
1346/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1347 * We first delete any other buffer that may be at that new location.
1348 *
1349 * Then, we write the buffer to the original location if it was dirty.
1350 *
1351 * Then, if we are the only one who is holding the buffer, relink the buffer
1352 * in the hash queue for the new location.
1353 *
1354 * If there was someone else holding the buffer, we write it to the new
1355 * location but not relink it, because that other user needs to have the buffer
1356 * at the same place.
1357 */
1358void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1359{
1360	struct dm_bufio_client *c = b->c;
1361	struct dm_buffer *new;
1362
1363	BUG_ON(dm_bufio_in_request());
1364
1365	dm_bufio_lock(c);
1366
1367retry:
1368	new = __find(c, new_block);
1369	if (new) {
1370		if (new->hold_count) {
1371			__wait_for_free_buffer(c);
1372			goto retry;
1373		}
1374
1375		/*
1376		 * FIXME: Is there any point waiting for a write that's going
1377		 * to be overwritten in a bit?
1378		 */
1379		__make_buffer_clean(new);
1380		__unlink_buffer(new);
1381		__free_buffer_wake(new);
1382	}
1383
1384	BUG_ON(!b->hold_count);
1385	BUG_ON(test_bit(B_READING, &b->state));
1386
1387	__write_dirty_buffer(b, NULL);
1388	if (b->hold_count == 1) {
1389		wait_on_bit_io(&b->state, B_WRITING,
1390			       TASK_UNINTERRUPTIBLE);
1391		set_bit(B_DIRTY, &b->state);
 
 
1392		__unlink_buffer(b);
1393		__link_buffer(b, new_block, LIST_DIRTY);
1394	} else {
1395		sector_t old_block;
1396		wait_on_bit_lock_io(&b->state, B_WRITING,
1397				    TASK_UNINTERRUPTIBLE);
1398		/*
1399		 * Relink buffer to "new_block" so that write_callback
1400		 * sees "new_block" as a block number.
1401		 * After the write, link the buffer back to old_block.
1402		 * All this must be done in bufio lock, so that block number
1403		 * change isn't visible to other threads.
1404		 */
1405		old_block = b->block;
1406		__unlink_buffer(b);
1407		__link_buffer(b, new_block, b->list_mode);
1408		submit_io(b, WRITE, new_block, write_endio);
1409		wait_on_bit_io(&b->state, B_WRITING,
1410			       TASK_UNINTERRUPTIBLE);
1411		__unlink_buffer(b);
1412		__link_buffer(b, old_block, b->list_mode);
1413	}
1414
1415	dm_bufio_unlock(c);
1416	dm_bufio_release(b);
1417}
1418EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1419
 
 
 
 
 
 
 
 
1420/*
1421 * Free the given buffer.
1422 *
1423 * This is just a hint, if the buffer is in use or dirty, this function
1424 * does nothing.
1425 */
1426void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1427{
1428	struct dm_buffer *b;
1429
1430	dm_bufio_lock(c);
1431
1432	b = __find(c, block);
1433	if (b && likely(!b->hold_count) && likely(!b->state)) {
1434		__unlink_buffer(b);
1435		__free_buffer_wake(b);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1436	}
1437
1438	dm_bufio_unlock(c);
1439}
1440EXPORT_SYMBOL(dm_bufio_forget);
1441
1442void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1443{
1444	c->minimum_buffers = n;
1445}
1446EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
1447
1448unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1449{
1450	return c->block_size;
1451}
1452EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1453
1454sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1455{
1456	return i_size_read(c->bdev->bd_inode) >>
1457			   (SECTOR_SHIFT + c->sectors_per_block_bits);
 
 
 
 
1458}
1459EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1460
1461sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1462{
1463	return b->block;
1464}
1465EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1466
1467void *dm_bufio_get_block_data(struct dm_buffer *b)
1468{
1469	return b->data;
1470}
1471EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1472
1473void *dm_bufio_get_aux_data(struct dm_buffer *b)
1474{
1475	return b + 1;
1476}
1477EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1478
1479struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1480{
1481	return b->c;
1482}
1483EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1484
1485static void drop_buffers(struct dm_bufio_client *c)
1486{
1487	struct dm_buffer *b;
1488	int i;
1489	bool warned = false;
1490
1491	BUG_ON(dm_bufio_in_request());
1492
1493	/*
1494	 * An optimization so that the buffers are not written one-by-one.
1495	 */
1496	dm_bufio_write_dirty_buffers_async(c);
1497
1498	dm_bufio_lock(c);
1499
1500	while ((b = __get_unclaimed_buffer(c)))
1501		__free_buffer_wake(b);
1502
1503	for (i = 0; i < LIST_SIZE; i++)
1504		list_for_each_entry(b, &c->lru[i], lru_list) {
1505			WARN_ON(!warned);
1506			warned = true;
1507			DMERR("leaked buffer %llx, hold count %u, list %d",
1508			      (unsigned long long)b->block, b->hold_count, i);
1509#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1510			print_stack_trace(&b->stack_trace, 1);
1511			b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
 
1512#endif
1513		}
1514
1515#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1516	while ((b = __get_unclaimed_buffer(c)))
1517		__free_buffer_wake(b);
1518#endif
1519
1520	for (i = 0; i < LIST_SIZE; i++)
1521		BUG_ON(!list_empty(&c->lru[i]));
1522
1523	dm_bufio_unlock(c);
1524}
1525
1526/*
1527 * We may not be able to evict this buffer if IO pending or the client
1528 * is still using it.  Caller is expected to know buffer is too old.
1529 *
1530 * And if GFP_NOFS is used, we must not do any I/O because we hold
1531 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1532 * rerouted to different bufio client.
1533 */
1534static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1535{
1536	if (!(gfp & __GFP_FS)) {
1537		if (test_bit(B_READING, &b->state) ||
1538		    test_bit(B_WRITING, &b->state) ||
1539		    test_bit(B_DIRTY, &b->state))
1540			return false;
1541	}
1542
1543	if (b->hold_count)
1544		return false;
1545
1546	__make_buffer_clean(b);
1547	__unlink_buffer(b);
1548	__free_buffer_wake(b);
1549
1550	return true;
1551}
1552
1553static unsigned get_retain_buffers(struct dm_bufio_client *c)
1554{
1555        unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
1556        return retain_bytes / c->block_size;
 
 
 
 
1557}
1558
1559static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1560			    gfp_t gfp_mask)
1561{
1562	int l;
1563	struct dm_buffer *b, *tmp;
1564	unsigned long freed = 0;
1565	unsigned long count = nr_to_scan;
1566	unsigned retain_target = get_retain_buffers(c);
 
1567
1568	for (l = 0; l < LIST_SIZE; l++) {
1569		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1570			if (__try_evict_buffer(b, gfp_mask))
 
 
 
 
 
1571				freed++;
1572			if (!--nr_to_scan || ((count - freed) <= retain_target))
1573				return freed;
1574			dm_bufio_cond_resched();
1575		}
1576	}
1577	return freed;
1578}
1579
1580static unsigned long
1581dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 
 
 
 
 
 
 
 
1582{
1583	struct dm_bufio_client *c;
1584	unsigned long freed;
1585
1586	c = container_of(shrink, struct dm_bufio_client, shrinker);
1587	if (sc->gfp_mask & __GFP_FS)
1588		dm_bufio_lock(c);
1589	else if (!dm_bufio_trylock(c))
1590		return SHRINK_STOP;
1591
1592	freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1593	dm_bufio_unlock(c);
1594	return freed;
1595}
1596
1597static unsigned long
1598dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1599{
1600	struct dm_bufio_client *c;
1601	unsigned long count;
 
 
 
 
 
 
 
 
1602
1603	c = container_of(shrink, struct dm_bufio_client, shrinker);
1604	if (sc->gfp_mask & __GFP_FS)
1605		dm_bufio_lock(c);
1606	else if (!dm_bufio_trylock(c))
1607		return 0;
1608
1609	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1610	dm_bufio_unlock(c);
1611	return count;
1612}
1613
1614/*
1615 * Create the buffering interface
1616 */
1617struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1618					       unsigned reserved_buffers, unsigned aux_size,
1619					       void (*alloc_callback)(struct dm_buffer *),
1620					       void (*write_callback)(struct dm_buffer *))
1621{
1622	int r;
1623	struct dm_bufio_client *c;
1624	unsigned i;
 
1625
1626	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1627	       (block_size & (block_size - 1)));
 
 
 
1628
1629	c = kzalloc(sizeof(*c), GFP_KERNEL);
1630	if (!c) {
1631		r = -ENOMEM;
1632		goto bad_client;
1633	}
1634	c->buffer_tree = RB_ROOT;
1635
1636	c->bdev = bdev;
1637	c->block_size = block_size;
1638	c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1639	c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
1640				  __ffs(block_size) - PAGE_SHIFT : 0;
1641	c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
1642				  PAGE_SHIFT - __ffs(block_size) : 0);
1643
1644	c->aux_size = aux_size;
1645	c->alloc_callback = alloc_callback;
1646	c->write_callback = write_callback;
1647
1648	for (i = 0; i < LIST_SIZE; i++) {
1649		INIT_LIST_HEAD(&c->lru[i]);
1650		c->n_buffers[i] = 0;
1651	}
1652
1653	mutex_init(&c->lock);
1654	INIT_LIST_HEAD(&c->reserved_buffers);
1655	c->need_reserved_buffers = reserved_buffers;
1656
1657	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1658
1659	init_waitqueue_head(&c->free_buffer_wait);
1660	c->async_write_error = 0;
1661
1662	c->dm_io = dm_io_client_create();
1663	if (IS_ERR(c->dm_io)) {
1664		r = PTR_ERR(c->dm_io);
1665		goto bad_dm_io;
1666	}
1667
1668	mutex_lock(&dm_bufio_clients_lock);
1669	if (c->blocks_per_page_bits) {
1670		if (!DM_BUFIO_CACHE_NAME(c)) {
1671			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1672			if (!DM_BUFIO_CACHE_NAME(c)) {
1673				r = -ENOMEM;
1674				mutex_unlock(&dm_bufio_clients_lock);
1675				goto bad_cache;
1676			}
1677		}
1678
1679		if (!DM_BUFIO_CACHE(c)) {
1680			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1681							      c->block_size,
1682							      c->block_size, 0, NULL);
1683			if (!DM_BUFIO_CACHE(c)) {
1684				r = -ENOMEM;
1685				mutex_unlock(&dm_bufio_clients_lock);
1686				goto bad_cache;
1687			}
1688		}
1689	}
1690	mutex_unlock(&dm_bufio_clients_lock);
 
 
 
 
 
 
 
 
 
1691
1692	while (c->need_reserved_buffers) {
1693		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1694
1695		if (!b) {
1696			r = -ENOMEM;
1697			goto bad_buffer;
1698		}
1699		__free_buffer_wake(b);
1700	}
1701
 
 
 
 
 
 
 
 
 
 
 
1702	mutex_lock(&dm_bufio_clients_lock);
1703	dm_bufio_client_count++;
1704	list_add(&c->client_list, &dm_bufio_all_clients);
1705	__cache_size_refresh();
1706	mutex_unlock(&dm_bufio_clients_lock);
1707
1708	c->shrinker.count_objects = dm_bufio_shrink_count;
1709	c->shrinker.scan_objects = dm_bufio_shrink_scan;
1710	c->shrinker.seeks = 1;
1711	c->shrinker.batch = 0;
1712	register_shrinker(&c->shrinker);
1713
1714	return c;
1715
1716bad_buffer:
1717bad_cache:
1718	while (!list_empty(&c->reserved_buffers)) {
1719		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1720						 struct dm_buffer, lru_list);
1721		list_del(&b->lru_list);
1722		free_buffer(b);
1723	}
 
 
1724	dm_io_client_destroy(c->dm_io);
1725bad_dm_io:
 
1726	kfree(c);
1727bad_client:
1728	return ERR_PTR(r);
1729}
1730EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1731
1732/*
1733 * Free the buffering interface.
1734 * It is required that there are no references on any buffers.
1735 */
1736void dm_bufio_client_destroy(struct dm_bufio_client *c)
1737{
1738	unsigned i;
1739
1740	drop_buffers(c);
1741
1742	unregister_shrinker(&c->shrinker);
 
1743
1744	mutex_lock(&dm_bufio_clients_lock);
1745
1746	list_del(&c->client_list);
1747	dm_bufio_client_count--;
1748	__cache_size_refresh();
1749
1750	mutex_unlock(&dm_bufio_clients_lock);
1751
1752	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1753	BUG_ON(c->need_reserved_buffers);
1754
1755	while (!list_empty(&c->reserved_buffers)) {
1756		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1757						 struct dm_buffer, lru_list);
1758		list_del(&b->lru_list);
1759		free_buffer(b);
1760	}
1761
1762	for (i = 0; i < LIST_SIZE; i++)
1763		if (c->n_buffers[i])
1764			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1765
1766	for (i = 0; i < LIST_SIZE; i++)
1767		BUG_ON(c->n_buffers[i]);
1768
 
 
1769	dm_io_client_destroy(c->dm_io);
 
1770	kfree(c);
1771}
1772EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1773
 
 
 
 
 
 
1774static unsigned get_max_age_hz(void)
1775{
1776	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
1777
1778	if (max_age > UINT_MAX / HZ)
1779		max_age = UINT_MAX / HZ;
1780
1781	return max_age * HZ;
1782}
1783
1784static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1785{
1786	return time_after_eq(jiffies, b->last_accessed + age_hz);
1787}
1788
1789static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1790{
1791	struct dm_buffer *b, *tmp;
1792	unsigned retain_target = get_retain_buffers(c);
1793	unsigned count;
 
1794
1795	dm_bufio_lock(c);
1796
 
 
 
 
 
 
 
1797	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1798	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1799		if (count <= retain_target)
1800			break;
1801
1802		if (!older_than(b, age_hz))
1803			break;
1804
1805		if (__try_evict_buffer(b, 0))
1806			count--;
1807
1808		dm_bufio_cond_resched();
1809	}
1810
1811	dm_bufio_unlock(c);
1812}
1813
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1814static void cleanup_old_buffers(void)
1815{
1816	unsigned long max_age_hz = get_max_age_hz();
1817	struct dm_bufio_client *c;
1818
1819	mutex_lock(&dm_bufio_clients_lock);
1820
 
 
1821	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
1822		__evict_old_buffers(c, max_age_hz);
1823
1824	mutex_unlock(&dm_bufio_clients_lock);
1825}
1826
1827static struct workqueue_struct *dm_bufio_wq;
1828static struct delayed_work dm_bufio_work;
1829
1830static void work_fn(struct work_struct *w)
1831{
1832	cleanup_old_buffers();
1833
1834	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1835			   DM_BUFIO_WORK_TIMER_SECS * HZ);
1836}
1837
1838/*----------------------------------------------------------------
1839 * Module setup
1840 *--------------------------------------------------------------*/
1841
1842/*
1843 * This is called only once for the whole dm_bufio module.
1844 * It initializes memory limit.
1845 */
1846static int __init dm_bufio_init(void)
1847{
1848	__u64 mem;
1849
1850	dm_bufio_allocated_kmem_cache = 0;
1851	dm_bufio_allocated_get_free_pages = 0;
1852	dm_bufio_allocated_vmalloc = 0;
1853	dm_bufio_current_allocated = 0;
1854
1855	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1856	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1857
1858	mem = (__u64)((totalram_pages - totalhigh_pages) *
1859		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1860
1861	if (mem > ULONG_MAX)
1862		mem = ULONG_MAX;
1863
1864#ifdef CONFIG_MMU
1865	/*
1866	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1867	 * in fs/proc/internal.h
1868	 */
1869	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1870		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
1871#endif
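	/*
	 * Worked example (illustrative numbers only): with 8 GiB of low
	 * memory, DM_BUFIO_MEMORY_PERCENT = 2 yields a limit of roughly
	 * 164 MiB; on a 32-bit machine with a 128 MiB vmalloc area,
	 * DM_BUFIO_VMALLOC_PERCENT = 25 caps it at 32 MiB instead.
	 */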
1872
1873	dm_bufio_default_cache_size = mem;
1874
1875	mutex_lock(&dm_bufio_clients_lock);
1876	__cache_size_refresh();
1877	mutex_unlock(&dm_bufio_clients_lock);
1878
1879	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
1880	if (!dm_bufio_wq)
1881		return -ENOMEM;
1882
1883	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1884	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1885			   DM_BUFIO_WORK_TIMER_SECS * HZ);
1886
1887	return 0;
1888}
1889
1890/*
1891 * This is called once when unloading the dm_bufio module.
1892 */
1893static void __exit dm_bufio_exit(void)
1894{
1895	int bug = 0;
1896	int i;
1897
1898	cancel_delayed_work_sync(&dm_bufio_work);
1899	destroy_workqueue(dm_bufio_wq);
1900
1901	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
1902		kmem_cache_destroy(dm_bufio_caches[i]);
1903
1904	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1905		kfree(dm_bufio_cache_names[i]);
1906
1907	if (dm_bufio_client_count) {
1908		DMCRIT("%s: dm_bufio_client_count leaked: %d",
1909			__func__, dm_bufio_client_count);
1910		bug = 1;
1911	}
1912
1913	if (dm_bufio_current_allocated) {
1914		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1915			__func__, dm_bufio_current_allocated);
1916		bug = 1;
1917	}
1918
1919	if (dm_bufio_allocated_get_free_pages) {
1920		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1921		       __func__, dm_bufio_allocated_get_free_pages);
1922		bug = 1;
1923	}
1924
1925	if (dm_bufio_allocated_vmalloc) {
1926		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1927		       __func__, dm_bufio_allocated_vmalloc);
1928		bug = 1;
1929	}
1930
1931	BUG_ON(bug);
1932}
1933
1934module_init(dm_bufio_init)
1935module_exit(dm_bufio_exit)
1936
1937module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1938MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1939
1940module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1941MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1942
1943module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
1944MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
1945
1946module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1947MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1948
1949module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1950MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1951
1952module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1953MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1954
1955module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1956MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1957
1958module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1959MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
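/*
 * module_param_named() exposes these knobs in sysfs; assuming the module
 * is loaded as "dm_bufio", they appear under
 * /sys/module/dm_bufio/parameters/, e.g.:
 *
 *	echo 67108864 > /sys/module/dm_bufio/parameters/max_cache_size_bytes
 *	cat /sys/module/dm_bufio/parameters/current_allocated_bytes
 *
 * Parameters declared with mode S_IRUGO only are read-only.
 */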
1960
1961MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1962MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1963MODULE_LICENSE("GPL");