/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#include <linux/jump_label.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when the number of dirty buffers exceeds
 *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

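/*
 * Worked example (illustrative numbers, not taken from any particular
 * machine): with 8 GiB of low memory, DM_BUFIO_MEMORY_PERCENT yields a
 * limit of about 164 MiB. On a 32-bit configuration whose vmalloc arena
 * is only 256 MiB, DM_BUFIO_VMALLOC_PERCENT would cap that at 64 MiB,
 * and dm_bufio_init() below picks the lower of the two as the default
 * cache size.
 */
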
/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;
	spinlock_t spinlock;
	bool no_sleep;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	s8 sectors_per_block_bits;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);
	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;

	struct shrinker shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	struct list_head global_list;
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned accessed;
	unsigned hold_count;
	unsigned long state;
	unsigned long last_accessed;
	unsigned dirty_start;
	unsigned dirty_end;
	unsigned write_start;
	unsigned write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return spin_trylock_bh(&c->spinlock);
	else
		return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_unlock_bh(&c->spinlock);
	else
		mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

static LIST_HEAD(global_queue);

static unsigned long global_num = 0;

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;


#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

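/*
 * Example (illustrative): with buffers for blocks {1, 3, 7} in the tree,
 * __find_next(c, 4) records block 7 as "best" whenever it steps left past
 * a larger block and finally returns the buffer for block 7 -- the first
 * buffer at or after the requested block. __find_next(c, 8) returns NULL.
 */
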
static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	b->accessed = 1;

	if (!unlink) {
		list_add(&b->global_list, &global_queue);
		global_num++;
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	} else {
		list_del(&b->global_list);
		global_num--;
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;

	adjust_total_allocated(b, false);
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);

	adjust_total_allocated(b, true);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	b->accessed = 1;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is not vmalloced, try using the bio interface.
 *
 * If the bio cannot be allocated or its vector list is too small to
 * cover the buffer, use the dm-io layer to do the I/O. The dm-io layer
 * splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
		     unsigned n_sectors, unsigned offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_opf = op,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;
	bio_uninit(bio);
	kfree(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
		    unsigned n_sectors, unsigned offset)
{
	struct bio *bio;
	char *ptr;
	unsigned vec_size, len;

	vec_size = b->c->block_size >> PAGE_SHIFT;
	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
		vec_size += 2;

	bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
	if (!bio) {
dmio:
		use_dmio(b, op, sector, n_sectors, offset);
		return;
	}
	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			bio_put(bio);
			goto dmio;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(bio);
}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}

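/*
 * Worked example (illustrative): with a 4096-byte block size,
 * sectors_per_block_bits is 3 (a block is 8 sectors), so block 10 maps to
 * sector 10 << 3 = 80, plus the client's start offset. A non-power-of-2
 * block size of 3072 bytes takes the multiplication path instead:
 * block 10 maps to sector 10 * (3072 >> 9) = 60.
 */
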
static void submit_io(struct dm_buffer *b, enum req_op op,
		      void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned n_sectors;
	sector_t sector;
	unsigned offset, end;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (op != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, op, sector, n_sectors, offset);
	else
		use_dmio(b, op, sector, n_sectors, offset);
}

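/*
 * Worked example (illustrative): for a dirty span [write_start, write_end)
 * of [5000, 6000) in a 16384-byte block, submit_io() rounds the offset down
 * to 4096 and the end up to 8192 (offset &= -4096; end = (6000 + 4095) &
 * -4096), so the write covers one aligned 4096-byte span: sectors 8..15 of
 * the block.
 */
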
/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;
	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
	if (!smp_load_acquire(&b->state))	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
		    unlikely(test_bit_acquire(B_READING, &b->state)))
			continue;

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return NULL;

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}

/*
 * Check if we're over the writeback watermark: if the number of dirty
 * buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the number of clean
 * buffers, start writing them back asynchronously.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
		__write_dirty_buffers_async(c, 1, write_list);
}

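/*
 * Example (illustrative): with DM_BUFIO_WRITEBACK_RATIO of 3, a client
 * holding 3 clean and 10 dirty buffers is over the watermark (10 > 3 * 3),
 * so the dirty buffers are queued for asynchronous writeback; with 9 dirty
 * buffers nothing would happen.
 */
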
/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
{
	b->read_error = status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, REQ_OP_READ, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

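/*
 * A minimal usage sketch (illustrative only, compiled out): read one block
 * and release it. The caller-supplied client "c" and the error handling
 * policy are assumptions; dm_bufio_read() returns the data pointer or an
 * ERR_PTR() on read error, and the paired dm_bufio_release() drops the
 * hold count while leaving the block cached.
 */
#if 0
static int example_read_block(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *bp;
	void *data;

	data = dm_bufio_read(c, block, &bp);
	if (IS_ERR_OR_NULL(data))
		return data ? PTR_ERR(data) : -ENOMEM;

	/* ... inspect the block contents via "data" ... */

	dm_bufio_release(bp);
	return 0;
}
#endif
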
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, REQ_OP_READ, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

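/*
 * A sketch (illustrative only, compiled out) of the intended
 * prefetch-then-read pattern: start non-blocking reads for a range of
 * blocks, then pick each one up with dm_bufio_read(), which waits only
 * for the block it actually needs. The range 0..15 is an assumption.
 */
#if 0
static void example_prefetch(struct dm_bufio_client *c)
{
	struct dm_buffer *bp;
	void *data;
	sector_t block;

	dm_bufio_prefetch(c, 0, 16);	/* kick off reads of blocks 0..15 */

	for (block = 0; block < 16; block++) {
		data = dm_bufio_read(c, block, &bp);
		if (IS_ERR_OR_NULL(data))
			continue;	/* read error on this block */
		/* ... use "data" ... */
		dm_bufio_release(bp);
	}
}
#endif
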
void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in
		 * caching an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit_acquire(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		__relink_lru(b, LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

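/*
 * Example (illustrative byte ranges): marking [512, 1024) dirty on a clean
 * buffer records exactly that span; a later call with [0, 256) does not
 * track two separate ranges but widens the existing one, so the buffer's
 * dirty range becomes [0, 1024) and the next write covers it all.
 */
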
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us. In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * Use dm-io to send a discard request to the device.
 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_DISCARD | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = block_to_sector(c, block),
		.count = block_to_sector(c, count),
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the buffer tree for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		b->dirty_start = 0;
		b->dirty_end = c->block_size;
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, REQ_OP_WRITE, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

static void forget_buffer_locked(struct dm_buffer *b)
{
	if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}
}

/*
 * Free the given buffer.
 *
 * This is just a hint, if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b)
		forget_buffer_locked(b);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);

void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
{
	struct dm_buffer *b;
	sector_t end_block = block + n_blocks;

	while (block < end_block) {
		dm_bufio_lock(c);

		b = __find_next(c, block);
		if (b) {
			block = b->block + 1;
			forget_buffer_locked(b);
		}

		dm_bufio_unlock(c);

		if (!b)
			break;
	}
}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	sector_t s = bdev_nr_sectors(c->bdev);
	if (s >= c->start)
		s -= c->start;
	else
		s = 0;
	if (likely(c->sectors_per_block_bits >= 0))
		s >>= c->sectors_per_block_bits;
	else
		sector_div(s, c->block_size >> SECTOR_SHIFT);
	return s;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

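/*
 * Worked example (illustrative): an 8 GiB device has 16777216 sectors.
 * With start = 2048 and a 4096-byte block size (sectors_per_block_bits = 3),
 * the usable size is (16777216 - 2048) >> 3 = 2096896 blocks.
 */
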
struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
{
	return c->dm_io;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;
	bool warned = false;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list) {
			WARN_ON(!warned);
			warned = true;
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			stack_trace_print(b->stack_entries, b->stack_len, 1);
			/* mark unclaimed to avoid BUG_ON below */
			b->hold_count = 0;
#endif
		}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if IO is pending or the client
 * is still using it. Caller is expected to know the buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to a different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS) ||
	    (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
		if (test_bit_acquire(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
	if (likely(c->sectors_per_block_bits >= 0))
		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
	else
		retain_bytes /= c->block_size;
	return retain_bytes;
}

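/*
 * Worked example (illustrative): with the default retain_bytes of 256 KiB
 * and a 4096-byte block size, the shift is sectors_per_block_bits +
 * SECTOR_SHIFT = 3 + 9 = 12, so the shrinker tries to keep
 * 262144 >> 12 = 64 buffers around.
 */
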
static void __scan(struct dm_bufio_client *c)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = c->n_buffers[LIST_CLEAN] +
			      c->n_buffers[LIST_DIRTY];
	unsigned long retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (count - freed <= retain_target)
				atomic_long_set(&c->need_shrink, 0);
			if (!atomic_long_read(&c->need_shrink))
				return;
			if (__try_evict_buffer(b, GFP_KERNEL)) {
				atomic_long_dec(&c->need_shrink);
				freed++;
			}
			cond_resched();
		}
	}
}

static void shrink_work(struct work_struct *w)
{
	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);

	dm_bufio_lock(c);
	__scan(c);
	dm_bufio_unlock(c);
}

static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
	queue_work(dm_bufio_wq, &c->shrink_work);

	return sc->nr_to_scan;
}

static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);

	if (unlikely(count < retain_target))
		count = 0;
	else
		count -= retain_target;

	if (unlikely(count < queued_for_cleanup))
		count = 0;
	else
		count -= queued_for_cleanup;

	return count;
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *),
					       unsigned int flags)
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;
	char slab_name[27];

	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
		DMERR("%s: block size not specified or is not multiple of 512b", __func__);
		r = -EINVAL;
		goto bad_client;
	}

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	if (is_power_of_2(block_size))
		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	else
		c->sectors_per_block_bits = -1;

	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
		c->no_sleep = true;
		static_branch_inc(&no_sleep_enabled);
	}

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	spin_lock_init(&c->spinlock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	if (block_size <= KMALLOC_MAX_SIZE &&
	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
		unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
						  SLAB_RECLAIM_ACCOUNT, NULL);
		if (!c->slab_cache) {
			r = -ENOMEM;
			goto bad;
		}
	}
	if (aux_size)
		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
	else
		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
					   0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!c->slab_buffer) {
		r = -ENOMEM;
		goto bad;
	}

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad;
		}
		__free_buffer_wake(b);
	}

	INIT_WORK(&c->shrink_work, shrink_work);
	atomic_long_set(&c->need_shrink, 0);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
			      MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
	if (r)
		goto bad;

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	return c;

bad:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	mutex_destroy(&c->lock);
	if (c->no_sleep)
		static_branch_dec(&no_sleep_enabled);
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);

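/*
 * A minimal lifecycle sketch (illustrative only, compiled out): create a
 * client over a block device, write one block, and tear the client down.
 * The 4096-byte block size and the 1-buffer reserve are assumptions, not
 * values mandated by this API.
 */
#if 0
static int example_client_lifecycle(struct block_device *bdev)
{
	struct dm_bufio_client *c;
	struct dm_buffer *bp;
	void *data;
	int r;

	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	/* NF_FRESH path: no read from disk, the caller overwrites everything */
	data = dm_bufio_new(c, 0, &bp);
	if (IS_ERR_OR_NULL(data)) {
		r = data ? PTR_ERR(data) : -ENOMEM;
		goto out;
	}
	memset(data, 0, dm_bufio_get_block_size(c));
	dm_bufio_mark_buffer_dirty(bp);
	dm_bufio_release(bp);

	r = dm_bufio_write_dirty_buffers(c);	/* write back, then flush */
out:
	dm_bufio_client_destroy(c);	/* requires that no buffers are held */
	return r;
}
#endif
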
1869/*
1870 * Free the buffering interface.
1871 * It is required that there are no references on any buffers.
1872 */
1873void dm_bufio_client_destroy(struct dm_bufio_client *c)
1874{
1875 unsigned i;
1876
1877 drop_buffers(c);
1878
1879 unregister_shrinker(&c->shrinker);
1880 flush_work(&c->shrink_work);
1881
1882 mutex_lock(&dm_bufio_clients_lock);
1883
1884 list_del(&c->client_list);
1885 dm_bufio_client_count--;
1886 __cache_size_refresh();
1887
1888 mutex_unlock(&dm_bufio_clients_lock);
1889
1890 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1891 BUG_ON(c->need_reserved_buffers);
1892
1893 while (!list_empty(&c->reserved_buffers)) {
1894 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1895 struct dm_buffer, lru_list);
1896 list_del(&b->lru_list);
1897 free_buffer(b);
1898 }
1899
1900 for (i = 0; i < LIST_SIZE; i++)
1901 if (c->n_buffers[i])
1902 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1903
1904 for (i = 0; i < LIST_SIZE; i++)
1905 BUG_ON(c->n_buffers[i]);
1906
1907 kmem_cache_destroy(c->slab_cache);
1908 kmem_cache_destroy(c->slab_buffer);
1909 dm_io_client_destroy(c->dm_io);
1910 mutex_destroy(&c->lock);
1911 if (c->no_sleep)
1912 static_branch_dec(&no_sleep_enabled);
1913 kfree(c);
1914}
1915EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1916
1917void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1918{
1919 c->start = start;
1920}
1921EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
1922
1923static unsigned get_max_age_hz(void)
1924{
1925 unsigned max_age = READ_ONCE(dm_bufio_max_age);
1926
1927 if (max_age > UINT_MAX / HZ)
1928 max_age = UINT_MAX / HZ;
1929
1930 return max_age * HZ;
1931}
1932
1933static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1934{
1935 return time_after_eq(jiffies, b->last_accessed + age_hz);
1936}
1937
1938static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1939{
1940 struct dm_buffer *b, *tmp;
1941 unsigned long retain_target = get_retain_buffers(c);
1942 unsigned long count;
1943 LIST_HEAD(write_list);
1944
1945 dm_bufio_lock(c);
1946
1947 __check_watermark(c, &write_list);
1948 if (unlikely(!list_empty(&write_list))) {
1949 dm_bufio_unlock(c);
1950 __flush_write_list(&write_list);
1951 dm_bufio_lock(c);
1952 }
1953
1954 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1955 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1956 if (count <= retain_target)
1957 break;
1958
1959 if (!older_than(b, age_hz))
1960 break;
1961
1962 if (__try_evict_buffer(b, 0))
1963 count--;
1964
1965 cond_resched();
1966 }
1967
1968 dm_bufio_unlock(c);
1969}
1970
1971static void do_global_cleanup(struct work_struct *w)
1972{
1973 struct dm_bufio_client *locked_client = NULL;
1974 struct dm_bufio_client *current_client;
1975 struct dm_buffer *b;
1976 unsigned spinlock_hold_count;
1977 unsigned long threshold = dm_bufio_cache_size -
1978 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
1979 unsigned long loops = global_num * 2;
1980
1981 mutex_lock(&dm_bufio_clients_lock);
1982
1983 while (1) {
1984 cond_resched();
1985
1986 spin_lock(&global_spinlock);
1987 if (unlikely(dm_bufio_current_allocated <= threshold))
1988 break;
1989
1990 spinlock_hold_count = 0;
1991get_next:
1992 if (!loops--)
1993 break;
1994 if (unlikely(list_empty(&global_queue)))
1995 break;
1996 b = list_entry(global_queue.prev, struct dm_buffer, global_list);
1997
1998 if (b->accessed) {
1999 b->accessed = 0;
2000 list_move(&b->global_list, &global_queue);
2001 if (likely(++spinlock_hold_count < 16))
2002 goto get_next;
2003 spin_unlock(&global_spinlock);
2004 continue;
2005 }
2006
2007 current_client = b->c;
2008 if (unlikely(current_client != locked_client)) {
2009 if (locked_client)
2010 dm_bufio_unlock(locked_client);
2011
2012 if (!dm_bufio_trylock(current_client)) {
2013 spin_unlock(&global_spinlock);
2014 dm_bufio_lock(current_client);
2015 locked_client = current_client;
2016 continue;
2017 }
2018
2019 locked_client = current_client;
2020 }
2021
2022 spin_unlock(&global_spinlock);
2023
2024 if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
2025 spin_lock(&global_spinlock);
2026 list_move(&b->global_list, &global_queue);
2027 spin_unlock(&global_spinlock);
2028 }
2029 }
2030
2031 spin_unlock(&global_spinlock);
2032
2033 if (locked_client)
2034 dm_bufio_unlock(locked_client);
2035
2036 mutex_unlock(&dm_bufio_clients_lock);
2037}
2038
2039static void cleanup_old_buffers(void)
2040{
2041 unsigned long max_age_hz = get_max_age_hz();
2042 struct dm_bufio_client *c;
2043
2044 mutex_lock(&dm_bufio_clients_lock);
2045
2046 __cache_size_refresh();
2047
2048 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2049 __evict_old_buffers(c, max_age_hz);
2050
2051 mutex_unlock(&dm_bufio_clients_lock);
2052}
2053
2054static void work_fn(struct work_struct *w)
2055{
2056 cleanup_old_buffers();
2057
2058 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2059 DM_BUFIO_WORK_TIMER_SECS * HZ);
2060}
2061
2062/*----------------------------------------------------------------
2063 * Module setup
2064 *--------------------------------------------------------------*/
2065
2066/*
2067 * This is called only once for the whole dm_bufio module.
2068 * It initializes memory limit.
2069 */
2070static int __init dm_bufio_init(void)
2071{
2072 __u64 mem;
2073
2074 dm_bufio_allocated_kmem_cache = 0;
2075 dm_bufio_allocated_get_free_pages = 0;
2076 dm_bufio_allocated_vmalloc = 0;
2077 dm_bufio_current_allocated = 0;
2078
	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;

	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
	destroy_workqueue(dm_bufio_wq);

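	/*
	 * All clients must have been destroyed before unload; any
	 * accounting that is still nonzero below indicates a leak.
	 */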
	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

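/*
 * The parameters above appear under /sys/module/dm_bufio/parameters/.
 * For example (hypothetical value), the cache can be capped at 64 MiB
 * at runtime with:
 *   echo 67108864 > /sys/module/dm_bufio/parameters/max_cache_size_bytes
 */
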
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");
1/*
2 * Copyright (C) 2009-2011 Red Hat, Inc.
3 *
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
5 *
6 * This file is released under the GPL.
7 */
8
9#include <linux/dm-bufio.h>
10
11#include <linux/device-mapper.h>
12#include <linux/dm-io.h>
13#include <linux/slab.h>
14#include <linux/sched/mm.h>
15#include <linux/jiffies.h>
16#include <linux/vmalloc.h>
17#include <linux/shrinker.h>
18#include <linux/module.h>
19#include <linux/rbtree.h>
20#include <linux/stacktrace.h>
21
22#define DM_MSG_PREFIX "bufio"
23
24/*
25 * Memory management policy:
26 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
27 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
28 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
29 * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
30 * dirty buffers.
31 */
32#define DM_BUFIO_MIN_BUFFERS 8
33
34#define DM_BUFIO_MEMORY_PERCENT 2
35#define DM_BUFIO_VMALLOC_PERCENT 25
36#define DM_BUFIO_WRITEBACK_RATIO 3
37#define DM_BUFIO_LOW_WATERMARK_RATIO 16
38
39/*
40 * Check buffer ages in this interval (seconds)
41 */
42#define DM_BUFIO_WORK_TIMER_SECS 30
43
44/*
45 * Free buffers when they are older than this (seconds)
46 */
47#define DM_BUFIO_DEFAULT_AGE_SECS 300
48
49/*
50 * The nr of bytes of cached data to keep around.
51 */
52#define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
53
54/*
55 * Align buffer writes to this boundary.
56 * Tests show that SSDs have the highest IOPS when using 4k writes.
57 */
58#define DM_BUFIO_WRITE_ALIGN 4096
59
60/*
61 * dm_buffer->list_mode
62 */
63#define LIST_CLEAN 0
64#define LIST_DIRTY 1
65#define LIST_SIZE 2
66
67/*
68 * Linking of buffers:
69 * All buffers are linked to buffer_tree with their node field.
70 *
71 * Clean buffers that are not being written (B_WRITING not set)
72 * are linked to lru[LIST_CLEAN] with their lru_list field.
73 *
74 * Dirty and clean buffers that are being written are linked to
75 * lru[LIST_DIRTY] with their lru_list field. When the write
76 * finishes, the buffer cannot be relinked immediately (because we
77 * are in an interrupt context and relinking requires process
78 * context), so some clean-not-writing buffers can be held on
79 * dirty_lru too. They are later added to lru in the process
80 * context.
81 */
82struct dm_bufio_client {
83 struct mutex lock;
84
85 struct list_head lru[LIST_SIZE];
86 unsigned long n_buffers[LIST_SIZE];
87
88 struct block_device *bdev;
89 unsigned block_size;
90 s8 sectors_per_block_bits;
91 void (*alloc_callback)(struct dm_buffer *);
92 void (*write_callback)(struct dm_buffer *);
93
94 struct kmem_cache *slab_buffer;
95 struct kmem_cache *slab_cache;
96 struct dm_io_client *dm_io;
97
98 struct list_head reserved_buffers;
99 unsigned need_reserved_buffers;
100
101 unsigned minimum_buffers;
102
103 struct rb_root buffer_tree;
104 wait_queue_head_t free_buffer_wait;
105
106 sector_t start;
107
108 int async_write_error;
109
110 struct list_head client_list;
111
112 struct shrinker shrinker;
113 struct work_struct shrink_work;
114 atomic_long_t need_shrink;
115};
116
117/*
118 * Buffer state bits.
119 */
120#define B_READING 0
121#define B_WRITING 1
122#define B_DIRTY 2
123
124/*
125 * Describes how the block was allocated:
126 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
127 * See the comment at alloc_buffer_data.
128 */
129enum data_mode {
130 DATA_MODE_SLAB = 0,
131 DATA_MODE_GET_FREE_PAGES = 1,
132 DATA_MODE_VMALLOC = 2,
133 DATA_MODE_LIMIT = 3
134};
135
136struct dm_buffer {
137 struct rb_node node;
138 struct list_head lru_list;
139 struct list_head global_list;
140 sector_t block;
141 void *data;
142 unsigned char data_mode; /* DATA_MODE_* */
143 unsigned char list_mode; /* LIST_* */
144 blk_status_t read_error;
145 blk_status_t write_error;
146 unsigned accessed;
147 unsigned hold_count;
148 unsigned long state;
149 unsigned long last_accessed;
150 unsigned dirty_start;
151 unsigned dirty_end;
152 unsigned write_start;
153 unsigned write_end;
154 struct dm_bufio_client *c;
155 struct list_head write_list;
156 void (*end_io)(struct dm_buffer *, blk_status_t);
157#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
158#define MAX_STACK 10
159 unsigned int stack_len;
160 unsigned long stack_entries[MAX_STACK];
161#endif
162};
163
164/*----------------------------------------------------------------*/
165
166#define dm_bufio_in_request() (!!current->bio_list)
167
168static void dm_bufio_lock(struct dm_bufio_client *c)
169{
170 mutex_lock_nested(&c->lock, dm_bufio_in_request());
171}
172
173static int dm_bufio_trylock(struct dm_bufio_client *c)
174{
175 return mutex_trylock(&c->lock);
176}
177
178static void dm_bufio_unlock(struct dm_bufio_client *c)
179{
180 mutex_unlock(&c->lock);
181}
182
183/*----------------------------------------------------------------*/
184
185/*
186 * Default cache size: available memory divided by the ratio.
187 */
188static unsigned long dm_bufio_default_cache_size;
189
190/*
191 * Total cache size set by the user.
192 */
193static unsigned long dm_bufio_cache_size;
194
195/*
196 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
197 * at any time. If it disagrees, the user has changed cache size.
198 */
199static unsigned long dm_bufio_cache_size_latch;
200
201static DEFINE_SPINLOCK(global_spinlock);
202
203static LIST_HEAD(global_queue);
204
205static unsigned long global_num = 0;
206
207/*
208 * Buffers are freed after this timeout
209 */
210static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
211static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
212
213static unsigned long dm_bufio_peak_allocated;
214static unsigned long dm_bufio_allocated_kmem_cache;
215static unsigned long dm_bufio_allocated_get_free_pages;
216static unsigned long dm_bufio_allocated_vmalloc;
217static unsigned long dm_bufio_current_allocated;
218
219/*----------------------------------------------------------------*/
220
221/*
222 * The current number of clients.
223 */
224static int dm_bufio_client_count;
225
226/*
227 * The list of all clients.
228 */
229static LIST_HEAD(dm_bufio_all_clients);
230
231/*
232 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
233 */
234static DEFINE_MUTEX(dm_bufio_clients_lock);
235
236static struct workqueue_struct *dm_bufio_wq;
237static struct delayed_work dm_bufio_cleanup_old_work;
238static struct work_struct dm_bufio_replacement_work;
239
240
241#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
242static void buffer_record_stack(struct dm_buffer *b)
243{
244 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
245}
246#endif
247
248/*----------------------------------------------------------------
249 * A red/black tree acts as an index for all the buffers.
250 *--------------------------------------------------------------*/
251static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
252{
253 struct rb_node *n = c->buffer_tree.rb_node;
254 struct dm_buffer *b;
255
256 while (n) {
257 b = container_of(n, struct dm_buffer, node);
258
259 if (b->block == block)
260 return b;
261
262 n = block < b->block ? n->rb_left : n->rb_right;
263 }
264
265 return NULL;
266}
267
268static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
269{
270 struct rb_node *n = c->buffer_tree.rb_node;
271 struct dm_buffer *b;
272 struct dm_buffer *best = NULL;
273
274 while (n) {
275 b = container_of(n, struct dm_buffer, node);
276
277 if (b->block == block)
278 return b;
279
280 if (block <= b->block) {
281 n = n->rb_left;
282 best = b;
283 } else {
284 n = n->rb_right;
285 }
286 }
287
288 return best;
289}
290
291static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
292{
293 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
294 struct dm_buffer *found;
295
296 while (*new) {
297 found = container_of(*new, struct dm_buffer, node);
298
299 if (found->block == b->block) {
300 BUG_ON(found != b);
301 return;
302 }
303
304 parent = *new;
305 new = b->block < found->block ?
306 &found->node.rb_left : &found->node.rb_right;
307 }
308
309 rb_link_node(&b->node, parent, new);
310 rb_insert_color(&b->node, &c->buffer_tree);
311}
312
313static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
314{
315 rb_erase(&b->node, &c->buffer_tree);
316}
317
318/*----------------------------------------------------------------*/
319
320static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
321{
322 unsigned char data_mode;
323 long diff;
324
325 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
326 &dm_bufio_allocated_kmem_cache,
327 &dm_bufio_allocated_get_free_pages,
328 &dm_bufio_allocated_vmalloc,
329 };
330
331 data_mode = b->data_mode;
332 diff = (long)b->c->block_size;
333 if (unlink)
334 diff = -diff;
335
336 spin_lock(&global_spinlock);
337
338 *class_ptr[data_mode] += diff;
339
340 dm_bufio_current_allocated += diff;
341
342 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
343 dm_bufio_peak_allocated = dm_bufio_current_allocated;
344
345 b->accessed = 1;
346
347 if (!unlink) {
348 list_add(&b->global_list, &global_queue);
349 global_num++;
350 if (dm_bufio_current_allocated > dm_bufio_cache_size)
351 queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
352 } else {
353 list_del(&b->global_list);
354 global_num--;
355 }
356
357 spin_unlock(&global_spinlock);
358}
359
360/*
361 * Change the number of clients and recalculate per-client limit.
362 */
363static void __cache_size_refresh(void)
364{
365 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
366 BUG_ON(dm_bufio_client_count < 0);
367
368 dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
369
370 /*
371 * Use default if set to 0 and report the actual cache size used.
372 */
373 if (!dm_bufio_cache_size_latch) {
374 (void)cmpxchg(&dm_bufio_cache_size, 0,
375 dm_bufio_default_cache_size);
376 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
377 }
378}
379
380/*
381 * Allocating buffer data.
382 *
383 * Small buffers are allocated with kmem_cache, to use space optimally.
384 *
385 * For large buffers, we choose between get_free_pages and vmalloc.
386 * Each has advantages and disadvantages.
387 *
388 * __get_free_pages can randomly fail if the memory is fragmented.
389 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
390 * as low as 128M) so using it for caching is not appropriate.
391 *
392 * If the allocation may fail we use __get_free_pages. Memory fragmentation
393 * won't have a fatal effect here, but it just causes flushes of some other
394 * buffers and more I/O will be performed. Don't use __get_free_pages if it
395 * always fails (i.e. order >= MAX_ORDER).
396 *
397 * If the allocation shouldn't fail we use __vmalloc. This is only for the
398 * initial reserve allocation, so there's no risk of wasting all vmalloc
399 * space.
400 */
401static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
402 unsigned char *data_mode)
403{
404 if (unlikely(c->slab_cache != NULL)) {
405 *data_mode = DATA_MODE_SLAB;
406 return kmem_cache_alloc(c->slab_cache, gfp_mask);
407 }
408
409 if (c->block_size <= KMALLOC_MAX_SIZE &&
410 gfp_mask & __GFP_NORETRY) {
411 *data_mode = DATA_MODE_GET_FREE_PAGES;
412 return (void *)__get_free_pages(gfp_mask,
413 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
414 }
415
416 *data_mode = DATA_MODE_VMALLOC;
417
418 /*
419 * __vmalloc allocates the data pages and auxiliary structures with
420 * gfp_flags that were specified, but pagetables are always allocated
421 * with GFP_KERNEL, no matter what was specified as gfp_mask.
422 *
423 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
424 * all allocations done by this process (including pagetables) are done
425 * as if GFP_NOIO was specified.
426 */
427 if (gfp_mask & __GFP_NORETRY) {
428 unsigned noio_flag = memalloc_noio_save();
429 void *ptr = __vmalloc(c->block_size, gfp_mask);
430
431 memalloc_noio_restore(noio_flag);
432 return ptr;
433 }
434
435 return __vmalloc(c->block_size, gfp_mask);
436}
437
438/*
439 * Free buffer's data.
440 */
441static void free_buffer_data(struct dm_bufio_client *c,
442 void *data, unsigned char data_mode)
443{
444 switch (data_mode) {
445 case DATA_MODE_SLAB:
446 kmem_cache_free(c->slab_cache, data);
447 break;
448
449 case DATA_MODE_GET_FREE_PAGES:
450 free_pages((unsigned long)data,
451 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
452 break;
453
454 case DATA_MODE_VMALLOC:
455 vfree(data);
456 break;
457
458 default:
459 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
460 data_mode);
461 BUG();
462 }
463}
464
465/*
466 * Allocate buffer and its data.
467 */
468static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
469{
470 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
471
472 if (!b)
473 return NULL;
474
475 b->c = c;
476
477 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
478 if (!b->data) {
479 kmem_cache_free(c->slab_buffer, b);
480 return NULL;
481 }
482
483#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
484 b->stack_len = 0;
485#endif
486 return b;
487}
488
489/*
490 * Free buffer and its data.
491 */
492static void free_buffer(struct dm_buffer *b)
493{
494 struct dm_bufio_client *c = b->c;
495
496 free_buffer_data(c, b->data, b->data_mode);
497 kmem_cache_free(c->slab_buffer, b);
498}
499
500/*
501 * Link buffer to the buffer tree and clean or dirty queue.
502 */
503static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
504{
505 struct dm_bufio_client *c = b->c;
506
507 c->n_buffers[dirty]++;
508 b->block = block;
509 b->list_mode = dirty;
510 list_add(&b->lru_list, &c->lru[dirty]);
511 __insert(b->c, b);
512 b->last_accessed = jiffies;
513
514 adjust_total_allocated(b, false);
515}
516
517/*
518 * Unlink buffer from the buffer tree and dirty or clean queue.
519 */
520static void __unlink_buffer(struct dm_buffer *b)
521{
522 struct dm_bufio_client *c = b->c;
523
524 BUG_ON(!c->n_buffers[b->list_mode]);
525
526 c->n_buffers[b->list_mode]--;
527 __remove(b->c, b);
528 list_del(&b->lru_list);
529
530 adjust_total_allocated(b, true);
531}
532
533/*
534 * Place the buffer to the head of dirty or clean LRU queue.
535 */
536static void __relink_lru(struct dm_buffer *b, int dirty)
537{
538 struct dm_bufio_client *c = b->c;
539
540 b->accessed = 1;
541
542 BUG_ON(!c->n_buffers[b->list_mode]);
543
544 c->n_buffers[b->list_mode]--;
545 c->n_buffers[dirty]++;
546 b->list_mode = dirty;
547 list_move(&b->lru_list, &c->lru[dirty]);
548 b->last_accessed = jiffies;
549}
550
551/*----------------------------------------------------------------
552 * Submit I/O on the buffer.
553 *
554 * Bio interface is faster but it has some problems:
555 * the vector list is limited (increasing this limit increases
556 * memory-consumption per buffer, so it is not viable);
557 *
558 * the memory must be direct-mapped, not vmalloced;
559 *
560 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
561 * it is not vmalloced, try using the bio interface.
562 *
563 * If the buffer is big, if it is vmalloced or if the underlying device
564 * rejects the bio because it is too large, use dm-io layer to do the I/O.
565 * The dm-io layer splits the I/O into multiple requests, avoiding the above
566 * shortcomings.
567 *--------------------------------------------------------------*/
568
569/*
570 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
571 * that the request was handled directly with bio interface.
572 */
573static void dmio_complete(unsigned long error, void *context)
574{
575 struct dm_buffer *b = context;
576
577 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
578}
579
580static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
581 unsigned n_sectors, unsigned offset)
582{
583 int r;
584 struct dm_io_request io_req = {
585 .bi_op = rw,
586 .bi_op_flags = 0,
587 .notify.fn = dmio_complete,
588 .notify.context = b,
589 .client = b->c->dm_io,
590 };
591 struct dm_io_region region = {
592 .bdev = b->c->bdev,
593 .sector = sector,
594 .count = n_sectors,
595 };
596
597 if (b->data_mode != DATA_MODE_VMALLOC) {
598 io_req.mem.type = DM_IO_KMEM;
599 io_req.mem.ptr.addr = (char *)b->data + offset;
600 } else {
601 io_req.mem.type = DM_IO_VMA;
602 io_req.mem.ptr.vma = (char *)b->data + offset;
603 }
604
605 r = dm_io(&io_req, 1, ®ion, NULL);
606 if (unlikely(r))
607 b->end_io(b, errno_to_blk_status(r));
608}
609
610static void bio_complete(struct bio *bio)
611{
612 struct dm_buffer *b = bio->bi_private;
613 blk_status_t status = bio->bi_status;
614 bio_put(bio);
615 b->end_io(b, status);
616}
617
618static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
619 unsigned n_sectors, unsigned offset)
620{
621 struct bio *bio;
622 char *ptr;
623 unsigned vec_size, len;
624
625 vec_size = b->c->block_size >> PAGE_SHIFT;
626 if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
627 vec_size += 2;
628
629 bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
630 if (!bio) {
631dmio:
632 use_dmio(b, rw, sector, n_sectors, offset);
633 return;
634 }
635
636 bio->bi_iter.bi_sector = sector;
637 bio_set_dev(bio, b->c->bdev);
638 bio_set_op_attrs(bio, rw, 0);
639 bio->bi_end_io = bio_complete;
640 bio->bi_private = b;
641
642 ptr = (char *)b->data + offset;
643 len = n_sectors << SECTOR_SHIFT;
644
645 do {
646 unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
647 if (!bio_add_page(bio, virt_to_page(ptr), this_step,
648 offset_in_page(ptr))) {
649 bio_put(bio);
650 goto dmio;
651 }
652
653 len -= this_step;
654 ptr += this_step;
655 } while (len > 0);
656
657 submit_bio(bio);
658}
659
660static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
661{
662 sector_t sector;
663
664 if (likely(c->sectors_per_block_bits >= 0))
665 sector = block << c->sectors_per_block_bits;
666 else
667 sector = block * (c->block_size >> SECTOR_SHIFT);
668 sector += c->start;
669
670 return sector;
671}
672
673static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
674{
675 unsigned n_sectors;
676 sector_t sector;
677 unsigned offset, end;
678
679 b->end_io = end_io;
680
681 sector = block_to_sector(b->c, b->block);
682
683 if (rw != REQ_OP_WRITE) {
684 n_sectors = b->c->block_size >> SECTOR_SHIFT;
685 offset = 0;
686 } else {
687 if (b->c->write_callback)
688 b->c->write_callback(b);
689 offset = b->write_start;
690 end = b->write_end;
691 offset &= -DM_BUFIO_WRITE_ALIGN;
692 end += DM_BUFIO_WRITE_ALIGN - 1;
693 end &= -DM_BUFIO_WRITE_ALIGN;
694 if (unlikely(end > b->c->block_size))
695 end = b->c->block_size;
696
697 sector += offset >> SECTOR_SHIFT;
698 n_sectors = (end - offset) >> SECTOR_SHIFT;
699 }
700
701 if (b->data_mode != DATA_MODE_VMALLOC)
702 use_bio(b, rw, sector, n_sectors, offset);
703 else
704 use_dmio(b, rw, sector, n_sectors, offset);
705}
706
707/*----------------------------------------------------------------
708 * Writing dirty buffers
709 *--------------------------------------------------------------*/
710
711/*
712 * The endio routine for write.
713 *
714 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
715 * it.
716 */
717static void write_endio(struct dm_buffer *b, blk_status_t status)
718{
719 b->write_error = status;
720 if (unlikely(status)) {
721 struct dm_bufio_client *c = b->c;
722
723 (void)cmpxchg(&c->async_write_error, 0,
724 blk_status_to_errno(status));
725 }
726
727 BUG_ON(!test_bit(B_WRITING, &b->state));
728
729 smp_mb__before_atomic();
730 clear_bit(B_WRITING, &b->state);
731 smp_mb__after_atomic();
732
733 wake_up_bit(&b->state, B_WRITING);
734}
735
736/*
737 * Initiate a write on a dirty buffer, but don't wait for it.
738 *
739 * - If the buffer is not dirty, exit.
740 * - If there some previous write going on, wait for it to finish (we can't
741 * have two writes on the same buffer simultaneously).
742 * - Submit our write and don't wait on it. We set B_WRITING indicating
743 * that there is a write in progress.
744 */
745static void __write_dirty_buffer(struct dm_buffer *b,
746 struct list_head *write_list)
747{
748 if (!test_bit(B_DIRTY, &b->state))
749 return;
750
751 clear_bit(B_DIRTY, &b->state);
752 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
753
754 b->write_start = b->dirty_start;
755 b->write_end = b->dirty_end;
756
757 if (!write_list)
758 submit_io(b, REQ_OP_WRITE, write_endio);
759 else
760 list_add_tail(&b->write_list, write_list);
761}
762
763static void __flush_write_list(struct list_head *write_list)
764{
765 struct blk_plug plug;
766 blk_start_plug(&plug);
767 while (!list_empty(write_list)) {
768 struct dm_buffer *b =
769 list_entry(write_list->next, struct dm_buffer, write_list);
770 list_del(&b->write_list);
771 submit_io(b, REQ_OP_WRITE, write_endio);
772 cond_resched();
773 }
774 blk_finish_plug(&plug);
775}
776
777/*
778 * Wait until any activity on the buffer finishes. Possibly write the
779 * buffer if it is dirty. When this function finishes, there is no I/O
780 * running on the buffer and the buffer is not dirty.
781 */
782static void __make_buffer_clean(struct dm_buffer *b)
783{
784 BUG_ON(b->hold_count);
785
786 if (!b->state) /* fast case */
787 return;
788
789 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
790 __write_dirty_buffer(b, NULL);
791 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
792}
793
794/*
795 * Find some buffer that is not held by anybody, clean it, unlink it and
796 * return it.
797 */
798static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
799{
800 struct dm_buffer *b;
801
802 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
803 BUG_ON(test_bit(B_WRITING, &b->state));
804 BUG_ON(test_bit(B_DIRTY, &b->state));
805
806 if (!b->hold_count) {
807 __make_buffer_clean(b);
808 __unlink_buffer(b);
809 return b;
810 }
811 cond_resched();
812 }
813
814 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
815 BUG_ON(test_bit(B_READING, &b->state));
816
817 if (!b->hold_count) {
818 __make_buffer_clean(b);
819 __unlink_buffer(b);
820 return b;
821 }
822 cond_resched();
823 }
824
825 return NULL;
826}
827
828/*
829 * Wait until some other threads free some buffer or release hold count on
830 * some buffer.
831 *
832 * This function is entered with c->lock held, drops it and regains it
833 * before exiting.
834 */
835static void __wait_for_free_buffer(struct dm_bufio_client *c)
836{
837 DECLARE_WAITQUEUE(wait, current);
838
839 add_wait_queue(&c->free_buffer_wait, &wait);
840 set_current_state(TASK_UNINTERRUPTIBLE);
841 dm_bufio_unlock(c);
842
843 io_schedule();
844
845 remove_wait_queue(&c->free_buffer_wait, &wait);
846
847 dm_bufio_lock(c);
848}
849
850enum new_flag {
851 NF_FRESH = 0,
852 NF_READ = 1,
853 NF_GET = 2,
854 NF_PREFETCH = 3
855};
856
857/*
858 * Allocate a new buffer. If the allocation is not possible, wait until
859 * some other thread frees a buffer.
860 *
861 * May drop the lock and regain it.
862 */
863static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
864{
865 struct dm_buffer *b;
866 bool tried_noio_alloc = false;
867
868 /*
869 * dm-bufio is resistant to allocation failures (it just keeps
870 * one buffer reserved in cases all the allocations fail).
871 * So set flags to not try too hard:
872 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
873 * mutex and wait ourselves.
874 * __GFP_NORETRY: don't retry and rather return failure
875 * __GFP_NOMEMALLOC: don't use emergency reserves
876 * __GFP_NOWARN: don't print a warning in case of failure
877 *
878 * For debugging, if we set the cache size to 1, no new buffers will
879 * be allocated.
880 */
881 while (1) {
882 if (dm_bufio_cache_size_latch != 1) {
883 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
884 if (b)
885 return b;
886 }
887
888 if (nf == NF_PREFETCH)
889 return NULL;
890
891 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
892 dm_bufio_unlock(c);
893 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
894 dm_bufio_lock(c);
895 if (b)
896 return b;
897 tried_noio_alloc = true;
898 }
899
900 if (!list_empty(&c->reserved_buffers)) {
901 b = list_entry(c->reserved_buffers.next,
902 struct dm_buffer, lru_list);
903 list_del(&b->lru_list);
904 c->need_reserved_buffers++;
905
906 return b;
907 }
908
909 b = __get_unclaimed_buffer(c);
910 if (b)
911 return b;
912
913 __wait_for_free_buffer(c);
914 }
915}
916
917static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
918{
919 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
920
921 if (!b)
922 return NULL;
923
924 if (c->alloc_callback)
925 c->alloc_callback(b);
926
927 return b;
928}
929
930/*
931 * Free a buffer and wake other threads waiting for free buffers.
932 */
933static void __free_buffer_wake(struct dm_buffer *b)
934{
935 struct dm_bufio_client *c = b->c;
936
937 if (!c->need_reserved_buffers)
938 free_buffer(b);
939 else {
940 list_add(&b->lru_list, &c->reserved_buffers);
941 c->need_reserved_buffers--;
942 }
943
944 wake_up(&c->free_buffer_wait);
945}
946
947static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
948 struct list_head *write_list)
949{
950 struct dm_buffer *b, *tmp;
951
952 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
953 BUG_ON(test_bit(B_READING, &b->state));
954
955 if (!test_bit(B_DIRTY, &b->state) &&
956 !test_bit(B_WRITING, &b->state)) {
957 __relink_lru(b, LIST_CLEAN);
958 continue;
959 }
960
961 if (no_wait && test_bit(B_WRITING, &b->state))
962 return;
963
964 __write_dirty_buffer(b, write_list);
965 cond_resched();
966 }
967}
968
969/*
970 * Check if we're over watermark.
971 * If we are over threshold_buffers, start freeing buffers.
972 * If we're over "limit_buffers", block until we get under the limit.
973 */
974static void __check_watermark(struct dm_bufio_client *c,
975 struct list_head *write_list)
976{
977 if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
978 __write_dirty_buffers_async(c, 1, write_list);
979}
980
981/*----------------------------------------------------------------
982 * Getting a buffer
983 *--------------------------------------------------------------*/
984
985static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
986 enum new_flag nf, int *need_submit,
987 struct list_head *write_list)
988{
989 struct dm_buffer *b, *new_b = NULL;
990
991 *need_submit = 0;
992
993 b = __find(c, block);
994 if (b)
995 goto found_buffer;
996
997 if (nf == NF_GET)
998 return NULL;
999
1000 new_b = __alloc_buffer_wait(c, nf);
1001 if (!new_b)
1002 return NULL;
1003
1004 /*
1005 * We've had a period where the mutex was unlocked, so need to
1006 * recheck the buffer tree.
1007 */
1008 b = __find(c, block);
1009 if (b) {
1010 __free_buffer_wake(new_b);
1011 goto found_buffer;
1012 }
1013
1014 __check_watermark(c, write_list);
1015
1016 b = new_b;
1017 b->hold_count = 1;
1018 b->read_error = 0;
1019 b->write_error = 0;
1020 __link_buffer(b, block, LIST_CLEAN);
1021
1022 if (nf == NF_FRESH) {
1023 b->state = 0;
1024 return b;
1025 }
1026
1027 b->state = 1 << B_READING;
1028 *need_submit = 1;
1029
1030 return b;
1031
1032found_buffer:
1033 if (nf == NF_PREFETCH)
1034 return NULL;
1035 /*
1036 * Note: it is essential that we don't wait for the buffer to be
1037 * read if dm_bufio_get function is used. Both dm_bufio_get and
1038 * dm_bufio_prefetch can be used in the driver request routine.
1039 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1040 * the same buffer, it would deadlock if we waited.
1041 */
1042 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1043 return NULL;
1044
1045 b->hold_count++;
1046 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1047 test_bit(B_WRITING, &b->state));
1048 return b;
1049}
1050
1051/*
1052 * The endio routine for reading: set the error, clear the bit and wake up
1053 * anyone waiting on the buffer.
1054 */
1055static void read_endio(struct dm_buffer *b, blk_status_t status)
1056{
1057 b->read_error = status;
1058
1059 BUG_ON(!test_bit(B_READING, &b->state));
1060
1061 smp_mb__before_atomic();
1062 clear_bit(B_READING, &b->state);
1063 smp_mb__after_atomic();
1064
1065 wake_up_bit(&b->state, B_READING);
1066}
1067
1068/*
1069 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1070 * functions is similar except that dm_bufio_new doesn't read the
1071 * buffer from the disk (assuming that the caller overwrites all the data
1072 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1073 */
1074static void *new_read(struct dm_bufio_client *c, sector_t block,
1075 enum new_flag nf, struct dm_buffer **bp)
1076{
1077 int need_submit;
1078 struct dm_buffer *b;
1079
1080 LIST_HEAD(write_list);
1081
1082 dm_bufio_lock(c);
1083 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1084#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1085 if (b && b->hold_count == 1)
1086 buffer_record_stack(b);
1087#endif
1088 dm_bufio_unlock(c);
1089
1090 __flush_write_list(&write_list);
1091
1092 if (!b)
1093 return NULL;
1094
1095 if (need_submit)
1096 submit_io(b, REQ_OP_READ, read_endio);
1097
1098 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1099
1100 if (b->read_error) {
1101 int error = blk_status_to_errno(b->read_error);
1102
1103 dm_bufio_release(b);
1104
1105 return ERR_PTR(error);
1106 }
1107
1108 *bp = b;
1109
1110 return b->data;
1111}
1112
1113void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1114 struct dm_buffer **bp)
1115{
1116 return new_read(c, block, NF_GET, bp);
1117}
1118EXPORT_SYMBOL_GPL(dm_bufio_get);
1119
1120void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1121 struct dm_buffer **bp)
1122{
1123 BUG_ON(dm_bufio_in_request());
1124
1125 return new_read(c, block, NF_READ, bp);
1126}
1127EXPORT_SYMBOL_GPL(dm_bufio_read);
1128
1129void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1130 struct dm_buffer **bp)
1131{
1132 BUG_ON(dm_bufio_in_request());
1133
1134 return new_read(c, block, NF_FRESH, bp);
1135}
1136EXPORT_SYMBOL_GPL(dm_bufio_new);
1137
1138void dm_bufio_prefetch(struct dm_bufio_client *c,
1139 sector_t block, unsigned n_blocks)
1140{
1141 struct blk_plug plug;
1142
1143 LIST_HEAD(write_list);
1144
1145 BUG_ON(dm_bufio_in_request());
1146
1147 blk_start_plug(&plug);
1148 dm_bufio_lock(c);
1149
1150 for (; n_blocks--; block++) {
1151 int need_submit;
1152 struct dm_buffer *b;
1153 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1154 &write_list);
1155 if (unlikely(!list_empty(&write_list))) {
1156 dm_bufio_unlock(c);
1157 blk_finish_plug(&plug);
1158 __flush_write_list(&write_list);
1159 blk_start_plug(&plug);
1160 dm_bufio_lock(c);
1161 }
1162 if (unlikely(b != NULL)) {
1163 dm_bufio_unlock(c);
1164
1165 if (need_submit)
1166 submit_io(b, REQ_OP_READ, read_endio);
1167 dm_bufio_release(b);
1168
1169 cond_resched();
1170
1171 if (!n_blocks)
1172 goto flush_plug;
1173 dm_bufio_lock(c);
1174 }
1175 }
1176
1177 dm_bufio_unlock(c);
1178
1179flush_plug:
1180 blk_finish_plug(&plug);
1181}
1182EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1183
1184void dm_bufio_release(struct dm_buffer *b)
1185{
1186 struct dm_bufio_client *c = b->c;
1187
1188 dm_bufio_lock(c);
1189
1190 BUG_ON(!b->hold_count);
1191
1192 b->hold_count--;
1193 if (!b->hold_count) {
1194 wake_up(&c->free_buffer_wait);
1195
1196 /*
1197 * If there were errors on the buffer, and the buffer is not
1198 * to be written, free the buffer. There is no point in caching
1199 * invalid buffer.
1200 */
1201 if ((b->read_error || b->write_error) &&
1202 !test_bit(B_READING, &b->state) &&
1203 !test_bit(B_WRITING, &b->state) &&
1204 !test_bit(B_DIRTY, &b->state)) {
1205 __unlink_buffer(b);
1206 __free_buffer_wake(b);
1207 }
1208 }
1209
1210 dm_bufio_unlock(c);
1211}
1212EXPORT_SYMBOL_GPL(dm_bufio_release);
1213
1214void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
1215 unsigned start, unsigned end)
1216{
1217 struct dm_bufio_client *c = b->c;
1218
1219 BUG_ON(start >= end);
1220 BUG_ON(end > b->c->block_size);
1221
1222 dm_bufio_lock(c);
1223
1224 BUG_ON(test_bit(B_READING, &b->state));
1225
1226 if (!test_and_set_bit(B_DIRTY, &b->state)) {
1227 b->dirty_start = start;
1228 b->dirty_end = end;
1229 __relink_lru(b, LIST_DIRTY);
1230 } else {
1231 if (start < b->dirty_start)
1232 b->dirty_start = start;
1233 if (end > b->dirty_end)
1234 b->dirty_end = end;
1235 }
1236
1237 dm_bufio_unlock(c);
1238}
1239EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
1240
1241void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1242{
1243 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
1244}
1245EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1246
1247void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1248{
1249 LIST_HEAD(write_list);
1250
1251 BUG_ON(dm_bufio_in_request());
1252
1253 dm_bufio_lock(c);
1254 __write_dirty_buffers_async(c, 0, &write_list);
1255 dm_bufio_unlock(c);
1256 __flush_write_list(&write_list);
1257}
1258EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1259
1260/*
1261 * For performance, it is essential that the buffers are written asynchronously
1262 * and simultaneously (so that the block layer can merge the writes) and then
1263 * waited upon.
1264 *
1265 * Finally, we flush hardware disk cache.
1266 */
1267int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1268{
1269 int a, f;
1270 unsigned long buffers_processed = 0;
1271 struct dm_buffer *b, *tmp;
1272
1273 LIST_HEAD(write_list);
1274
1275 dm_bufio_lock(c);
1276 __write_dirty_buffers_async(c, 0, &write_list);
1277 dm_bufio_unlock(c);
1278 __flush_write_list(&write_list);
1279 dm_bufio_lock(c);
1280
1281again:
1282 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1283 int dropped_lock = 0;
1284
1285 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1286 buffers_processed++;
1287
1288 BUG_ON(test_bit(B_READING, &b->state));
1289
1290 if (test_bit(B_WRITING, &b->state)) {
1291 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1292 dropped_lock = 1;
1293 b->hold_count++;
1294 dm_bufio_unlock(c);
1295 wait_on_bit_io(&b->state, B_WRITING,
1296 TASK_UNINTERRUPTIBLE);
1297 dm_bufio_lock(c);
1298 b->hold_count--;
1299 } else
1300 wait_on_bit_io(&b->state, B_WRITING,
1301 TASK_UNINTERRUPTIBLE);
1302 }
1303
1304 if (!test_bit(B_DIRTY, &b->state) &&
1305 !test_bit(B_WRITING, &b->state))
1306 __relink_lru(b, LIST_CLEAN);
1307
1308 cond_resched();
1309
1310 /*
1311 * If we dropped the lock, the list is no longer consistent,
1312 * so we must restart the search.
1313 *
1314 * In the most common case, the buffer just processed is
1315 * relinked to the clean list, so we won't loop scanning the
1316 * same buffer again and again.
1317 *
1318 * This may livelock if there is another thread simultaneously
1319 * dirtying buffers, so we count the number of buffers walked
1320 * and if it exceeds the total number of buffers, it means that
1321 * someone is doing some writes simultaneously with us. In
1322 * this case, stop, dropping the lock.
1323 */
1324 if (dropped_lock)
1325 goto again;
1326 }
1327 wake_up(&c->free_buffer_wait);
1328 dm_bufio_unlock(c);
1329
1330 a = xchg(&c->async_write_error, 0);
1331 f = dm_bufio_issue_flush(c);
1332 if (a)
1333 return a;
1334
1335 return f;
1336}
1337EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1338
1339/*
1340 * Use dm-io to send an empty barrier to flush the device.
1341 */
1342int dm_bufio_issue_flush(struct dm_bufio_client *c)
1343{
1344 struct dm_io_request io_req = {
1345 .bi_op = REQ_OP_WRITE,
1346 .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
1347 .mem.type = DM_IO_KMEM,
1348 .mem.ptr.addr = NULL,
1349 .client = c->dm_io,
1350 };
1351 struct dm_io_region io_reg = {
1352 .bdev = c->bdev,
1353 .sector = 0,
1354 .count = 0,
1355 };
1356
1357 BUG_ON(dm_bufio_in_request());
1358
1359 return dm_io(&io_req, 1, &io_reg, NULL);
1360}
1361EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1362
1363/*
1364 * Use dm-io to send a discard request to flush the device.
1365 */
1366int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
1367{
1368 struct dm_io_request io_req = {
1369 .bi_op = REQ_OP_DISCARD,
1370 .bi_op_flags = REQ_SYNC,
1371 .mem.type = DM_IO_KMEM,
1372 .mem.ptr.addr = NULL,
1373 .client = c->dm_io,
1374 };
1375 struct dm_io_region io_reg = {
1376 .bdev = c->bdev,
1377 .sector = block_to_sector(c, block),
1378 .count = block_to_sector(c, count),
1379 };
1380
1381 BUG_ON(dm_bufio_in_request());
1382
1383 return dm_io(&io_req, 1, &io_reg, NULL);
1384}
1385EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
1386
1387/*
1388 * We first delete any other buffer that may be at that new location.
1389 *
1390 * Then, we write the buffer to the original location if it was dirty.
1391 *
1392 * Then, if we are the only one who is holding the buffer, relink the buffer
1393 * in the buffer tree for the new location.
1394 *
1395 * If there was someone else holding the buffer, we write it to the new
1396 * location but not relink it, because that other user needs to have the buffer
1397 * at the same place.
1398 */
1399void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1400{
1401 struct dm_bufio_client *c = b->c;
1402 struct dm_buffer *new;
1403
1404 BUG_ON(dm_bufio_in_request());
1405
1406 dm_bufio_lock(c);
1407
1408retry:
1409 new = __find(c, new_block);
1410 if (new) {
1411 if (new->hold_count) {
1412 __wait_for_free_buffer(c);
1413 goto retry;
1414 }
1415
1416 /*
1417 * FIXME: Is there any point waiting for a write that's going
1418 * to be overwritten in a bit?
1419 */
1420 __make_buffer_clean(new);
1421 __unlink_buffer(new);
1422 __free_buffer_wake(new);
1423 }
1424
1425 BUG_ON(!b->hold_count);
1426 BUG_ON(test_bit(B_READING, &b->state));
1427
1428 __write_dirty_buffer(b, NULL);
1429 if (b->hold_count == 1) {
1430 wait_on_bit_io(&b->state, B_WRITING,
1431 TASK_UNINTERRUPTIBLE);
1432 set_bit(B_DIRTY, &b->state);
1433 b->dirty_start = 0;
1434 b->dirty_end = c->block_size;
1435 __unlink_buffer(b);
1436 __link_buffer(b, new_block, LIST_DIRTY);
1437 } else {
1438 sector_t old_block;
1439 wait_on_bit_lock_io(&b->state, B_WRITING,
1440 TASK_UNINTERRUPTIBLE);
1441 /*
1442 * Relink buffer to "new_block" so that write_callback
1443 * sees "new_block" as a block number.
1444 * After the write, link the buffer back to old_block.
1445 * All this must be done in bufio lock, so that block number
1446 * change isn't visible to other threads.
1447 */
1448 old_block = b->block;
1449 __unlink_buffer(b);
1450 __link_buffer(b, new_block, b->list_mode);
1451 submit_io(b, REQ_OP_WRITE, write_endio);
1452 wait_on_bit_io(&b->state, B_WRITING,
1453 TASK_UNINTERRUPTIBLE);
1454 __unlink_buffer(b);
1455 __link_buffer(b, old_block, b->list_mode);
1456 }
1457
1458 dm_bufio_unlock(c);
1459 dm_bufio_release(b);
1460}
1461EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1462
1463static void forget_buffer_locked(struct dm_buffer *b)
1464{
1465 if (likely(!b->hold_count) && likely(!b->state)) {
1466 __unlink_buffer(b);
1467 __free_buffer_wake(b);
1468 }
1469}
1470
1471/*
1472 * Free the given buffer.
1473 *
1474 * This is just a hint, if the buffer is in use or dirty, this function
1475 * does nothing.
1476 */
1477void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1478{
1479 struct dm_buffer *b;
1480
1481 dm_bufio_lock(c);
1482
1483 b = __find(c, block);
1484 if (b)
1485 forget_buffer_locked(b);
1486
1487 dm_bufio_unlock(c);
1488}
1489EXPORT_SYMBOL_GPL(dm_bufio_forget);
1490
1491void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
1492{
1493 struct dm_buffer *b;
1494 sector_t end_block = block + n_blocks;
1495
1496 while (block < end_block) {
1497 dm_bufio_lock(c);
1498
1499 b = __find_next(c, block);
1500 if (b) {
1501 block = b->block + 1;
1502 forget_buffer_locked(b);
1503 }
1504
1505 dm_bufio_unlock(c);
1506
1507 if (!b)
1508 break;
1509 }
1510
1511}
1512EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
1513
1514void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1515{
1516 c->minimum_buffers = n;
1517}
1518EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
1519
1520unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1521{
1522 return c->block_size;
1523}
1524EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1525
1526sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1527{
1528 sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
1529 if (likely(c->sectors_per_block_bits >= 0))
1530 s >>= c->sectors_per_block_bits;
1531 else
1532 sector_div(s, c->block_size >> SECTOR_SHIFT);
1533 return s;
1534}
1535EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1536
1537sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1538{
1539 return b->block;
1540}
1541EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1542
1543void *dm_bufio_get_block_data(struct dm_buffer *b)
1544{
1545 return b->data;
1546}
1547EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1548
1549void *dm_bufio_get_aux_data(struct dm_buffer *b)
1550{
1551 return b + 1;
1552}
1553EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1554
1555struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1556{
1557 return b->c;
1558}
1559EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1560
1561static void drop_buffers(struct dm_bufio_client *c)
1562{
1563 struct dm_buffer *b;
1564 int i;
1565 bool warned = false;
1566
1567 BUG_ON(dm_bufio_in_request());
1568
1569 /*
1570 * An optimization so that the buffers are not written one-by-one.
1571 */
1572 dm_bufio_write_dirty_buffers_async(c);
1573
1574 dm_bufio_lock(c);
1575
1576 while ((b = __get_unclaimed_buffer(c)))
1577 __free_buffer_wake(b);
1578
1579 for (i = 0; i < LIST_SIZE; i++)
1580 list_for_each_entry(b, &c->lru[i], lru_list) {
1581 WARN_ON(!warned);
1582 warned = true;
1583 DMERR("leaked buffer %llx, hold count %u, list %d",
1584 (unsigned long long)b->block, b->hold_count, i);
1585#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1586 stack_trace_print(b->stack_entries, b->stack_len, 1);
1587 /* mark unclaimed to avoid BUG_ON below */
1588 b->hold_count = 0;
1589#endif
1590 }
1591
1592#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1593 while ((b = __get_unclaimed_buffer(c)))
1594 __free_buffer_wake(b);
1595#endif
1596
1597 for (i = 0; i < LIST_SIZE; i++)
1598 BUG_ON(!list_empty(&c->lru[i]));
1599
1600 dm_bufio_unlock(c);
1601}
1602
1603/*
1604 * We may not be able to evict this buffer if IO pending or the client
1605 * is still using it. Caller is expected to know buffer is too old.
1606 *
1607 * And if GFP_NOFS is used, we must not do any I/O because we hold
1608 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1609 * rerouted to different bufio client.
1610 */
1611static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1612{
1613 if (!(gfp & __GFP_FS)) {
1614 if (test_bit(B_READING, &b->state) ||
1615 test_bit(B_WRITING, &b->state) ||
1616 test_bit(B_DIRTY, &b->state))
1617 return false;
1618 }
1619
1620 if (b->hold_count)
1621 return false;
1622
1623 __make_buffer_clean(b);
1624 __unlink_buffer(b);
1625 __free_buffer_wake(b);
1626
1627 return true;
1628}
1629
1630static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1631{
1632 unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
1633 if (likely(c->sectors_per_block_bits >= 0))
1634 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
1635 else
1636 retain_bytes /= c->block_size;
1637 return retain_bytes;
1638}
1639
1640static void __scan(struct dm_bufio_client *c)
1641{
1642 int l;
1643 struct dm_buffer *b, *tmp;
1644 unsigned long freed = 0;
1645 unsigned long count = c->n_buffers[LIST_CLEAN] +
1646 c->n_buffers[LIST_DIRTY];
1647 unsigned long retain_target = get_retain_buffers(c);
1648
1649 for (l = 0; l < LIST_SIZE; l++) {
1650 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1651 if (count - freed <= retain_target)
1652 atomic_long_set(&c->need_shrink, 0);
1653 if (!atomic_long_read(&c->need_shrink))
1654 return;
1655 if (__try_evict_buffer(b, GFP_KERNEL)) {
1656 atomic_long_dec(&c->need_shrink);
1657 freed++;
1658 }
1659 cond_resched();
1660 }
1661 }
1662}
1663
1664static void shrink_work(struct work_struct *w)
1665{
1666 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
1667
1668 dm_bufio_lock(c);
1669 __scan(c);
1670 dm_bufio_unlock(c);
1671}
1672
1673static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1674{
1675 struct dm_bufio_client *c;
1676
1677 c = container_of(shrink, struct dm_bufio_client, shrinker);
1678 atomic_long_add(sc->nr_to_scan, &c->need_shrink);
1679 queue_work(dm_bufio_wq, &c->shrink_work);
1680
1681 return sc->nr_to_scan;
1682}
1683
1684static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1685{
1686 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1687 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1688 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1689 unsigned long retain_target = get_retain_buffers(c);
1690 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
1691
1692 if (unlikely(count < retain_target))
1693 count = 0;
1694 else
1695 count -= retain_target;
1696
1697 if (unlikely(count < queued_for_cleanup))
1698 count = 0;
1699 else
1700 count -= queued_for_cleanup;
1701
1702 return count;
1703}
1704
1705/*
1706 * Create the buffering interface
1707 */
1708struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1709 unsigned reserved_buffers, unsigned aux_size,
1710 void (*alloc_callback)(struct dm_buffer *),
1711 void (*write_callback)(struct dm_buffer *))
1712{
1713 int r;
1714 struct dm_bufio_client *c;
1715 unsigned i;
1716 char slab_name[27];
1717
1718 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
1719 DMERR("%s: block size not specified or is not multiple of 512b", __func__);
1720 r = -EINVAL;
1721 goto bad_client;
1722 }
1723
1724 c = kzalloc(sizeof(*c), GFP_KERNEL);
1725 if (!c) {
1726 r = -ENOMEM;
1727 goto bad_client;
1728 }
1729 c->buffer_tree = RB_ROOT;
1730
1731 c->bdev = bdev;
1732 c->block_size = block_size;
1733 if (is_power_of_2(block_size))
1734 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1735 else
1736 c->sectors_per_block_bits = -1;
1737
1738 c->alloc_callback = alloc_callback;
1739 c->write_callback = write_callback;
1740
1741 for (i = 0; i < LIST_SIZE; i++) {
1742 INIT_LIST_HEAD(&c->lru[i]);
1743 c->n_buffers[i] = 0;
1744 }
1745
1746 mutex_init(&c->lock);
1747 INIT_LIST_HEAD(&c->reserved_buffers);
1748 c->need_reserved_buffers = reserved_buffers;
1749
1750 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
1751
1752 init_waitqueue_head(&c->free_buffer_wait);
1753 c->async_write_error = 0;
1754
1755 c->dm_io = dm_io_client_create();
1756 if (IS_ERR(c->dm_io)) {
1757 r = PTR_ERR(c->dm_io);
1758 goto bad_dm_io;
1759 }
1760
1761 if (block_size <= KMALLOC_MAX_SIZE &&
1762 (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
1763 unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
1764 snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
1765 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
1766 SLAB_RECLAIM_ACCOUNT, NULL);
1767 if (!c->slab_cache) {
1768 r = -ENOMEM;
1769 goto bad;
1770 }
1771 }
1772 if (aux_size)
1773 snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
1774 else
1775 snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
1776 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
1777 0, SLAB_RECLAIM_ACCOUNT, NULL);
1778 if (!c->slab_buffer) {
1779 r = -ENOMEM;
1780 goto bad;
1781 }
1782
1783 while (c->need_reserved_buffers) {
1784 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1785
1786 if (!b) {
1787 r = -ENOMEM;
1788 goto bad;
1789 }
1790 __free_buffer_wake(b);
1791 }
1792
1793 INIT_WORK(&c->shrink_work, shrink_work);
1794 atomic_long_set(&c->need_shrink, 0);
1795
1796 c->shrinker.count_objects = dm_bufio_shrink_count;
1797 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1798 c->shrinker.seeks = 1;
1799 c->shrinker.batch = 0;
1800 r = register_shrinker(&c->shrinker);
1801 if (r)
1802 goto bad;
1803
1804 mutex_lock(&dm_bufio_clients_lock);
1805 dm_bufio_client_count++;
1806 list_add(&c->client_list, &dm_bufio_all_clients);
1807 __cache_size_refresh();
1808 mutex_unlock(&dm_bufio_clients_lock);
1809
1810 return c;
1811
1812bad:
1813 while (!list_empty(&c->reserved_buffers)) {
1814 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1815 struct dm_buffer, lru_list);
1816 list_del(&b->lru_list);
1817 free_buffer(b);
1818 }
1819 kmem_cache_destroy(c->slab_cache);
1820 kmem_cache_destroy(c->slab_buffer);
1821 dm_io_client_destroy(c->dm_io);
1822bad_dm_io:
1823 mutex_destroy(&c->lock);
1824 kfree(c);
1825bad_client:
1826 return ERR_PTR(r);
1827}
1828EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1829
1830/*
1831 * Free the buffering interface.
1832 * It is required that there are no references on any buffers.
1833 */
1834void dm_bufio_client_destroy(struct dm_bufio_client *c)
1835{
1836 unsigned i;
1837
1838 drop_buffers(c);
1839
1840 unregister_shrinker(&c->shrinker);
1841 flush_work(&c->shrink_work);
1842
1843 mutex_lock(&dm_bufio_clients_lock);
1844
1845 list_del(&c->client_list);
1846 dm_bufio_client_count--;
1847 __cache_size_refresh();
1848
1849 mutex_unlock(&dm_bufio_clients_lock);
1850
1851 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1852 BUG_ON(c->need_reserved_buffers);
1853
1854 while (!list_empty(&c->reserved_buffers)) {
1855 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1856 struct dm_buffer, lru_list);
1857 list_del(&b->lru_list);
1858 free_buffer(b);
1859 }
1860
1861 for (i = 0; i < LIST_SIZE; i++)
1862 if (c->n_buffers[i])
1863 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1864
1865 for (i = 0; i < LIST_SIZE; i++)
1866 BUG_ON(c->n_buffers[i]);
1867
1868 kmem_cache_destroy(c->slab_cache);
1869 kmem_cache_destroy(c->slab_buffer);
1870 dm_io_client_destroy(c->dm_io);
1871 mutex_destroy(&c->lock);
1872 kfree(c);
1873}
1874EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1875
1876void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1877{
1878 c->start = start;
1879}
1880EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
1881
1882static unsigned get_max_age_hz(void)
1883{
1884 unsigned max_age = READ_ONCE(dm_bufio_max_age);
1885
1886 if (max_age > UINT_MAX / HZ)
1887 max_age = UINT_MAX / HZ;
1888
1889 return max_age * HZ;
1890}
1891
1892static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1893{
1894 return time_after_eq(jiffies, b->last_accessed + age_hz);
1895}
1896
1897static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1898{
1899 struct dm_buffer *b, *tmp;
1900 unsigned long retain_target = get_retain_buffers(c);
1901 unsigned long count;
1902 LIST_HEAD(write_list);
1903
1904 dm_bufio_lock(c);
1905
1906 __check_watermark(c, &write_list);
1907 if (unlikely(!list_empty(&write_list))) {
1908 dm_bufio_unlock(c);
1909 __flush_write_list(&write_list);
1910 dm_bufio_lock(c);
1911 }
1912
1913 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1914 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1915 if (count <= retain_target)
1916 break;
1917
1918 if (!older_than(b, age_hz))
1919 break;
1920
1921 if (__try_evict_buffer(b, 0))
1922 count--;
1923
1924 cond_resched();
1925 }
1926
1927 dm_bufio_unlock(c);
1928}
1929
1930static void do_global_cleanup(struct work_struct *w)
1931{
1932 struct dm_bufio_client *locked_client = NULL;
1933 struct dm_bufio_client *current_client;
1934 struct dm_buffer *b;
1935 unsigned spinlock_hold_count;
1936 unsigned long threshold = dm_bufio_cache_size -
1937 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
1938 unsigned long loops = global_num * 2;
1939
1940 mutex_lock(&dm_bufio_clients_lock);
1941
1942 while (1) {
1943 cond_resched();
1944
1945 spin_lock(&global_spinlock);
1946 if (unlikely(dm_bufio_current_allocated <= threshold))
1947 break;
1948
1949 spinlock_hold_count = 0;
1950get_next:
1951 if (!loops--)
1952 break;
1953 if (unlikely(list_empty(&global_queue)))
1954 break;
1955 b = list_entry(global_queue.prev, struct dm_buffer, global_list);
1956
1957 if (b->accessed) {
1958 b->accessed = 0;
1959 list_move(&b->global_list, &global_queue);
1960 if (likely(++spinlock_hold_count < 16))
1961 goto get_next;
1962 spin_unlock(&global_spinlock);
1963 continue;
1964 }
1965
1966 current_client = b->c;
1967 if (unlikely(current_client != locked_client)) {
1968 if (locked_client)
1969 dm_bufio_unlock(locked_client);
1970
1971 if (!dm_bufio_trylock(current_client)) {
1972 spin_unlock(&global_spinlock);
1973 dm_bufio_lock(current_client);
1974 locked_client = current_client;
1975 continue;
1976 }
1977
1978 locked_client = current_client;
1979 }
1980
1981 spin_unlock(&global_spinlock);
1982
1983 if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
1984 spin_lock(&global_spinlock);
1985 list_move(&b->global_list, &global_queue);
1986 spin_unlock(&global_spinlock);
1987 }
1988 }
1989
1990 spin_unlock(&global_spinlock);
1991
1992 if (locked_client)
1993 dm_bufio_unlock(locked_client);
1994
1995 mutex_unlock(&dm_bufio_clients_lock);
1996}

static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	__cache_size_refresh();

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}
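
/*
 * Note: holding dm_bufio_clients_lock across the walk above keeps
 * clients from being created or destroyed while they are scanned.
 */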

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}
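
/*
 * work_fn() re-arms itself, so once dm_bufio_init() has queued it the
 * aging pass runs every DM_BUFIO_WORK_TIMER_SECS seconds for the life
 * of the module; dm_bufio_exit() stops it with
 * cancel_delayed_work_sync().
 */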

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

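	/*
	 * Worked example (hypothetical machine): with 16 GiB of non-high
	 * memory and DM_BUFIO_MEMORY_PERCENT == 2, mem starts at about
	 * 327 MiB. On 64-bit kernels VMALLOC_TOTAL is vastly larger than
	 * that, so the 25% vmalloc clamp rarely bites; it matters mainly
	 * on 32-bit kernels, where a vmalloc area on the order of 128 MiB
	 * would cap the default at roughly 32 MiB.
	 */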
	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;

	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
	flush_workqueue(dm_bufio_wq);
	destroy_workqueue(dm_bufio_wq);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

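/*
 * The parameters below are exposed under
 * /sys/module/dm_bufio/parameters/; those declared with S_IWUSR can be
 * tuned at runtime by root, for example (hypothetical value):
 *
 *	echo 268435456 > /sys/module/dm_bufio/parameters/max_cache_size_bytes
 *
 * with 0 falling back to the computed default (see
 * __cache_size_refresh()).
 */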
module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");