1/*
2 * Copyright (C) 2009-2011 Red Hat, Inc.
3 *
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
5 *
6 * This file is released under the GPL.
7 */
8
9#include <linux/dm-bufio.h>
10
11#include <linux/device-mapper.h>
12#include <linux/dm-io.h>
13#include <linux/slab.h>
14#include <linux/sched/mm.h>
15#include <linux/jiffies.h>
16#include <linux/vmalloc.h>
17#include <linux/shrinker.h>
18#include <linux/module.h>
19#include <linux/rbtree.h>
20#include <linux/stacktrace.h>
21#include <linux/jump_label.h>
22
23#define DM_MSG_PREFIX "bufio"
24
25/*
26 * Memory management policy:
27 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
28 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
29 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
30 * Start background writeback when the number of dirty buffers exceeds
31 * DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
32 */
33#define DM_BUFIO_MIN_BUFFERS 8
34
35#define DM_BUFIO_MEMORY_PERCENT 2
36#define DM_BUFIO_VMALLOC_PERCENT 25
37#define DM_BUFIO_WRITEBACK_RATIO 3
38#define DM_BUFIO_LOW_WATERMARK_RATIO 16
39
40/*
41 * Check buffer ages in this interval (seconds)
42 */
43#define DM_BUFIO_WORK_TIMER_SECS 30
44
45/*
46 * Free buffers when they are older than this (seconds)
47 */
48#define DM_BUFIO_DEFAULT_AGE_SECS 300
49
50/*
51 * The nr of bytes of cached data to keep around.
52 */
53#define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
54
55/*
56 * Align buffer writes to this boundary.
57 * Tests show that SSDs have the highest IOPS when using 4k writes.
58 */
59#define DM_BUFIO_WRITE_ALIGN 4096
60
61/*
62 * dm_buffer->list_mode
63 */
64#define LIST_CLEAN 0
65#define LIST_DIRTY 1
66#define LIST_SIZE 2
67
68/*
69 * Linking of buffers:
70 * All buffers are linked to buffer_tree with their node field.
71 *
72 * Clean buffers that are not being written (B_WRITING not set)
73 * are linked to lru[LIST_CLEAN] with their lru_list field.
74 *
75 * Dirty and clean buffers that are being written are linked to
76 * lru[LIST_DIRTY] with their lru_list field. When the write
77 * finishes, the buffer cannot be relinked immediately (because we
78 * are in an interrupt context and relinking requires process
79 * context), so some clean-not-writing buffers can be held on
80 * dirty_lru too. They are later added to lru in the process
81 * context.
82 */
83struct dm_bufio_client {
84 struct mutex lock;
85 spinlock_t spinlock;
86 bool no_sleep;
87
88 struct list_head lru[LIST_SIZE];
89 unsigned long n_buffers[LIST_SIZE];
90
91 struct block_device *bdev;
92 unsigned block_size;
93 s8 sectors_per_block_bits;
94 void (*alloc_callback)(struct dm_buffer *);
95 void (*write_callback)(struct dm_buffer *);
96 struct kmem_cache *slab_buffer;
97 struct kmem_cache *slab_cache;
98 struct dm_io_client *dm_io;
99
100 struct list_head reserved_buffers;
101 unsigned need_reserved_buffers;
102
103 unsigned minimum_buffers;
104
105 struct rb_root buffer_tree;
106 wait_queue_head_t free_buffer_wait;
107
108 sector_t start;
109
110 int async_write_error;
111
112 struct list_head client_list;
113
114 struct shrinker shrinker;
115 struct work_struct shrink_work;
116 atomic_long_t need_shrink;
117};
118
119/*
120 * Buffer state bits.
121 */
122#define B_READING 0
123#define B_WRITING 1
124#define B_DIRTY 2
125
126/*
127 * Describes how the block was allocated:
128 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
129 * See the comment at alloc_buffer_data.
130 */
131enum data_mode {
132 DATA_MODE_SLAB = 0,
133 DATA_MODE_GET_FREE_PAGES = 1,
134 DATA_MODE_VMALLOC = 2,
135 DATA_MODE_LIMIT = 3
136};
137
138struct dm_buffer {
139 struct rb_node node;
140 struct list_head lru_list;
141 struct list_head global_list;
142 sector_t block;
143 void *data;
144 unsigned char data_mode; /* DATA_MODE_* */
145 unsigned char list_mode; /* LIST_* */
146 blk_status_t read_error;
147 blk_status_t write_error;
148 unsigned accessed;
149 unsigned hold_count;
150 unsigned long state;
151 unsigned long last_accessed;
152 unsigned dirty_start;
153 unsigned dirty_end;
154 unsigned write_start;
155 unsigned write_end;
156 struct dm_bufio_client *c;
157 struct list_head write_list;
158 void (*end_io)(struct dm_buffer *, blk_status_t);
159#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
160#define MAX_STACK 10
161 unsigned int stack_len;
162 unsigned long stack_entries[MAX_STACK];
163#endif
164};
165
166static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
167
168/*----------------------------------------------------------------*/
169
170#define dm_bufio_in_request() (!!current->bio_list)
171
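/*
 * Client locking helpers. Clients created with DM_BUFIO_CLIENT_NO_SLEEP use
 * a bottom-half-safe spinlock; all other clients use the mutex.
 */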
172static void dm_bufio_lock(struct dm_bufio_client *c)
173{
174 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
175 spin_lock_bh(&c->spinlock);
176 else
177 mutex_lock_nested(&c->lock, dm_bufio_in_request());
178}
179
180static int dm_bufio_trylock(struct dm_bufio_client *c)
181{
182 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
183 return spin_trylock_bh(&c->spinlock);
184 else
185 return mutex_trylock(&c->lock);
186}
187
188static void dm_bufio_unlock(struct dm_bufio_client *c)
189{
190 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
191 spin_unlock_bh(&c->spinlock);
192 else
193 mutex_unlock(&c->lock);
194}
195
196/*----------------------------------------------------------------*/
197
198/*
199 * Default cache size: available memory divided by the ratio.
200 */
201static unsigned long dm_bufio_default_cache_size;
202
203/*
204 * Total cache size set by the user.
205 */
206static unsigned long dm_bufio_cache_size;
207
208/*
209 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
210 * at any time. If it disagrees, the user has changed cache size.
211 */
212static unsigned long dm_bufio_cache_size_latch;
213
214static DEFINE_SPINLOCK(global_spinlock);
215
216static LIST_HEAD(global_queue);
217
218static unsigned long global_num = 0;
219
220/*
221 * Buffers are freed after this timeout
222 */
223static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
224static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
225
226static unsigned long dm_bufio_peak_allocated;
227static unsigned long dm_bufio_allocated_kmem_cache;
228static unsigned long dm_bufio_allocated_get_free_pages;
229static unsigned long dm_bufio_allocated_vmalloc;
230static unsigned long dm_bufio_current_allocated;
231
232/*----------------------------------------------------------------*/
233
234/*
235 * The current number of clients.
236 */
237static int dm_bufio_client_count;
238
239/*
240 * The list of all clients.
241 */
242static LIST_HEAD(dm_bufio_all_clients);
243
244/*
245 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
246 */
247static DEFINE_MUTEX(dm_bufio_clients_lock);
248
249static struct workqueue_struct *dm_bufio_wq;
250static struct delayed_work dm_bufio_cleanup_old_work;
251static struct work_struct dm_bufio_replacement_work;
252
253
254#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
255static void buffer_record_stack(struct dm_buffer *b)
256{
257 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
258}
259#endif
260
261/*----------------------------------------------------------------
262 * A red/black tree acts as an index for all the buffers.
263 *--------------------------------------------------------------*/
264static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
265{
266 struct rb_node *n = c->buffer_tree.rb_node;
267 struct dm_buffer *b;
268
269 while (n) {
270 b = container_of(n, struct dm_buffer, node);
271
272 if (b->block == block)
273 return b;
274
275 n = block < b->block ? n->rb_left : n->rb_right;
276 }
277
278 return NULL;
279}
280
281static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
282{
283 struct rb_node *n = c->buffer_tree.rb_node;
284 struct dm_buffer *b;
285 struct dm_buffer *best = NULL;
286
287 while (n) {
288 b = container_of(n, struct dm_buffer, node);
289
290 if (b->block == block)
291 return b;
292
293 if (block <= b->block) {
294 n = n->rb_left;
295 best = b;
296 } else {
297 n = n->rb_right;
298 }
299 }
300
301 return best;
302}
303
304static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
305{
306 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
307 struct dm_buffer *found;
308
309 while (*new) {
310 found = container_of(*new, struct dm_buffer, node);
311
312 if (found->block == b->block) {
313 BUG_ON(found != b);
314 return;
315 }
316
317 parent = *new;
318 new = b->block < found->block ?
319 &found->node.rb_left : &found->node.rb_right;
320 }
321
322 rb_link_node(&b->node, parent, new);
323 rb_insert_color(&b->node, &c->buffer_tree);
324}
325
326static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
327{
328 rb_erase(&b->node, &c->buffer_tree);
329}
330
331/*----------------------------------------------------------------*/
332
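/*
 * Update the global memory accounting and the global LRU when a buffer is
 * linked (unlink == false) or unlinked (unlink == true). Kicks off the
 * global cleanup work if the cache grows past dm_bufio_cache_size.
 */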
333static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
334{
335 unsigned char data_mode;
336 long diff;
337
338 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
339 &dm_bufio_allocated_kmem_cache,
340 &dm_bufio_allocated_get_free_pages,
341 &dm_bufio_allocated_vmalloc,
342 };
343
344 data_mode = b->data_mode;
345 diff = (long)b->c->block_size;
346 if (unlink)
347 diff = -diff;
348
349 spin_lock(&global_spinlock);
350
351 *class_ptr[data_mode] += diff;
352
353 dm_bufio_current_allocated += diff;
354
355 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
356 dm_bufio_peak_allocated = dm_bufio_current_allocated;
357
358 b->accessed = 1;
359
360 if (!unlink) {
361 list_add(&b->global_list, &global_queue);
362 global_num++;
363 if (dm_bufio_current_allocated > dm_bufio_cache_size)
364 queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
365 } else {
366 list_del(&b->global_list);
367 global_num--;
368 }
369
370 spin_unlock(&global_spinlock);
371}
372
373/*
374 * Change the number of clients and recalculate per-client limit.
375 */
376static void __cache_size_refresh(void)
377{
378 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
379 BUG_ON(dm_bufio_client_count < 0);
380
381 dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
382
383 /*
384 * Use default if set to 0 and report the actual cache size used.
385 */
386 if (!dm_bufio_cache_size_latch) {
387 (void)cmpxchg(&dm_bufio_cache_size, 0,
388 dm_bufio_default_cache_size);
389 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
390 }
391}
392
393/*
394 * Allocating buffer data.
395 *
396 * Small buffers are allocated with kmem_cache, to use space optimally.
397 *
398 * For large buffers, we choose between get_free_pages and vmalloc.
399 * Each has advantages and disadvantages.
400 *
401 * __get_free_pages can randomly fail if the memory is fragmented.
402 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
403 * as low as 128M) so using it for caching is not appropriate.
404 *
405 * If the allocation may fail we use __get_free_pages. Memory fragmentation
406 * won't have a fatal effect here, but it just causes flushes of some other
407 * buffers and more I/O will be performed. Don't use __get_free_pages if it
408 * always fails (i.e. order >= MAX_ORDER).
409 *
410 * If the allocation shouldn't fail we use __vmalloc. This is only for the
411 * initial reserve allocation, so there's no risk of wasting all vmalloc
412 * space.
413 */
414static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
415 unsigned char *data_mode)
416{
417 if (unlikely(c->slab_cache != NULL)) {
418 *data_mode = DATA_MODE_SLAB;
419 return kmem_cache_alloc(c->slab_cache, gfp_mask);
420 }
421
422 if (c->block_size <= KMALLOC_MAX_SIZE &&
423 gfp_mask & __GFP_NORETRY) {
424 *data_mode = DATA_MODE_GET_FREE_PAGES;
425 return (void *)__get_free_pages(gfp_mask,
426 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
427 }
428
429 *data_mode = DATA_MODE_VMALLOC;
430
431 /*
432 * __vmalloc allocates the data pages and auxiliary structures with
433 * gfp_flags that were specified, but pagetables are always allocated
434 * with GFP_KERNEL, no matter what was specified as gfp_mask.
435 *
436 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
437 * all allocations done by this process (including pagetables) are done
438 * as if GFP_NOIO was specified.
439 */
440 if (gfp_mask & __GFP_NORETRY) {
441 unsigned noio_flag = memalloc_noio_save();
442 void *ptr = __vmalloc(c->block_size, gfp_mask);
443
444 memalloc_noio_restore(noio_flag);
445 return ptr;
446 }
447
448 return __vmalloc(c->block_size, gfp_mask);
449}
450
451/*
452 * Free buffer's data.
453 */
454static void free_buffer_data(struct dm_bufio_client *c,
455 void *data, unsigned char data_mode)
456{
457 switch (data_mode) {
458 case DATA_MODE_SLAB:
459 kmem_cache_free(c->slab_cache, data);
460 break;
461
462 case DATA_MODE_GET_FREE_PAGES:
463 free_pages((unsigned long)data,
464 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
465 break;
466
467 case DATA_MODE_VMALLOC:
468 vfree(data);
469 break;
470
471 default:
472 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
473 data_mode);
474 BUG();
475 }
476}
477
478/*
479 * Allocate buffer and its data.
480 */
481static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
482{
483 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
484
485 if (!b)
486 return NULL;
487
488 b->c = c;
489
490 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
491 if (!b->data) {
492 kmem_cache_free(c->slab_buffer, b);
493 return NULL;
494 }
495
496#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
497 b->stack_len = 0;
498#endif
499 return b;
500}
501
502/*
503 * Free buffer and its data.
504 */
505static void free_buffer(struct dm_buffer *b)
506{
507 struct dm_bufio_client *c = b->c;
508
509 free_buffer_data(c, b->data, b->data_mode);
510 kmem_cache_free(c->slab_buffer, b);
511}
512
513/*
514 * Link buffer to the buffer tree and clean or dirty queue.
515 */
516static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
517{
518 struct dm_bufio_client *c = b->c;
519
520 c->n_buffers[dirty]++;
521 b->block = block;
522 b->list_mode = dirty;
523 list_add(&b->lru_list, &c->lru[dirty]);
524 __insert(b->c, b);
525 b->last_accessed = jiffies;
526
527 adjust_total_allocated(b, false);
528}
529
530/*
531 * Unlink buffer from the buffer tree and dirty or clean queue.
532 */
533static void __unlink_buffer(struct dm_buffer *b)
534{
535 struct dm_bufio_client *c = b->c;
536
537 BUG_ON(!c->n_buffers[b->list_mode]);
538
539 c->n_buffers[b->list_mode]--;
540 __remove(b->c, b);
541 list_del(&b->lru_list);
542
543 adjust_total_allocated(b, true);
544}
545
546/*
547 * Place the buffer to the head of dirty or clean LRU queue.
548 */
549static void __relink_lru(struct dm_buffer *b, int dirty)
550{
551 struct dm_bufio_client *c = b->c;
552
553 b->accessed = 1;
554
555 BUG_ON(!c->n_buffers[b->list_mode]);
556
557 c->n_buffers[b->list_mode]--;
558 c->n_buffers[dirty]++;
559 b->list_mode = dirty;
560 list_move(&b->lru_list, &c->lru[dirty]);
561 b->last_accessed = jiffies;
562}
563
564/*----------------------------------------------------------------
565 * Submit I/O on the buffer.
566 *
567 * Bio interface is faster but it has some problems:
568 * the vector list is limited (increasing this limit increases
569 * memory-consumption per buffer, so it is not viable);
570 *
571 * the memory must be direct-mapped, not vmalloced;
572 *
573 * If the buffer is not vmalloced, try using the bio interface.
575 *
576 * If the buffer is big, if it is vmalloced or if the underlying device
577 * rejects the bio because it is too large, use dm-io layer to do the I/O.
578 * The dm-io layer splits the I/O into multiple requests, avoiding the above
579 * shortcomings.
580 *--------------------------------------------------------------*/
581
582/*
583 * dm-io completion routine. It just calls b->end_io, pretending that
584 * the request was handled directly with the bio interface.
585 */
586static void dmio_complete(unsigned long error, void *context)
587{
588 struct dm_buffer *b = context;
589
590 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
591}
592
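/*
 * Issue the transfer through the dm-io interface, which also handles
 * vmalloc-ed buffers and splits requests that are too large.
 */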
593static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
594 unsigned n_sectors, unsigned offset)
595{
596 int r;
597 struct dm_io_request io_req = {
598 .bi_opf = op,
599 .notify.fn = dmio_complete,
600 .notify.context = b,
601 .client = b->c->dm_io,
602 };
603 struct dm_io_region region = {
604 .bdev = b->c->bdev,
605 .sector = sector,
606 .count = n_sectors,
607 };
608
609 if (b->data_mode != DATA_MODE_VMALLOC) {
610 io_req.mem.type = DM_IO_KMEM;
611 io_req.mem.ptr.addr = (char *)b->data + offset;
612 } else {
613 io_req.mem.type = DM_IO_VMA;
614 io_req.mem.ptr.vma = (char *)b->data + offset;
615 }
616
617	r = dm_io(&io_req, 1, &region, NULL);
618 if (unlikely(r))
619 b->end_io(b, errno_to_blk_status(r));
620}
621
622static void bio_complete(struct bio *bio)
623{
624 struct dm_buffer *b = bio->bi_private;
625 blk_status_t status = bio->bi_status;
626 bio_uninit(bio);
627 kfree(bio);
628 b->end_io(b, status);
629}
630
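/*
 * Try to issue the transfer with a kmalloc-ed bio; fall back to dm-io if
 * the bio cannot be allocated or a page cannot be added to it.
 */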
631static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
632 unsigned n_sectors, unsigned offset)
633{
634 struct bio *bio;
635 char *ptr;
636 unsigned vec_size, len;
637
638 vec_size = b->c->block_size >> PAGE_SHIFT;
639 if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
640 vec_size += 2;
641
642 bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
643 if (!bio) {
644dmio:
645 use_dmio(b, op, sector, n_sectors, offset);
646 return;
647 }
648 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
649 bio->bi_iter.bi_sector = sector;
650 bio->bi_end_io = bio_complete;
651 bio->bi_private = b;
652
653 ptr = (char *)b->data + offset;
654 len = n_sectors << SECTOR_SHIFT;
655
656 do {
657 unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
658 if (!bio_add_page(bio, virt_to_page(ptr), this_step,
659 offset_in_page(ptr))) {
660 bio_put(bio);
661 goto dmio;
662 }
663
664 len -= this_step;
665 ptr += this_step;
666 } while (len > 0);
667
668 submit_bio(bio);
669}
670
671static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
672{
673 sector_t sector;
674
675 if (likely(c->sectors_per_block_bits >= 0))
676 sector = block << c->sectors_per_block_bits;
677 else
678 sector = block * (c->block_size >> SECTOR_SHIFT);
679 sector += c->start;
680
681 return sector;
682}
683
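/*
 * Compute the sector range to transfer (the whole block for reads, the
 * dirty range rounded out to DM_BUFIO_WRITE_ALIGN for writes, after calling
 * the client's write_callback) and submit it via the bio or dm-io path
 * depending on how the buffer data was allocated.
 */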
684static void submit_io(struct dm_buffer *b, enum req_op op,
685 void (*end_io)(struct dm_buffer *, blk_status_t))
686{
687 unsigned n_sectors;
688 sector_t sector;
689 unsigned offset, end;
690
691 b->end_io = end_io;
692
693 sector = block_to_sector(b->c, b->block);
694
695 if (op != REQ_OP_WRITE) {
696 n_sectors = b->c->block_size >> SECTOR_SHIFT;
697 offset = 0;
698 } else {
699 if (b->c->write_callback)
700 b->c->write_callback(b);
701 offset = b->write_start;
702 end = b->write_end;
703 offset &= -DM_BUFIO_WRITE_ALIGN;
704 end += DM_BUFIO_WRITE_ALIGN - 1;
705 end &= -DM_BUFIO_WRITE_ALIGN;
706 if (unlikely(end > b->c->block_size))
707 end = b->c->block_size;
708
709 sector += offset >> SECTOR_SHIFT;
710 n_sectors = (end - offset) >> SECTOR_SHIFT;
711 }
712
713 if (b->data_mode != DATA_MODE_VMALLOC)
714 use_bio(b, op, sector, n_sectors, offset);
715 else
716 use_dmio(b, op, sector, n_sectors, offset);
717}
718
719/*----------------------------------------------------------------
720 * Writing dirty buffers
721 *--------------------------------------------------------------*/
722
723/*
724 * The endio routine for write.
725 *
726 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
727 * it.
728 */
729static void write_endio(struct dm_buffer *b, blk_status_t status)
730{
731 b->write_error = status;
732 if (unlikely(status)) {
733 struct dm_bufio_client *c = b->c;
734
735 (void)cmpxchg(&c->async_write_error, 0,
736 blk_status_to_errno(status));
737 }
738
739 BUG_ON(!test_bit(B_WRITING, &b->state));
740
741 smp_mb__before_atomic();
742 clear_bit(B_WRITING, &b->state);
743 smp_mb__after_atomic();
744
745 wake_up_bit(&b->state, B_WRITING);
746}
747
748/*
749 * Initiate a write on a dirty buffer, but don't wait for it.
750 *
751 * - If the buffer is not dirty, exit.
752 * - If there is a previous write going on, wait for it to finish (we can't
753 * have two writes on the same buffer simultaneously).
754 * - Submit our write and don't wait on it. We set B_WRITING indicating
755 * that there is a write in progress.
756 */
757static void __write_dirty_buffer(struct dm_buffer *b,
758 struct list_head *write_list)
759{
760 if (!test_bit(B_DIRTY, &b->state))
761 return;
762
763 clear_bit(B_DIRTY, &b->state);
764 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
765
766 b->write_start = b->dirty_start;
767 b->write_end = b->dirty_end;
768
769 if (!write_list)
770 submit_io(b, REQ_OP_WRITE, write_endio);
771 else
772 list_add_tail(&b->write_list, write_list);
773}
774
775static void __flush_write_list(struct list_head *write_list)
776{
777 struct blk_plug plug;
778 blk_start_plug(&plug);
779 while (!list_empty(write_list)) {
780 struct dm_buffer *b =
781 list_entry(write_list->next, struct dm_buffer, write_list);
782 list_del(&b->write_list);
783 submit_io(b, REQ_OP_WRITE, write_endio);
784 cond_resched();
785 }
786 blk_finish_plug(&plug);
787}
788
789/*
790 * Wait until any activity on the buffer finishes. Possibly write the
791 * buffer if it is dirty. When this function finishes, there is no I/O
792 * running on the buffer and the buffer is not dirty.
793 */
794static void __make_buffer_clean(struct dm_buffer *b)
795{
796 BUG_ON(b->hold_count);
797
798 /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
799 if (!smp_load_acquire(&b->state)) /* fast case */
800 return;
801
802 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
803 __write_dirty_buffer(b, NULL);
804 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
805}
806
807/*
808 * Find some buffer that is not held by anybody, clean it, unlink it and
809 * return it.
810 */
811static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
812{
813 struct dm_buffer *b;
814
815 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
816 BUG_ON(test_bit(B_WRITING, &b->state));
817 BUG_ON(test_bit(B_DIRTY, &b->state));
818
819 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
820 unlikely(test_bit_acquire(B_READING, &b->state)))
821 continue;
822
823 if (!b->hold_count) {
824 __make_buffer_clean(b);
825 __unlink_buffer(b);
826 return b;
827 }
828 cond_resched();
829 }
830
831 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
832 return NULL;
833
834 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
835 BUG_ON(test_bit(B_READING, &b->state));
836
837 if (!b->hold_count) {
838 __make_buffer_clean(b);
839 __unlink_buffer(b);
840 return b;
841 }
842 cond_resched();
843 }
844
845 return NULL;
846}
847
848/*
849 * Wait until some other thread frees a buffer or releases its hold count
850 * on some buffer.
851 *
852 * This function is entered with c->lock held, drops it and regains it
853 * before exiting.
854 */
855static void __wait_for_free_buffer(struct dm_bufio_client *c)
856{
857 DECLARE_WAITQUEUE(wait, current);
858
859 add_wait_queue(&c->free_buffer_wait, &wait);
860 set_current_state(TASK_UNINTERRUPTIBLE);
861 dm_bufio_unlock(c);
862
863 io_schedule();
864
865 remove_wait_queue(&c->free_buffer_wait, &wait);
866
867 dm_bufio_lock(c);
868}
869
870enum new_flag {
871 NF_FRESH = 0,
872 NF_READ = 1,
873 NF_GET = 2,
874 NF_PREFETCH = 3
875};
876
877/*
878 * Allocate a new buffer. If the allocation is not possible, wait until
879 * some other thread frees a buffer.
880 *
881 * May drop the lock and regain it.
882 */
883static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
884{
885 struct dm_buffer *b;
886 bool tried_noio_alloc = false;
887
888 /*
889 * dm-bufio is resistant to allocation failures (it just keeps
890 * one buffer reserved in case all the allocations fail).
891 * So set flags to not try too hard:
892 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
893 * mutex and wait ourselves.
894 * __GFP_NORETRY: don't retry and rather return failure
895 * __GFP_NOMEMALLOC: don't use emergency reserves
896 * __GFP_NOWARN: don't print a warning in case of failure
897 *
898 * For debugging, if we set the cache size to 1, no new buffers will
899 * be allocated.
900 */
901 while (1) {
902 if (dm_bufio_cache_size_latch != 1) {
903 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
904 if (b)
905 return b;
906 }
907
908 if (nf == NF_PREFETCH)
909 return NULL;
910
911 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
912 dm_bufio_unlock(c);
913 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
914 dm_bufio_lock(c);
915 if (b)
916 return b;
917 tried_noio_alloc = true;
918 }
919
920 if (!list_empty(&c->reserved_buffers)) {
921 b = list_entry(c->reserved_buffers.next,
922 struct dm_buffer, lru_list);
923 list_del(&b->lru_list);
924 c->need_reserved_buffers++;
925
926 return b;
927 }
928
929 b = __get_unclaimed_buffer(c);
930 if (b)
931 return b;
932
933 __wait_for_free_buffer(c);
934 }
935}
936
937static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
938{
939 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
940
941 if (!b)
942 return NULL;
943
944 if (c->alloc_callback)
945 c->alloc_callback(b);
946
947 return b;
948}
949
950/*
951 * Free a buffer and wake other threads waiting for free buffers.
952 */
953static void __free_buffer_wake(struct dm_buffer *b)
954{
955 struct dm_bufio_client *c = b->c;
956
957 if (!c->need_reserved_buffers)
958 free_buffer(b);
959 else {
960 list_add(&b->lru_list, &c->reserved_buffers);
961 c->need_reserved_buffers--;
962 }
963
964 wake_up(&c->free_buffer_wait);
965}
966
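/*
 * Walk the dirty LRU from the oldest end: relink already-clean buffers to
 * the clean list and queue writes for the dirty ones. If no_wait is set,
 * stop as soon as a buffer that is still being written is found.
 */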
967static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
968 struct list_head *write_list)
969{
970 struct dm_buffer *b, *tmp;
971
972 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
973 BUG_ON(test_bit(B_READING, &b->state));
974
975 if (!test_bit(B_DIRTY, &b->state) &&
976 !test_bit(B_WRITING, &b->state)) {
977 __relink_lru(b, LIST_CLEAN);
978 continue;
979 }
980
981 if (no_wait && test_bit(B_WRITING, &b->state))
982 return;
983
984 __write_dirty_buffer(b, write_list);
985 cond_resched();
986 }
987}
988
989/*
990 * Check if we're over watermark.
991 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times
992 * the number of clean buffers, start writing them back asynchronously.
993 */
994static void __check_watermark(struct dm_bufio_client *c,
995 struct list_head *write_list)
996{
997 if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
998 __write_dirty_buffers_async(c, 1, write_list);
999}
1000
1001/*----------------------------------------------------------------
1002 * Getting a buffer
1003 *--------------------------------------------------------------*/
1004
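/*
 * Look up the buffer for the given block, allocating and linking a new one
 * if necessary. *need_submit is set when the caller must issue the read.
 */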
1005static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1006 enum new_flag nf, int *need_submit,
1007 struct list_head *write_list)
1008{
1009 struct dm_buffer *b, *new_b = NULL;
1010
1011 *need_submit = 0;
1012
1013 b = __find(c, block);
1014 if (b)
1015 goto found_buffer;
1016
1017 if (nf == NF_GET)
1018 return NULL;
1019
1020 new_b = __alloc_buffer_wait(c, nf);
1021 if (!new_b)
1022 return NULL;
1023
1024 /*
1025 * We've had a period where the mutex was unlocked, so need to
1026 * recheck the buffer tree.
1027 */
1028 b = __find(c, block);
1029 if (b) {
1030 __free_buffer_wake(new_b);
1031 goto found_buffer;
1032 }
1033
1034 __check_watermark(c, write_list);
1035
1036 b = new_b;
1037 b->hold_count = 1;
1038 b->read_error = 0;
1039 b->write_error = 0;
1040 __link_buffer(b, block, LIST_CLEAN);
1041
1042 if (nf == NF_FRESH) {
1043 b->state = 0;
1044 return b;
1045 }
1046
1047 b->state = 1 << B_READING;
1048 *need_submit = 1;
1049
1050 return b;
1051
1052found_buffer:
1053 if (nf == NF_PREFETCH)
1054 return NULL;
1055 /*
1056 * Note: it is essential that we don't wait for the buffer to be
1057 * read if dm_bufio_get function is used. Both dm_bufio_get and
1058 * dm_bufio_prefetch can be used in the driver request routine.
1059 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1060 * the same buffer, it would deadlock if we waited.
1061 */
1062 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
1063 return NULL;
1064
1065 b->hold_count++;
1066 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1067 test_bit(B_WRITING, &b->state));
1068 return b;
1069}
1070
1071/*
1072 * The endio routine for reading: set the error, clear the bit and wake up
1073 * anyone waiting on the buffer.
1074 */
1075static void read_endio(struct dm_buffer *b, blk_status_t status)
1076{
1077 b->read_error = status;
1078
1079 BUG_ON(!test_bit(B_READING, &b->state));
1080
1081 smp_mb__before_atomic();
1082 clear_bit(B_READING, &b->state);
1083 smp_mb__after_atomic();
1084
1085 wake_up_bit(&b->state, B_READING);
1086}
1087
1088/*
1089 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1090 * functions is similar except that dm_bufio_new doesn't read the
1091 * buffer from the disk (assuming that the caller overwrites all the data
1092 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1093 */
1094static void *new_read(struct dm_bufio_client *c, sector_t block,
1095 enum new_flag nf, struct dm_buffer **bp)
1096{
1097 int need_submit;
1098 struct dm_buffer *b;
1099
1100 LIST_HEAD(write_list);
1101
1102 dm_bufio_lock(c);
1103 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1104#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1105 if (b && b->hold_count == 1)
1106 buffer_record_stack(b);
1107#endif
1108 dm_bufio_unlock(c);
1109
1110 __flush_write_list(&write_list);
1111
1112 if (!b)
1113 return NULL;
1114
1115 if (need_submit)
1116 submit_io(b, REQ_OP_READ, read_endio);
1117
1118 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1119
1120 if (b->read_error) {
1121 int error = blk_status_to_errno(b->read_error);
1122
1123 dm_bufio_release(b);
1124
1125 return ERR_PTR(error);
1126 }
1127
1128 *bp = b;
1129
1130 return b->data;
1131}
1132
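/*
 * Return the block's data only if the buffer is already cached and fully
 * read; this never issues or waits for I/O.
 */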
1133void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1134 struct dm_buffer **bp)
1135{
1136 return new_read(c, block, NF_GET, bp);
1137}
1138EXPORT_SYMBOL_GPL(dm_bufio_get);
1139
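/*
 * Read the block into the cache (unless it is already there) and return a
 * pointer to its data; may block waiting for the read to complete.
 */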
1140void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1141 struct dm_buffer **bp)
1142{
1143 BUG_ON(dm_bufio_in_request());
1144
1145 return new_read(c, block, NF_READ, bp);
1146}
1147EXPORT_SYMBOL_GPL(dm_bufio_read);
1148
1149void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1150 struct dm_buffer **bp)
1151{
1152 BUG_ON(dm_bufio_in_request());
1153
1154 return new_read(c, block, NF_FRESH, bp);
1155}
1156EXPORT_SYMBOL_GPL(dm_bufio_new);
1157
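/*
 * Start asynchronous reads for n_blocks consecutive blocks; blocks that are
 * already cached or being read are skipped and the reads are not waited for.
 */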
1158void dm_bufio_prefetch(struct dm_bufio_client *c,
1159 sector_t block, unsigned n_blocks)
1160{
1161 struct blk_plug plug;
1162
1163 LIST_HEAD(write_list);
1164
1165 BUG_ON(dm_bufio_in_request());
1166
1167 blk_start_plug(&plug);
1168 dm_bufio_lock(c);
1169
1170 for (; n_blocks--; block++) {
1171 int need_submit;
1172 struct dm_buffer *b;
1173 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1174 &write_list);
1175 if (unlikely(!list_empty(&write_list))) {
1176 dm_bufio_unlock(c);
1177 blk_finish_plug(&plug);
1178 __flush_write_list(&write_list);
1179 blk_start_plug(&plug);
1180 dm_bufio_lock(c);
1181 }
1182 if (unlikely(b != NULL)) {
1183 dm_bufio_unlock(c);
1184
1185 if (need_submit)
1186 submit_io(b, REQ_OP_READ, read_endio);
1187 dm_bufio_release(b);
1188
1189 cond_resched();
1190
1191 if (!n_blocks)
1192 goto flush_plug;
1193 dm_bufio_lock(c);
1194 }
1195 }
1196
1197 dm_bufio_unlock(c);
1198
1199flush_plug:
1200 blk_finish_plug(&plug);
1201}
1202EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1203
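/*
 * Drop one reference to the buffer. When the last reference is dropped,
 * waiters are woken and a buffer that hit an I/O error (and is neither
 * dirty nor under I/O) is discarded.
 */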
1204void dm_bufio_release(struct dm_buffer *b)
1205{
1206 struct dm_bufio_client *c = b->c;
1207
1208 dm_bufio_lock(c);
1209
1210 BUG_ON(!b->hold_count);
1211
1212 b->hold_count--;
1213 if (!b->hold_count) {
1214 wake_up(&c->free_buffer_wait);
1215
1216 /*
1217 * If there were errors on the buffer, and the buffer is not
1218 * to be written, free the buffer. There is no point in caching
1219 * an invalid buffer.
1220 */
1221 if ((b->read_error || b->write_error) &&
1222 !test_bit_acquire(B_READING, &b->state) &&
1223 !test_bit(B_WRITING, &b->state) &&
1224 !test_bit(B_DIRTY, &b->state)) {
1225 __unlink_buffer(b);
1226 __free_buffer_wake(b);
1227 }
1228 }
1229
1230 dm_bufio_unlock(c);
1231}
1232EXPORT_SYMBOL_GPL(dm_bufio_release);
1233
1234void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
1235 unsigned start, unsigned end)
1236{
1237 struct dm_bufio_client *c = b->c;
1238
1239 BUG_ON(start >= end);
1240 BUG_ON(end > b->c->block_size);
1241
1242 dm_bufio_lock(c);
1243
1244 BUG_ON(test_bit(B_READING, &b->state));
1245
1246 if (!test_and_set_bit(B_DIRTY, &b->state)) {
1247 b->dirty_start = start;
1248 b->dirty_end = end;
1249 __relink_lru(b, LIST_DIRTY);
1250 } else {
1251 if (start < b->dirty_start)
1252 b->dirty_start = start;
1253 if (end > b->dirty_end)
1254 b->dirty_end = end;
1255 }
1256
1257 dm_bufio_unlock(c);
1258}
1259EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
1260
1261void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1262{
1263 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
1264}
1265EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1266
1267void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1268{
1269 LIST_HEAD(write_list);
1270
1271 BUG_ON(dm_bufio_in_request());
1272
1273 dm_bufio_lock(c);
1274 __write_dirty_buffers_async(c, 0, &write_list);
1275 dm_bufio_unlock(c);
1276 __flush_write_list(&write_list);
1277}
1278EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1279
1280/*
1281 * For performance, it is essential that the buffers are written asynchronously
1282 * and simultaneously (so that the block layer can merge the writes) and then
1283 * waited upon.
1284 *
1285 * Finally, we flush hardware disk cache.
1286 */
1287int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1288{
1289 int a, f;
1290 unsigned long buffers_processed = 0;
1291 struct dm_buffer *b, *tmp;
1292
1293 LIST_HEAD(write_list);
1294
1295 dm_bufio_lock(c);
1296 __write_dirty_buffers_async(c, 0, &write_list);
1297 dm_bufio_unlock(c);
1298 __flush_write_list(&write_list);
1299 dm_bufio_lock(c);
1300
1301again:
1302 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1303 int dropped_lock = 0;
1304
1305 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1306 buffers_processed++;
1307
1308 BUG_ON(test_bit(B_READING, &b->state));
1309
1310 if (test_bit(B_WRITING, &b->state)) {
1311 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1312 dropped_lock = 1;
1313 b->hold_count++;
1314 dm_bufio_unlock(c);
1315 wait_on_bit_io(&b->state, B_WRITING,
1316 TASK_UNINTERRUPTIBLE);
1317 dm_bufio_lock(c);
1318 b->hold_count--;
1319 } else
1320 wait_on_bit_io(&b->state, B_WRITING,
1321 TASK_UNINTERRUPTIBLE);
1322 }
1323
1324 if (!test_bit(B_DIRTY, &b->state) &&
1325 !test_bit(B_WRITING, &b->state))
1326 __relink_lru(b, LIST_CLEAN);
1327
1328 cond_resched();
1329
1330 /*
1331 * If we dropped the lock, the list is no longer consistent,
1332 * so we must restart the search.
1333 *
1334 * In the most common case, the buffer just processed is
1335 * relinked to the clean list, so we won't loop scanning the
1336 * same buffer again and again.
1337 *
1338 * This may livelock if there is another thread simultaneously
1339 * dirtying buffers, so we count the number of buffers walked
1340 * and if it exceeds the total number of buffers, it means that
1341 * someone is doing some writes simultaneously with us. In
1342 * this case, stop, dropping the lock.
1343 */
1344 if (dropped_lock)
1345 goto again;
1346 }
1347 wake_up(&c->free_buffer_wait);
1348 dm_bufio_unlock(c);
1349
1350 a = xchg(&c->async_write_error, 0);
1351 f = dm_bufio_issue_flush(c);
1352 if (a)
1353 return a;
1354
1355 return f;
1356}
1357EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1358
1359/*
1360 * Use dm-io to send an empty barrier to flush the device.
1361 */
1362int dm_bufio_issue_flush(struct dm_bufio_client *c)
1363{
1364 struct dm_io_request io_req = {
1365 .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
1366 .mem.type = DM_IO_KMEM,
1367 .mem.ptr.addr = NULL,
1368 .client = c->dm_io,
1369 };
1370 struct dm_io_region io_reg = {
1371 .bdev = c->bdev,
1372 .sector = 0,
1373 .count = 0,
1374 };
1375
1376 BUG_ON(dm_bufio_in_request());
1377
1378 return dm_io(&io_req, 1, &io_reg, NULL);
1379}
1380EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1381
1382/*
1383 * Use dm-io to send a discard request to flush the device.
1384 */
1385int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
1386{
1387 struct dm_io_request io_req = {
1388 .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
1389 .mem.type = DM_IO_KMEM,
1390 .mem.ptr.addr = NULL,
1391 .client = c->dm_io,
1392 };
1393 struct dm_io_region io_reg = {
1394 .bdev = c->bdev,
1395 .sector = block_to_sector(c, block),
1396 .count = block_to_sector(c, count),
1397 };
1398
1399 BUG_ON(dm_bufio_in_request());
1400
1401 return dm_io(&io_req, 1, &io_reg, NULL);
1402}
1403EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
1404
1405/*
1406 * We first delete any other buffer that may be at that new location.
1407 *
1408 * Then, we write the buffer to the original location if it was dirty.
1409 *
1410 * Then, if we are the only one who is holding the buffer, relink the buffer
1411 * in the buffer tree for the new location.
1412 *
1413 * If there was someone else holding the buffer, we write it to the new
1414 * location but not relink it, because that other user needs to have the buffer
1415 * at the same place.
1416 */
1417void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1418{
1419 struct dm_bufio_client *c = b->c;
1420 struct dm_buffer *new;
1421
1422 BUG_ON(dm_bufio_in_request());
1423
1424 dm_bufio_lock(c);
1425
1426retry:
1427 new = __find(c, new_block);
1428 if (new) {
1429 if (new->hold_count) {
1430 __wait_for_free_buffer(c);
1431 goto retry;
1432 }
1433
1434 /*
1435 * FIXME: Is there any point waiting for a write that's going
1436 * to be overwritten in a bit?
1437 */
1438 __make_buffer_clean(new);
1439 __unlink_buffer(new);
1440 __free_buffer_wake(new);
1441 }
1442
1443 BUG_ON(!b->hold_count);
1444 BUG_ON(test_bit(B_READING, &b->state));
1445
1446 __write_dirty_buffer(b, NULL);
1447 if (b->hold_count == 1) {
1448 wait_on_bit_io(&b->state, B_WRITING,
1449 TASK_UNINTERRUPTIBLE);
1450 set_bit(B_DIRTY, &b->state);
1451 b->dirty_start = 0;
1452 b->dirty_end = c->block_size;
1453 __unlink_buffer(b);
1454 __link_buffer(b, new_block, LIST_DIRTY);
1455 } else {
1456 sector_t old_block;
1457 wait_on_bit_lock_io(&b->state, B_WRITING,
1458 TASK_UNINTERRUPTIBLE);
1459 /*
1460 * Relink buffer to "new_block" so that write_callback
1461 * sees "new_block" as a block number.
1462 * After the write, link the buffer back to old_block.
1463 * All this must be done in bufio lock, so that block number
1464 * change isn't visible to other threads.
1465 */
1466 old_block = b->block;
1467 __unlink_buffer(b);
1468 __link_buffer(b, new_block, b->list_mode);
1469 submit_io(b, REQ_OP_WRITE, write_endio);
1470 wait_on_bit_io(&b->state, B_WRITING,
1471 TASK_UNINTERRUPTIBLE);
1472 __unlink_buffer(b);
1473 __link_buffer(b, old_block, b->list_mode);
1474 }
1475
1476 dm_bufio_unlock(c);
1477 dm_bufio_release(b);
1478}
1479EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1480
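/*
 * Free the buffer immediately if nobody holds it, it has no I/O in flight
 * and it is not dirty.
 */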
1481static void forget_buffer_locked(struct dm_buffer *b)
1482{
1483 if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) {
1484 __unlink_buffer(b);
1485 __free_buffer_wake(b);
1486 }
1487}
1488
1489/*
1490 * Free the given buffer.
1491 *
1492 * This is just a hint, if the buffer is in use or dirty, this function
1493 * does nothing.
1494 */
1495void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1496{
1497 struct dm_buffer *b;
1498
1499 dm_bufio_lock(c);
1500
1501 b = __find(c, block);
1502 if (b)
1503 forget_buffer_locked(b);
1504
1505 dm_bufio_unlock(c);
1506}
1507EXPORT_SYMBOL_GPL(dm_bufio_forget);
1508
1509void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
1510{
1511 struct dm_buffer *b;
1512 sector_t end_block = block + n_blocks;
1513
1514 while (block < end_block) {
1515 dm_bufio_lock(c);
1516
1517 b = __find_next(c, block);
1518 if (b) {
1519 block = b->block + 1;
1520 forget_buffer_locked(b);
1521 }
1522
1523 dm_bufio_unlock(c);
1524
1525 if (!b)
1526 break;
1527 }
1528
1529}
1530EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
1531
1532void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1533{
1534 c->minimum_buffers = n;
1535}
1536EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
1537
1538unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1539{
1540 return c->block_size;
1541}
1542EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1543
1544sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1545{
1546 sector_t s = bdev_nr_sectors(c->bdev);
1547 if (s >= c->start)
1548 s -= c->start;
1549 else
1550 s = 0;
1551 if (likely(c->sectors_per_block_bits >= 0))
1552 s >>= c->sectors_per_block_bits;
1553 else
1554 sector_div(s, c->block_size >> SECTOR_SHIFT);
1555 return s;
1556}
1557EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1558
1559struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
1560{
1561 return c->dm_io;
1562}
1563EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
1564
1565sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1566{
1567 return b->block;
1568}
1569EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1570
1571void *dm_bufio_get_block_data(struct dm_buffer *b)
1572{
1573 return b->data;
1574}
1575EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1576
1577void *dm_bufio_get_aux_data(struct dm_buffer *b)
1578{
1579 return b + 1;
1580}
1581EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1582
1583struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1584{
1585 return b->c;
1586}
1587EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1588
1589static void drop_buffers(struct dm_bufio_client *c)
1590{
1591 struct dm_buffer *b;
1592 int i;
1593 bool warned = false;
1594
1595 BUG_ON(dm_bufio_in_request());
1596
1597 /*
1598 * An optimization so that the buffers are not written one-by-one.
1599 */
1600 dm_bufio_write_dirty_buffers_async(c);
1601
1602 dm_bufio_lock(c);
1603
1604 while ((b = __get_unclaimed_buffer(c)))
1605 __free_buffer_wake(b);
1606
1607 for (i = 0; i < LIST_SIZE; i++)
1608 list_for_each_entry(b, &c->lru[i], lru_list) {
1609 WARN_ON(!warned);
1610 warned = true;
1611 DMERR("leaked buffer %llx, hold count %u, list %d",
1612 (unsigned long long)b->block, b->hold_count, i);
1613#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1614 stack_trace_print(b->stack_entries, b->stack_len, 1);
1615 /* mark unclaimed to avoid BUG_ON below */
1616 b->hold_count = 0;
1617#endif
1618 }
1619
1620#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1621 while ((b = __get_unclaimed_buffer(c)))
1622 __free_buffer_wake(b);
1623#endif
1624
1625 for (i = 0; i < LIST_SIZE; i++)
1626 BUG_ON(!list_empty(&c->lru[i]));
1627
1628 dm_bufio_unlock(c);
1629}
1630
1631/*
1632 * We may not be able to evict this buffer if I/O is pending or the client
1633 * is still using it. The caller is expected to know the buffer is too old.
1634 *
1635 * And if GFP_NOFS is used, we must not do any I/O because we hold
1636 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1637 * rerouted to a different bufio client.
1638 */
1639static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1640{
1641 if (!(gfp & __GFP_FS) ||
1642 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
1643 if (test_bit_acquire(B_READING, &b->state) ||
1644 test_bit(B_WRITING, &b->state) ||
1645 test_bit(B_DIRTY, &b->state))
1646 return false;
1647 }
1648
1649 if (b->hold_count)
1650 return false;
1651
1652 __make_buffer_clean(b);
1653 __unlink_buffer(b);
1654 __free_buffer_wake(b);
1655
1656 return true;
1657}
1658
1659static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1660{
1661 unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
1662 if (likely(c->sectors_per_block_bits >= 0))
1663 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
1664 else
1665 retain_bytes /= c->block_size;
1666 return retain_bytes;
1667}
1668
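/*
 * Evict unclaimed buffers, clean list first and oldest first, until the
 * shrinker request (need_shrink) is satisfied or only retain_target
 * buffers would remain.
 */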
1669static void __scan(struct dm_bufio_client *c)
1670{
1671 int l;
1672 struct dm_buffer *b, *tmp;
1673 unsigned long freed = 0;
1674 unsigned long count = c->n_buffers[LIST_CLEAN] +
1675 c->n_buffers[LIST_DIRTY];
1676 unsigned long retain_target = get_retain_buffers(c);
1677
1678 for (l = 0; l < LIST_SIZE; l++) {
1679 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1680 if (count - freed <= retain_target)
1681 atomic_long_set(&c->need_shrink, 0);
1682 if (!atomic_long_read(&c->need_shrink))
1683 return;
1684 if (__try_evict_buffer(b, GFP_KERNEL)) {
1685 atomic_long_dec(&c->need_shrink);
1686 freed++;
1687 }
1688 cond_resched();
1689 }
1690 }
1691}
1692
1693static void shrink_work(struct work_struct *w)
1694{
1695 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
1696
1697 dm_bufio_lock(c);
1698 __scan(c);
1699 dm_bufio_unlock(c);
1700}
1701
1702static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1703{
1704 struct dm_bufio_client *c;
1705
1706 c = container_of(shrink, struct dm_bufio_client, shrinker);
1707 atomic_long_add(sc->nr_to_scan, &c->need_shrink);
1708 queue_work(dm_bufio_wq, &c->shrink_work);
1709
1710 return sc->nr_to_scan;
1711}
1712
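/*
 * Report to the shrinker how many buffers could be freed, excluding the
 * retain target and buffers already queued for cleanup.
 */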
1713static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1714{
1715 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1716 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1717 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1718 unsigned long retain_target = get_retain_buffers(c);
1719 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
1720
1721 if (unlikely(count < retain_target))
1722 count = 0;
1723 else
1724 count -= retain_target;
1725
1726 if (unlikely(count < queued_for_cleanup))
1727 count = 0;
1728 else
1729 count -= queued_for_cleanup;
1730
1731 return count;
1732}
1733
1734/*
1735 * Create the buffering interface
1736 */
1737struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1738 unsigned reserved_buffers, unsigned aux_size,
1739 void (*alloc_callback)(struct dm_buffer *),
1740 void (*write_callback)(struct dm_buffer *),
1741 unsigned int flags)
1742{
1743 int r;
1744 struct dm_bufio_client *c;
1745 unsigned i;
1746 char slab_name[27];
1747
1748 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
1749 DMERR("%s: block size not specified or is not multiple of 512b", __func__);
1750 r = -EINVAL;
1751 goto bad_client;
1752 }
1753
1754 c = kzalloc(sizeof(*c), GFP_KERNEL);
1755 if (!c) {
1756 r = -ENOMEM;
1757 goto bad_client;
1758 }
1759 c->buffer_tree = RB_ROOT;
1760
1761 c->bdev = bdev;
1762 c->block_size = block_size;
1763 if (is_power_of_2(block_size))
1764 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1765 else
1766 c->sectors_per_block_bits = -1;
1767
1768 c->alloc_callback = alloc_callback;
1769 c->write_callback = write_callback;
1770
1771 if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
1772 c->no_sleep = true;
1773 static_branch_inc(&no_sleep_enabled);
1774 }
1775
1776 for (i = 0; i < LIST_SIZE; i++) {
1777 INIT_LIST_HEAD(&c->lru[i]);
1778 c->n_buffers[i] = 0;
1779 }
1780
1781 mutex_init(&c->lock);
1782 spin_lock_init(&c->spinlock);
1783 INIT_LIST_HEAD(&c->reserved_buffers);
1784 c->need_reserved_buffers = reserved_buffers;
1785
1786 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
1787
1788 init_waitqueue_head(&c->free_buffer_wait);
1789 c->async_write_error = 0;
1790
1791 c->dm_io = dm_io_client_create();
1792 if (IS_ERR(c->dm_io)) {
1793 r = PTR_ERR(c->dm_io);
1794 goto bad_dm_io;
1795 }
1796
1797 if (block_size <= KMALLOC_MAX_SIZE &&
1798 (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
1799 unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
1800 snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
1801 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
1802 SLAB_RECLAIM_ACCOUNT, NULL);
1803 if (!c->slab_cache) {
1804 r = -ENOMEM;
1805 goto bad;
1806 }
1807 }
1808 if (aux_size)
1809 snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
1810 else
1811 snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
1812 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
1813 0, SLAB_RECLAIM_ACCOUNT, NULL);
1814 if (!c->slab_buffer) {
1815 r = -ENOMEM;
1816 goto bad;
1817 }
1818
1819 while (c->need_reserved_buffers) {
1820 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1821
1822 if (!b) {
1823 r = -ENOMEM;
1824 goto bad;
1825 }
1826 __free_buffer_wake(b);
1827 }
1828
1829 INIT_WORK(&c->shrink_work, shrink_work);
1830 atomic_long_set(&c->need_shrink, 0);
1831
1832 c->shrinker.count_objects = dm_bufio_shrink_count;
1833 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1834 c->shrinker.seeks = 1;
1835 c->shrinker.batch = 0;
1836 r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
1837 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
1838 if (r)
1839 goto bad;
1840
1841 mutex_lock(&dm_bufio_clients_lock);
1842 dm_bufio_client_count++;
1843 list_add(&c->client_list, &dm_bufio_all_clients);
1844 __cache_size_refresh();
1845 mutex_unlock(&dm_bufio_clients_lock);
1846
1847 return c;
1848
1849bad:
1850 while (!list_empty(&c->reserved_buffers)) {
1851 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1852 struct dm_buffer, lru_list);
1853 list_del(&b->lru_list);
1854 free_buffer(b);
1855 }
1856 kmem_cache_destroy(c->slab_cache);
1857 kmem_cache_destroy(c->slab_buffer);
1858 dm_io_client_destroy(c->dm_io);
1859bad_dm_io:
1860 mutex_destroy(&c->lock);
1861 if (c->no_sleep)
1862 static_branch_dec(&no_sleep_enabled);
1863 kfree(c);
1864bad_client:
1865 return ERR_PTR(r);
1866}
1867EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1868
1869/*
1870 * Free the buffering interface.
1871 * It is required that there are no references on any buffers.
1872 */
1873void dm_bufio_client_destroy(struct dm_bufio_client *c)
1874{
1875 unsigned i;
1876
1877 drop_buffers(c);
1878
1879 unregister_shrinker(&c->shrinker);
1880 flush_work(&c->shrink_work);
1881
1882 mutex_lock(&dm_bufio_clients_lock);
1883
1884 list_del(&c->client_list);
1885 dm_bufio_client_count--;
1886 __cache_size_refresh();
1887
1888 mutex_unlock(&dm_bufio_clients_lock);
1889
1890 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1891 BUG_ON(c->need_reserved_buffers);
1892
1893 while (!list_empty(&c->reserved_buffers)) {
1894 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1895 struct dm_buffer, lru_list);
1896 list_del(&b->lru_list);
1897 free_buffer(b);
1898 }
1899
1900 for (i = 0; i < LIST_SIZE; i++)
1901 if (c->n_buffers[i])
1902 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1903
1904 for (i = 0; i < LIST_SIZE; i++)
1905 BUG_ON(c->n_buffers[i]);
1906
1907 kmem_cache_destroy(c->slab_cache);
1908 kmem_cache_destroy(c->slab_buffer);
1909 dm_io_client_destroy(c->dm_io);
1910 mutex_destroy(&c->lock);
1911 if (c->no_sleep)
1912 static_branch_dec(&no_sleep_enabled);
1913 kfree(c);
1914}
1915EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1916
1917void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1918{
1919 c->start = start;
1920}
1921EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
1922
1923static unsigned get_max_age_hz(void)
1924{
1925 unsigned max_age = READ_ONCE(dm_bufio_max_age);
1926
1927 if (max_age > UINT_MAX / HZ)
1928 max_age = UINT_MAX / HZ;
1929
1930 return max_age * HZ;
1931}
1932
1933static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1934{
1935 return time_after_eq(jiffies, b->last_accessed + age_hz);
1936}
1937
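/*
 * Write back dirty buffers if over the watermark, then drop clean buffers
 * that have not been accessed for age_hz jiffies, keeping at least
 * retain_target buffers cached.
 */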
1938static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1939{
1940 struct dm_buffer *b, *tmp;
1941 unsigned long retain_target = get_retain_buffers(c);
1942 unsigned long count;
1943 LIST_HEAD(write_list);
1944
1945 dm_bufio_lock(c);
1946
1947 __check_watermark(c, &write_list);
1948 if (unlikely(!list_empty(&write_list))) {
1949 dm_bufio_unlock(c);
1950 __flush_write_list(&write_list);
1951 dm_bufio_lock(c);
1952 }
1953
1954 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1955 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1956 if (count <= retain_target)
1957 break;
1958
1959 if (!older_than(b, age_hz))
1960 break;
1961
1962 if (__try_evict_buffer(b, 0))
1963 count--;
1964
1965 cond_resched();
1966 }
1967
1968 dm_bufio_unlock(c);
1969}
1970
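/*
 * Run when the total allocation exceeds dm_bufio_cache_size: walk the
 * global LRU from the cold end, give recently accessed buffers a second
 * chance and evict the rest until usage drops below the low watermark.
 */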
1971static void do_global_cleanup(struct work_struct *w)
1972{
1973 struct dm_bufio_client *locked_client = NULL;
1974 struct dm_bufio_client *current_client;
1975 struct dm_buffer *b;
1976 unsigned spinlock_hold_count;
1977 unsigned long threshold = dm_bufio_cache_size -
1978 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
1979 unsigned long loops = global_num * 2;
1980
1981 mutex_lock(&dm_bufio_clients_lock);
1982
1983 while (1) {
1984 cond_resched();
1985
1986 spin_lock(&global_spinlock);
1987 if (unlikely(dm_bufio_current_allocated <= threshold))
1988 break;
1989
1990 spinlock_hold_count = 0;
1991get_next:
1992 if (!loops--)
1993 break;
1994 if (unlikely(list_empty(&global_queue)))
1995 break;
1996 b = list_entry(global_queue.prev, struct dm_buffer, global_list);
1997
1998 if (b->accessed) {
1999 b->accessed = 0;
2000 list_move(&b->global_list, &global_queue);
2001 if (likely(++spinlock_hold_count < 16))
2002 goto get_next;
2003 spin_unlock(&global_spinlock);
2004 continue;
2005 }
2006
2007 current_client = b->c;
2008 if (unlikely(current_client != locked_client)) {
2009 if (locked_client)
2010 dm_bufio_unlock(locked_client);
2011
2012 if (!dm_bufio_trylock(current_client)) {
2013 spin_unlock(&global_spinlock);
2014 dm_bufio_lock(current_client);
2015 locked_client = current_client;
2016 continue;
2017 }
2018
2019 locked_client = current_client;
2020 }
2021
2022 spin_unlock(&global_spinlock);
2023
2024 if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
2025 spin_lock(&global_spinlock);
2026 list_move(&b->global_list, &global_queue);
2027 spin_unlock(&global_spinlock);
2028 }
2029 }
2030
2031 spin_unlock(&global_spinlock);
2032
2033 if (locked_client)
2034 dm_bufio_unlock(locked_client);
2035
2036 mutex_unlock(&dm_bufio_clients_lock);
2037}
2038
2039static void cleanup_old_buffers(void)
2040{
2041 unsigned long max_age_hz = get_max_age_hz();
2042 struct dm_bufio_client *c;
2043
2044 mutex_lock(&dm_bufio_clients_lock);
2045
2046 __cache_size_refresh();
2047
2048 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2049 __evict_old_buffers(c, max_age_hz);
2050
2051 mutex_unlock(&dm_bufio_clients_lock);
2052}
2053
2054static void work_fn(struct work_struct *w)
2055{
2056 cleanup_old_buffers();
2057
2058 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2059 DM_BUFIO_WORK_TIMER_SECS * HZ);
2060}
2061
2062/*----------------------------------------------------------------
2063 * Module setup
2064 *--------------------------------------------------------------*/
2065
2066/*
2067 * This is called only once for the whole dm_bufio module.
2068 * It initializes memory limit.
2069 */
2070static int __init dm_bufio_init(void)
2071{
2072 __u64 mem;
2073
2074 dm_bufio_allocated_kmem_cache = 0;
2075 dm_bufio_allocated_get_free_pages = 0;
2076 dm_bufio_allocated_vmalloc = 0;
2077 dm_bufio_current_allocated = 0;
2078
2079 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2080 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2081
2082 if (mem > ULONG_MAX)
2083 mem = ULONG_MAX;
2084
2085#ifdef CONFIG_MMU
2086 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2087 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2088#endif
2089
2090 dm_bufio_default_cache_size = mem;
2091
2092 mutex_lock(&dm_bufio_clients_lock);
2093 __cache_size_refresh();
2094 mutex_unlock(&dm_bufio_clients_lock);
2095
2096 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2097 if (!dm_bufio_wq)
2098 return -ENOMEM;
2099
2100 INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2101 INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2102 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2103 DM_BUFIO_WORK_TIMER_SECS * HZ);
2104
2105 return 0;
2106}
2107
2108/*
2109 * This is called once when unloading the dm_bufio module.
2110 */
2111static void __exit dm_bufio_exit(void)
2112{
2113 int bug = 0;
2114
2115 cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2116 destroy_workqueue(dm_bufio_wq);
2117
2118 if (dm_bufio_client_count) {
2119 DMCRIT("%s: dm_bufio_client_count leaked: %d",
2120 __func__, dm_bufio_client_count);
2121 bug = 1;
2122 }
2123
2124 if (dm_bufio_current_allocated) {
2125 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2126 __func__, dm_bufio_current_allocated);
2127 bug = 1;
2128 }
2129
2130 if (dm_bufio_allocated_get_free_pages) {
2131 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2132 __func__, dm_bufio_allocated_get_free_pages);
2133 bug = 1;
2134 }
2135
2136 if (dm_bufio_allocated_vmalloc) {
2137		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
2138 __func__, dm_bufio_allocated_vmalloc);
2139 bug = 1;
2140 }
2141
2142 BUG_ON(bug);
2143}
2144
2145module_init(dm_bufio_init)
2146module_exit(dm_bufio_exit)
2147
2148module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
2149MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2150
2151module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
2152MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2153
2154module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
2155MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2156
2157module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
2158MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2159
2160module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
2161MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2162
2163module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
2164MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2165
2166module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
2167MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2168
2169module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
2170MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2171
2172MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2173MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2174MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2009-2011 Red Hat, Inc.
4 *
5 * Author: Mikulas Patocka <mpatocka@redhat.com>
6 *
7 * This file is released under the GPL.
8 */
9
10#include <linux/dm-bufio.h>
11
12#include <linux/device-mapper.h>
13#include <linux/dm-io.h>
14#include <linux/slab.h>
15#include <linux/sched/mm.h>
16#include <linux/jiffies.h>
17#include <linux/vmalloc.h>
18#include <linux/shrinker.h>
19#include <linux/module.h>
20#include <linux/rbtree.h>
21#include <linux/stacktrace.h>
22#include <linux/jump_label.h>
23
24#include "dm.h"
25
26#define DM_MSG_PREFIX "bufio"
27
28/*
29 * Memory management policy:
30 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
31 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
32 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
33 * Start background writeback when there are DM_BUFIO_WRITEBACK_RATIO
34 * times as many dirty buffers as clean buffers.
35 */
36#define DM_BUFIO_MIN_BUFFERS 8
37
38#define DM_BUFIO_MEMORY_PERCENT 2
39#define DM_BUFIO_VMALLOC_PERCENT 25
40#define DM_BUFIO_WRITEBACK_RATIO 3
41#define DM_BUFIO_LOW_WATERMARK_RATIO 16
42
43/*
44 * Check buffer ages in this interval (seconds)
45 */
46#define DM_BUFIO_WORK_TIMER_SECS 30
47
48/*
49 * Free buffers when they are older than this (seconds)
50 */
51#define DM_BUFIO_DEFAULT_AGE_SECS 300
52
53/*
54 * The nr of bytes of cached data to keep around.
55 */
56#define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
57
58/*
59 * Align buffer writes to this boundary.
60 * Tests show that SSDs have the highest IOPS when using 4k writes.
61 */
62#define DM_BUFIO_WRITE_ALIGN 4096
63
64/*
65 * dm_buffer->list_mode
66 */
67#define LIST_CLEAN 0
68#define LIST_DIRTY 1
69#define LIST_SIZE 2
70
71/*--------------------------------------------------------------*/
72
73/*
74 * Rather than use an LRU list, we use a clock algorithm where entries
75 * are held in a circular list. When an entry is 'hit' a reference bit
76 * is set. The least recently used entry is approximated by running a
77 * cursor around the list selecting unreferenced entries. Referenced
78 * entries have their reference bit cleared as the cursor passes them.
79 */
80struct lru_entry {
81 struct list_head list;
82 atomic_t referenced;
83};
84
85struct lru_iter {
86 struct lru *lru;
87 struct list_head list;
88 struct lru_entry *stop;
89 struct lru_entry *e;
90};
91
92struct lru {
93 struct list_head *cursor;
94 unsigned long count;
95
96 struct list_head iterators;
97};
98
99/*--------------*/
100
101static void lru_init(struct lru *lru)
102{
103 lru->cursor = NULL;
104 lru->count = 0;
105 INIT_LIST_HEAD(&lru->iterators);
106}
107
108static void lru_destroy(struct lru *lru)
109{
110 WARN_ON_ONCE(lru->cursor);
111 WARN_ON_ONCE(!list_empty(&lru->iterators));
112}
113
114/*
115 * Insert a new entry into the lru.
116 */
117static void lru_insert(struct lru *lru, struct lru_entry *le)
118{
119 /*
120	 * Don't be tempted to set this to 1; it makes the lru aspect
121	 * perform poorly.
122 */
123 atomic_set(&le->referenced, 0);
124
125 if (lru->cursor) {
126 list_add_tail(&le->list, lru->cursor);
127 } else {
128 INIT_LIST_HEAD(&le->list);
129 lru->cursor = &le->list;
130 }
131 lru->count++;
132}
133
134/*--------------*/
135
136/*
137 * Convert a list_head pointer to an lru_entry pointer.
138 */
139static inline struct lru_entry *to_le(struct list_head *l)
140{
141 return container_of(l, struct lru_entry, list);
142}
143
144/*
145 * Initialize an lru_iter and add it to the list of cursors in the lru.
146 */
147static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
148{
149 it->lru = lru;
150 it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
151 it->e = lru->cursor ? to_le(lru->cursor) : NULL;
152 list_add(&it->list, &lru->iterators);
153}
154
155/*
156 * Remove an lru_iter from the list of cursors in the lru.
157 */
158static inline void lru_iter_end(struct lru_iter *it)
159{
160 list_del(&it->list);
161}
162
163/* Predicate function type to be used with lru_iter_next */
164typedef bool (*iter_predicate)(struct lru_entry *le, void *context);
165
166/*
167 * Advance the cursor to the next entry that passes the
168 * predicate, and return that entry. Returns NULL if the
169 * iteration is complete.
170 */
171static struct lru_entry *lru_iter_next(struct lru_iter *it,
172 iter_predicate pred, void *context)
173{
174 struct lru_entry *e;
175
176 while (it->e) {
177 e = it->e;
178
179 /* advance the cursor */
180 if (it->e == it->stop)
181 it->e = NULL;
182 else
183 it->e = to_le(it->e->list.next);
184
185 if (pred(e, context))
186 return e;
187 }
188
189 return NULL;
190}
191
192/*
193 * Invalidate a specific lru_entry and update all cursors in
194 * the lru accordingly.
195 */
196static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
197{
198 struct lru_iter *it;
199
200 list_for_each_entry(it, &lru->iterators, list) {
201		/* Move it->e forwards if necessary. */
202 if (it->e == e) {
203 it->e = to_le(it->e->list.next);
204 if (it->e == e)
205 it->e = NULL;
206 }
207
208		/* Move it->stop backwards if necessary. */
209 if (it->stop == e) {
210 it->stop = to_le(it->stop->list.prev);
211 if (it->stop == e)
212 it->stop = NULL;
213 }
214 }
215}
216
217/*--------------*/
218
219/*
220 * Remove a specific entry from the lru.
221 */
222static void lru_remove(struct lru *lru, struct lru_entry *le)
223{
224 lru_iter_invalidate(lru, le);
225 if (lru->count == 1) {
226 lru->cursor = NULL;
227 } else {
228 if (lru->cursor == &le->list)
229 lru->cursor = lru->cursor->next;
230 list_del(&le->list);
231 }
232 lru->count--;
233}
234
235/*
236 * Mark as referenced.
237 */
238static inline void lru_reference(struct lru_entry *le)
239{
240 atomic_set(&le->referenced, 1);
241}
242
243/*--------------*/
244
245/*
246 * Remove the least recently used entry (approximately) that passes
247 * the predicate. Returns NULL if no suitable entry is found.
248 */
249enum evict_result {
250 ER_EVICT,
251 ER_DONT_EVICT,
252 ER_STOP, /* stop looking for something to evict */
253};
254
255typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
256
257static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
258{
259 unsigned long tested = 0;
260 struct list_head *h = lru->cursor;
261 struct lru_entry *le;
262
263 if (!h)
264 return NULL;
265 /*
266	 * In the worst case we have to loop around twice: once to clear
267	 * the reference flags, and then again to discover that the
268	 * predicate fails for all entries.
269 */
270 while (tested < lru->count) {
271 le = container_of(h, struct lru_entry, list);
272
273 if (atomic_read(&le->referenced)) {
274 atomic_set(&le->referenced, 0);
275 } else {
276 tested++;
277 switch (pred(le, context)) {
278 case ER_EVICT:
279 /*
280 * Adjust the cursor, so we start the next
281 * search from here.
282 */
283 lru->cursor = le->list.next;
284 lru_remove(lru, le);
285 return le;
286
287 case ER_DONT_EVICT:
288 break;
289
290 case ER_STOP:
291 lru->cursor = le->list.next;
292 return NULL;
293 }
294 }
295
296 h = h->next;
297
298 if (!no_sleep)
299 cond_resched();
300 }
301
302 return NULL;
303}
304
305/*--------------------------------------------------------------*/
306
307/*
308 * Buffer state bits.
309 */
310#define B_READING 0
311#define B_WRITING 1
312#define B_DIRTY 2
313
314/*
315 * Describes how the block was allocated:
316 * kmem_cache_alloc(), kmalloc(), __get_free_pages() or vmalloc().
317 * See the comment at alloc_buffer_data.
318 */
319enum data_mode {
320 DATA_MODE_SLAB = 0,
321 DATA_MODE_KMALLOC = 1,
322 DATA_MODE_GET_FREE_PAGES = 2,
323 DATA_MODE_VMALLOC = 3,
324 DATA_MODE_LIMIT = 4
325};
326
327struct dm_buffer {
328 /* protected by the locks in dm_buffer_cache */
329 struct rb_node node;
330
331 /* immutable, so don't need protecting */
332 sector_t block;
333 void *data;
334 unsigned char data_mode; /* DATA_MODE_* */
335
336 /*
337 * These two fields are used in isolation, so do not need
338 * a surrounding lock.
339 */
340 atomic_t hold_count;
341 unsigned long last_accessed;
342
343 /*
344 * Everything else is protected by the mutex in
345 * dm_bufio_client
346 */
347 unsigned long state;
348 struct lru_entry lru;
349 unsigned char list_mode; /* LIST_* */
350 blk_status_t read_error;
351 blk_status_t write_error;
352 unsigned int dirty_start;
353 unsigned int dirty_end;
354 unsigned int write_start;
355 unsigned int write_end;
356 struct list_head write_list;
357 struct dm_bufio_client *c;
358 void (*end_io)(struct dm_buffer *b, blk_status_t bs);
359#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
360#define MAX_STACK 10
361 unsigned int stack_len;
362 unsigned long stack_entries[MAX_STACK];
363#endif
364};
365
366/*--------------------------------------------------------------*/
367
368/*
369 * The buffer cache manages buffers, particularly:
370 * - inc/dec of holder count
371 * - setting the last_accessed field
372 * - maintains clean/dirty state along with lru
373 * - selecting buffers that match predicates
374 *
375 * It does *not* handle:
376 * - allocation/freeing of buffers.
377 * - IO
378 * - Eviction or cache sizing.
379 *
380 * cache_get() and cache_put() are threadsafe; you do not need to
381 * protect these calls with a surrounding mutex. All the other
382 * methods are not threadsafe; they do use locking primitives, but
383 * only enough to ensure get/put are threadsafe.
384 */
385
386struct buffer_tree {
387 union {
388 struct rw_semaphore lock;
389 rwlock_t spinlock;
390 } u;
391 struct rb_root root;
392} ____cacheline_aligned_in_smp;
393
394struct dm_buffer_cache {
395 struct lru lru[LIST_SIZE];
396 /*
397 * We spread entries across multiple trees to reduce contention
398 * on the locks.
399 */
400 unsigned int num_locks;
401 bool no_sleep;
402 struct buffer_tree trees[];
403};
404
405static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
406
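/*
 * Buffers are spread across several rb-trees, selected by hashing the
 * block number. The helpers below take the lock that covers a given
 * block; clients created with no_sleep use a rwlock (taken with _bh)
 * instead of an rw-semaphore.
 */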
407static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
408{
409 return dm_hash_locks_index(block, num_locks);
410}
411
412static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
413{
414 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
415 read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
416 else
417 down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
418}
419
420static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
421{
422 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
423 read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
424 else
425 up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
426}
427
428static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
429{
430 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
431 write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
432 else
433 down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
434}
435
436static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
437{
438 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
439 write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
440 else
441 up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
442}
443
444/*
445 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
446 * This struct helps avoid redundant drops and re-acquisitions of the same lock.
447 */
448struct lock_history {
449 struct dm_buffer_cache *cache;
450 bool write;
451 unsigned int previous;
452 unsigned int no_previous;
453};
454
455static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
456{
457 lh->cache = cache;
458 lh->write = write;
459 lh->no_previous = cache->num_locks;
460 lh->previous = lh->no_previous;
461}
462
463static void __lh_lock(struct lock_history *lh, unsigned int index)
464{
465 if (lh->write) {
466 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
467 write_lock_bh(&lh->cache->trees[index].u.spinlock);
468 else
469 down_write(&lh->cache->trees[index].u.lock);
470 } else {
471 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
472 read_lock_bh(&lh->cache->trees[index].u.spinlock);
473 else
474 down_read(&lh->cache->trees[index].u.lock);
475 }
476}
477
478static void __lh_unlock(struct lock_history *lh, unsigned int index)
479{
480 if (lh->write) {
481 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
482 write_unlock_bh(&lh->cache->trees[index].u.spinlock);
483 else
484 up_write(&lh->cache->trees[index].u.lock);
485 } else {
486 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
487 read_unlock_bh(&lh->cache->trees[index].u.spinlock);
488 else
489 up_read(&lh->cache->trees[index].u.lock);
490 }
491}
492
493/*
494 * Make sure you call this since it will unlock the final lock.
495 */
496static void lh_exit(struct lock_history *lh)
497{
498 if (lh->previous != lh->no_previous) {
499 __lh_unlock(lh, lh->previous);
500 lh->previous = lh->no_previous;
501 }
502}
503
504/*
505 * Named 'next' because there is no corresponding
506 * 'up/unlock' call since it's done automatically.
507 */
508static void lh_next(struct lock_history *lh, sector_t b)
509{
510 unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
511
512 if (lh->previous != lh->no_previous) {
513 if (lh->previous != index) {
514 __lh_unlock(lh, lh->previous);
515 __lh_lock(lh, index);
516 lh->previous = index;
517 }
518 } else {
519 __lh_lock(lh, index);
520 lh->previous = index;
521 }
522}
523
524static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
525{
526 return container_of(le, struct dm_buffer, lru);
527}
528
529static struct dm_buffer *list_to_buffer(struct list_head *l)
530{
531 struct lru_entry *le = list_entry(l, struct lru_entry, list);
532
533 return le_to_buffer(le);
534}
535
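/*
 * Initialise the per-tree locks and empty rb-trees, plus the clean and
 * dirty lrus.
 */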
536static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
537{
538 unsigned int i;
539
540 bc->num_locks = num_locks;
541 bc->no_sleep = no_sleep;
542
543 for (i = 0; i < bc->num_locks; i++) {
544 if (no_sleep)
545 rwlock_init(&bc->trees[i].u.spinlock);
546 else
547 init_rwsem(&bc->trees[i].u.lock);
548 bc->trees[i].root = RB_ROOT;
549 }
550
551 lru_init(&bc->lru[LIST_CLEAN]);
552 lru_init(&bc->lru[LIST_DIRTY]);
553}
554
555static void cache_destroy(struct dm_buffer_cache *bc)
556{
557 unsigned int i;
558
559 for (i = 0; i < bc->num_locks; i++)
560 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
561
562 lru_destroy(&bc->lru[LIST_CLEAN]);
563 lru_destroy(&bc->lru[LIST_DIRTY]);
564}
565
566/*--------------*/
567
568/*
569 * Not threadsafe, or racy, depending on how you look at it.
570 */
571static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
572{
573 return bc->lru[list_mode].count;
574}
575
576static inline unsigned long cache_total(struct dm_buffer_cache *bc)
577{
578 return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
579}
580
581/*--------------*/
582
583/*
584 * Gets a specific buffer, indexed by block.
585 * If the buffer is found then its holder count will be incremented and
586 * lru_reference will be called.
587 *
588 * threadsafe
589 */
590static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
591{
592 struct rb_node *n = root->rb_node;
593 struct dm_buffer *b;
594
595 while (n) {
596 b = container_of(n, struct dm_buffer, node);
597
598 if (b->block == block)
599 return b;
600
601 n = block < b->block ? n->rb_left : n->rb_right;
602 }
603
604 return NULL;
605}
606
607static void __cache_inc_buffer(struct dm_buffer *b)
608{
609 atomic_inc(&b->hold_count);
610 WRITE_ONCE(b->last_accessed, jiffies);
611}
612
613static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
614{
615 struct dm_buffer *b;
616
617 cache_read_lock(bc, block);
618 b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
619 if (b) {
620 lru_reference(&b->lru);
621 __cache_inc_buffer(b);
622 }
623 cache_read_unlock(bc, block);
624
625 return b;
626}
627
628/*--------------*/
629
630/*
631 * Returns true if the hold count hits zero.
632 * threadsafe
633 */
634static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
635{
636 bool r;
637
638 cache_read_lock(bc, b->block);
639 BUG_ON(!atomic_read(&b->hold_count));
640 r = atomic_dec_and_test(&b->hold_count);
641 cache_read_unlock(bc, b->block);
642
643 return r;
644}
645
646/*--------------*/
647
648typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);
649
650/*
651 * Evicts a buffer based on a predicate. The oldest buffer that
652 * matches the predicate will be selected. In addition to matching
653 * the predicate, the selected buffer will have a hold_count of zero.
654 */
655struct evict_wrapper {
656 struct lock_history *lh;
657 b_predicate pred;
658 void *context;
659};
660
661/*
662 * Wraps the buffer predicate, turning it into an lru predicate. Adds
663 * an extra test for hold_count.
664 */
665static enum evict_result __evict_pred(struct lru_entry *le, void *context)
666{
667 struct evict_wrapper *w = context;
668 struct dm_buffer *b = le_to_buffer(le);
669
670 lh_next(w->lh, b->block);
671
672 if (atomic_read(&b->hold_count))
673 return ER_DONT_EVICT;
674
675 return w->pred(b, w->context);
676}
677
678static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
679 b_predicate pred, void *context,
680 struct lock_history *lh)
681{
682 struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
683 struct lru_entry *le;
684 struct dm_buffer *b;
685
686 le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
687 if (!le)
688 return NULL;
689
690 b = le_to_buffer(le);
691 /* __evict_pred will have locked the appropriate tree. */
692 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
693
694 return b;
695}
696
697static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
698 b_predicate pred, void *context)
699{
700 struct dm_buffer *b;
701 struct lock_history lh;
702
703 lh_init(&lh, bc, true);
704 b = __cache_evict(bc, list_mode, pred, context, &lh);
705 lh_exit(&lh);
706
707 return b;
708}
709
710/*--------------*/
711
712/*
713 * Mark a buffer as clean or dirty. Not threadsafe.
714 */
715static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
716{
717 cache_write_lock(bc, b->block);
718 if (list_mode != b->list_mode) {
719 lru_remove(&bc->lru[b->list_mode], &b->lru);
720 b->list_mode = list_mode;
721 lru_insert(&bc->lru[b->list_mode], &b->lru);
722 }
723 cache_write_unlock(bc, b->block);
724}
725
726/*--------------*/
727
728/*
729 * Runs through the lru associated with 'old_mode'; buffers for which the
730 * predicate matches are moved to 'new_mode'. Not threadsafe.
731 */
732static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
733 b_predicate pred, void *context, struct lock_history *lh)
734{
735 struct lru_entry *le;
736 struct dm_buffer *b;
737 struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
738
739 while (true) {
740 le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
741 if (!le)
742 break;
743
744 b = le_to_buffer(le);
745 b->list_mode = new_mode;
746 lru_insert(&bc->lru[b->list_mode], &b->lru);
747 }
748}
749
750static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
751 b_predicate pred, void *context)
752{
753 struct lock_history lh;
754
755 lh_init(&lh, bc, true);
756 __cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
757 lh_exit(&lh);
758}
759
760/*--------------*/
761
762/*
763 * Iterates through all clean or dirty entries calling a function for each
764 * entry. The callback may terminate the iteration early. Not threadsafe.
765 */
766
767/*
768 * Iterator functions should return one of these actions to indicate
769 * how the iteration should proceed.
770 */
771enum it_action {
772 IT_NEXT,
773 IT_COMPLETE,
774};
775
776typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);
777
778static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
779 iter_fn fn, void *context, struct lock_history *lh)
780{
781 struct lru *lru = &bc->lru[list_mode];
782 struct lru_entry *le, *first;
783
784 if (!lru->cursor)
785 return;
786
787 first = le = to_le(lru->cursor);
788 do {
789 struct dm_buffer *b = le_to_buffer(le);
790
791 lh_next(lh, b->block);
792
793 switch (fn(b, context)) {
794 case IT_NEXT:
795 break;
796
797 case IT_COMPLETE:
798 return;
799 }
800 cond_resched();
801
802 le = to_le(le->list.next);
803 } while (le != first);
804}
805
806static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
807 iter_fn fn, void *context)
808{
809 struct lock_history lh;
810
811 lh_init(&lh, bc, false);
812 __cache_iterate(bc, list_mode, fn, context, &lh);
813 lh_exit(&lh);
814}
815
816/*--------------*/
817
818/*
819 * Passes ownership of the buffer to the cache. Returns false if the
820 * buffer was already present (in which case ownership does not pass).
821 * e.g. due to a race with another thread.
822 *
823 * Holder count should be 1 on insertion.
824 *
825 * Not threadsafe.
826 */
827static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
828{
829 struct rb_node **new = &root->rb_node, *parent = NULL;
830 struct dm_buffer *found;
831
832 while (*new) {
833 found = container_of(*new, struct dm_buffer, node);
834
835 if (found->block == b->block)
836 return false;
837
838 parent = *new;
839 new = b->block < found->block ?
840 &found->node.rb_left : &found->node.rb_right;
841 }
842
843 rb_link_node(&b->node, parent, new);
844 rb_insert_color(&b->node, root);
845
846 return true;
847}
848
849static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
850{
851 bool r;
852
853 if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
854 return false;
855
856 cache_write_lock(bc, b->block);
857 BUG_ON(atomic_read(&b->hold_count) != 1);
858 r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
859 if (r)
860 lru_insert(&bc->lru[b->list_mode], &b->lru);
861 cache_write_unlock(bc, b->block);
862
863 return r;
864}
865
866/*--------------*/
867
868/*
869 * Removes a buffer from the cache; ownership of the buffer passes back to the caller.
870 * Fails if the hold_count is not one (i.e. unless the caller holds the only reference).
871 *
872 * Not threadsafe.
873 */
874static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
875{
876 bool r;
877
878 cache_write_lock(bc, b->block);
879
880 if (atomic_read(&b->hold_count) != 1) {
881 r = false;
882 } else {
883 r = true;
884 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
885 lru_remove(&bc->lru[b->list_mode], &b->lru);
886 }
887
888 cache_write_unlock(bc, b->block);
889
890 return r;
891}
892
893/*--------------*/
894
895typedef void (*b_release)(struct dm_buffer *);
896
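/*
 * Find the buffer at 'block', or failing that, the buffer with the
 * lowest block number greater than 'block'. Returns NULL if there is
 * no such buffer. Used when removing a range of blocks.
 */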
897static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
898{
899 struct rb_node *n = root->rb_node;
900 struct dm_buffer *b;
901 struct dm_buffer *best = NULL;
902
903 while (n) {
904 b = container_of(n, struct dm_buffer, node);
905
906 if (b->block == block)
907 return b;
908
909 if (block <= b->block) {
910 n = n->rb_left;
911 best = b;
912 } else {
913 n = n->rb_right;
914 }
915 }
916
917 return best;
918}
919
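/*
 * Walk one rb-tree removing unheld buffers in the range [begin, end)
 * that the predicate agrees to evict, handing each one to the release
 * callback.
 */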
920static void __remove_range(struct dm_buffer_cache *bc,
921 struct rb_root *root,
922 sector_t begin, sector_t end,
923 b_predicate pred, b_release release)
924{
925 struct dm_buffer *b;
926
927 while (true) {
928 cond_resched();
929
930 b = __find_next(root, begin);
931 if (!b || (b->block >= end))
932 break;
933
934 begin = b->block + 1;
935
936 if (atomic_read(&b->hold_count))
937 continue;
938
939 if (pred(b, NULL) == ER_EVICT) {
940 rb_erase(&b->node, root);
941 lru_remove(&bc->lru[b->list_mode], &b->lru);
942 release(b);
943 }
944 }
945}
946
947static void cache_remove_range(struct dm_buffer_cache *bc,
948 sector_t begin, sector_t end,
949 b_predicate pred, b_release release)
950{
951 unsigned int i;
952
953 BUG_ON(bc->no_sleep);
954 for (i = 0; i < bc->num_locks; i++) {
955 down_write(&bc->trees[i].u.lock);
956 __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
957 up_write(&bc->trees[i].u.lock);
958 }
959}
960
961/*----------------------------------------------------------------*/
962
963/*
964 * Linking of buffers:
965 * All buffers are linked to buffer_cache with their node field.
966 *
967 * Clean buffers that are not being written (B_WRITING not set)
968 * are linked to lru[LIST_CLEAN] with their lru_list field.
969 *
970 * Dirty and clean buffers that are being written are linked to
971 * lru[LIST_DIRTY] with their lru_list field. When the write
972 * finishes, the buffer cannot be relinked immediately (because we
973 * are in an interrupt context and relinking requires process
974 * context), so some clean-not-writing buffers can be held on
975 * dirty_lru too. They are later added to lru in the process
976 * context.
977 */
978struct dm_bufio_client {
979 struct block_device *bdev;
980 unsigned int block_size;
981 s8 sectors_per_block_bits;
982
983 bool no_sleep;
984 struct mutex lock;
985 spinlock_t spinlock;
986
987 int async_write_error;
988
989 void (*alloc_callback)(struct dm_buffer *buf);
990 void (*write_callback)(struct dm_buffer *buf);
991 struct kmem_cache *slab_buffer;
992 struct kmem_cache *slab_cache;
993 struct dm_io_client *dm_io;
994
995 struct list_head reserved_buffers;
996 unsigned int need_reserved_buffers;
997
998 unsigned int minimum_buffers;
999
1000 sector_t start;
1001
1002 struct shrinker *shrinker;
1003 struct work_struct shrink_work;
1004 atomic_long_t need_shrink;
1005
1006 wait_queue_head_t free_buffer_wait;
1007
1008 struct list_head client_list;
1009
1010 /*
1011 * Used by global_cleanup to sort the clients list.
1012 */
1013 unsigned long oldest_buffer;
1014
1015 struct dm_buffer_cache cache; /* must be last member */
1016};
1017
1018/*----------------------------------------------------------------*/
1019
1020#define dm_bufio_in_request() (!!current->bio_list)
1021
1022static void dm_bufio_lock(struct dm_bufio_client *c)
1023{
1024 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1025 spin_lock_bh(&c->spinlock);
1026 else
1027 mutex_lock_nested(&c->lock, dm_bufio_in_request());
1028}
1029
1030static void dm_bufio_unlock(struct dm_bufio_client *c)
1031{
1032 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1033 spin_unlock_bh(&c->spinlock);
1034 else
1035 mutex_unlock(&c->lock);
1036}
1037
1038/*----------------------------------------------------------------*/
1039
1040/*
1041 * Default cache size: available memory divided by the ratio.
1042 */
1043static unsigned long dm_bufio_default_cache_size;
1044
1045/*
1046 * Total cache size set by the user.
1047 */
1048static unsigned long dm_bufio_cache_size;
1049
1050/*
1051 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
1052 * at any time. If it disagrees, the user has changed cache size.
1053 */
1054static unsigned long dm_bufio_cache_size_latch;
1055
1056static DEFINE_SPINLOCK(global_spinlock);
1057
1058/*
1059 * Buffers are freed after this timeout
1060 */
1061static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
1062static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
1063
1064static unsigned long dm_bufio_peak_allocated;
1065static unsigned long dm_bufio_allocated_kmem_cache;
1066static unsigned long dm_bufio_allocated_kmalloc;
1067static unsigned long dm_bufio_allocated_get_free_pages;
1068static unsigned long dm_bufio_allocated_vmalloc;
1069static unsigned long dm_bufio_current_allocated;
1070
1071/*----------------------------------------------------------------*/
1072
1073/*
1074 * The current number of clients.
1075 */
1076static int dm_bufio_client_count;
1077
1078/*
1079 * The list of all clients.
1080 */
1081static LIST_HEAD(dm_bufio_all_clients);
1082
1083/*
1084 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
1085 */
1086static DEFINE_MUTEX(dm_bufio_clients_lock);
1087
1088static struct workqueue_struct *dm_bufio_wq;
1089static struct delayed_work dm_bufio_cleanup_old_work;
1090static struct work_struct dm_bufio_replacement_work;
1091
1092
1093#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1094static void buffer_record_stack(struct dm_buffer *b)
1095{
1096 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
1097}
1098#endif
1099
1100/*----------------------------------------------------------------*/
1101
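/*
 * Account a buffer's data against the global per-allocation-mode and
 * total counters (subtracting when 'unlink' is set). If the cache has
 * grown beyond its configured size, kick the replacement worker.
 */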
1102static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
1103{
1104 unsigned char data_mode;
1105 long diff;
1106
1107 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
1108 &dm_bufio_allocated_kmem_cache,
1109 &dm_bufio_allocated_kmalloc,
1110 &dm_bufio_allocated_get_free_pages,
1111 &dm_bufio_allocated_vmalloc,
1112 };
1113
1114 data_mode = b->data_mode;
1115 diff = (long)b->c->block_size;
1116 if (unlink)
1117 diff = -diff;
1118
1119 spin_lock(&global_spinlock);
1120
1121 *class_ptr[data_mode] += diff;
1122
1123 dm_bufio_current_allocated += diff;
1124
1125 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
1126 dm_bufio_peak_allocated = dm_bufio_current_allocated;
1127
1128 if (!unlink) {
1129 if (dm_bufio_current_allocated > dm_bufio_cache_size)
1130 queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
1131 }
1132
1133 spin_unlock(&global_spinlock);
1134}
1135
1136/*
1137 * Change the number of clients and recalculate per-client limit.
1138 */
1139static void __cache_size_refresh(void)
1140{
1141 if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
1142 return;
1143 if (WARN_ON(dm_bufio_client_count < 0))
1144 return;
1145
1146 dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
1147
1148 /*
1149 * Use default if set to 0 and report the actual cache size used.
1150 */
1151 if (!dm_bufio_cache_size_latch) {
1152 (void)cmpxchg(&dm_bufio_cache_size, 0,
1153 dm_bufio_default_cache_size);
1154 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
1155 }
1156}
1157
1158/*
1159 * Allocating buffer data.
1160 *
1161 * Small buffers are allocated with kmem_cache, to use space optimally.
1162 *
1163 * For large buffers, we choose between get_free_pages and vmalloc.
1164 * Each has advantages and disadvantages.
1165 *
1166 * __get_free_pages can randomly fail if the memory is fragmented.
1167 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
1168 * as low as 128M) so using it for caching is not appropriate.
1169 *
1170 * If the allocation may fail we use __get_free_pages. Memory fragmentation
1171 * won't have a fatal effect here; it just causes flushes of some other
1172 * buffers, so more I/O will be performed. Don't use __get_free_pages if it
1173 * always fails (i.e. order > MAX_PAGE_ORDER).
1174 *
1175 * If the allocation shouldn't fail we use __vmalloc. This is only for the
1176 * initial reserve allocation, so there's no risk of wasting all vmalloc
1177 * space.
1178 */
1179static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1180 unsigned char *data_mode)
1181{
1182 if (unlikely(c->slab_cache != NULL)) {
1183 *data_mode = DATA_MODE_SLAB;
1184 return kmem_cache_alloc(c->slab_cache, gfp_mask);
1185 }
1186
1187 if (unlikely(c->block_size < PAGE_SIZE)) {
1188 *data_mode = DATA_MODE_KMALLOC;
1189 return kmalloc(c->block_size, gfp_mask | __GFP_RECLAIMABLE);
1190 }
1191
1192 if (c->block_size <= KMALLOC_MAX_SIZE &&
1193 gfp_mask & __GFP_NORETRY) {
1194 *data_mode = DATA_MODE_GET_FREE_PAGES;
1195 return (void *)__get_free_pages(gfp_mask,
1196 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1197 }
1198
1199 *data_mode = DATA_MODE_VMALLOC;
1200
1201 return __vmalloc(c->block_size, gfp_mask);
1202}
1203
1204/*
1205 * Free buffer's data.
1206 */
1207static void free_buffer_data(struct dm_bufio_client *c,
1208 void *data, unsigned char data_mode)
1209{
1210 switch (data_mode) {
1211 case DATA_MODE_SLAB:
1212 kmem_cache_free(c->slab_cache, data);
1213 break;
1214
1215 case DATA_MODE_KMALLOC:
1216 kfree(data);
1217 break;
1218
1219 case DATA_MODE_GET_FREE_PAGES:
1220 free_pages((unsigned long)data,
1221 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1222 break;
1223
1224 case DATA_MODE_VMALLOC:
1225 vfree(data);
1226 break;
1227
1228 default:
1229 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
1230 data_mode);
1231 BUG();
1232 }
1233}
1234
1235/*
1236 * Allocate buffer and its data.
1237 */
1238static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
1239{
1240 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
1241
1242 if (!b)
1243 return NULL;
1244
1245 b->c = c;
1246
1247 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
1248 if (!b->data) {
1249 kmem_cache_free(c->slab_buffer, b);
1250 return NULL;
1251 }
1252 adjust_total_allocated(b, false);
1253
1254#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1255 b->stack_len = 0;
1256#endif
1257 return b;
1258}
1259
1260/*
1261 * Free buffer and its data.
1262 */
1263static void free_buffer(struct dm_buffer *b)
1264{
1265 struct dm_bufio_client *c = b->c;
1266
1267 adjust_total_allocated(b, true);
1268 free_buffer_data(c, b->data, b->data_mode);
1269 kmem_cache_free(c->slab_buffer, b);
1270}
1271
1272/*
1273 *--------------------------------------------------------------------------
1274 * Submit I/O on the buffer.
1275 *
1276 * Bio interface is faster but it has some problems:
1277 * the vector list is limited (increasing this limit increases
1278 * memory consumption per buffer, so it is not viable);
1279 *
1280 * the memory must be direct-mapped, not vmalloced;
1281 *
1282 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
1283 * it is not vmalloced, try using the bio interface.
1284 *
1285 * If the buffer is big, if it is vmalloced or if the underlying device
1286 * rejects the bio because it is too large, use dm-io layer to do the I/O.
1287 * The dm-io layer splits the I/O into multiple requests, avoiding the above
1288 * shortcomings.
1289 *--------------------------------------------------------------------------
1290 */
1291
1292/*
1293 * dm-io completion routine. It just calls b->end_io, pretending
1294 * that the request was handled directly with the bio interface.
1295 */
1296static void dmio_complete(unsigned long error, void *context)
1297{
1298 struct dm_buffer *b = context;
1299
1300 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
1301}
1302
1303static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
1304 unsigned int n_sectors, unsigned int offset,
1305 unsigned short ioprio)
1306{
1307 int r;
1308 struct dm_io_request io_req = {
1309 .bi_opf = op,
1310 .notify.fn = dmio_complete,
1311 .notify.context = b,
1312 .client = b->c->dm_io,
1313 };
1314 struct dm_io_region region = {
1315 .bdev = b->c->bdev,
1316 .sector = sector,
1317 .count = n_sectors,
1318 };
1319
1320 if (b->data_mode != DATA_MODE_VMALLOC) {
1321 io_req.mem.type = DM_IO_KMEM;
1322 io_req.mem.ptr.addr = (char *)b->data + offset;
1323 } else {
1324 io_req.mem.type = DM_IO_VMA;
1325 io_req.mem.ptr.vma = (char *)b->data + offset;
1326 }
1327
1328	r = dm_io(&io_req, 1, &region, NULL, ioprio);
1329 if (unlikely(r))
1330 b->end_io(b, errno_to_blk_status(r));
1331}
1332
1333static void bio_complete(struct bio *bio)
1334{
1335 struct dm_buffer *b = bio->bi_private;
1336 blk_status_t status = bio->bi_status;
1337
1338 bio_uninit(bio);
1339 kfree(bio);
1340 b->end_io(b, status);
1341}
1342
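/*
 * Issue the I/O with a single-vec bio; if the bio cannot be allocated,
 * fall back to dm-io.
 */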
1343static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
1344 unsigned int n_sectors, unsigned int offset,
1345 unsigned short ioprio)
1346{
1347 struct bio *bio;
1348 char *ptr;
1349 unsigned int len;
1350
1351 bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
1352 if (!bio) {
1353 use_dmio(b, op, sector, n_sectors, offset, ioprio);
1354 return;
1355 }
1356 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
1357 bio->bi_iter.bi_sector = sector;
1358 bio->bi_end_io = bio_complete;
1359 bio->bi_private = b;
1360 bio->bi_ioprio = ioprio;
1361
1362 ptr = (char *)b->data + offset;
1363 len = n_sectors << SECTOR_SHIFT;
1364
1365 __bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));
1366
1367 submit_bio(bio);
1368}
1369
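/*
 * Convert a block number to its starting sector on the underlying
 * device, taking the client's start offset into account.
 */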
1370static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
1371{
1372 sector_t sector;
1373
1374 if (likely(c->sectors_per_block_bits >= 0))
1375 sector = block << c->sectors_per_block_bits;
1376 else
1377 sector = block * (c->block_size >> SECTOR_SHIFT);
1378 sector += c->start;
1379
1380 return sector;
1381}
1382
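/*
 * Submit a read or write for the buffer. Reads always cover the whole
 * block; writes cover only the dirty range, rounded out to
 * DM_BUFIO_WRITE_ALIGN boundaries. vmalloc'ed buffers must go through
 * dm-io; everything else uses the bio path.
 */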
1383static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
1384 void (*end_io)(struct dm_buffer *, blk_status_t))
1385{
1386 unsigned int n_sectors;
1387 sector_t sector;
1388 unsigned int offset, end;
1389
1390 b->end_io = end_io;
1391
1392 sector = block_to_sector(b->c, b->block);
1393
1394 if (op != REQ_OP_WRITE) {
1395 n_sectors = b->c->block_size >> SECTOR_SHIFT;
1396 offset = 0;
1397 } else {
1398 if (b->c->write_callback)
1399 b->c->write_callback(b);
1400 offset = b->write_start;
1401 end = b->write_end;
1402 offset &= -DM_BUFIO_WRITE_ALIGN;
1403 end += DM_BUFIO_WRITE_ALIGN - 1;
1404 end &= -DM_BUFIO_WRITE_ALIGN;
1405 if (unlikely(end > b->c->block_size))
1406 end = b->c->block_size;
1407
1408 sector += offset >> SECTOR_SHIFT;
1409 n_sectors = (end - offset) >> SECTOR_SHIFT;
1410 }
1411
1412 if (b->data_mode != DATA_MODE_VMALLOC)
1413 use_bio(b, op, sector, n_sectors, offset, ioprio);
1414 else
1415 use_dmio(b, op, sector, n_sectors, offset, ioprio);
1416}
1417
1418/*
1419 *--------------------------------------------------------------
1420 * Writing dirty buffers
1421 *--------------------------------------------------------------
1422 */
1423
1424/*
1425 * The endio routine for write.
1426 *
1427 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
1428 * it.
1429 */
1430static void write_endio(struct dm_buffer *b, blk_status_t status)
1431{
1432 b->write_error = status;
1433 if (unlikely(status)) {
1434 struct dm_bufio_client *c = b->c;
1435
1436 (void)cmpxchg(&c->async_write_error, 0,
1437 blk_status_to_errno(status));
1438 }
1439
1440 BUG_ON(!test_bit(B_WRITING, &b->state));
1441
1442 smp_mb__before_atomic();
1443 clear_bit(B_WRITING, &b->state);
1444 smp_mb__after_atomic();
1445
1446 wake_up_bit(&b->state, B_WRITING);
1447}
1448
1449/*
1450 * Initiate a write on a dirty buffer, but don't wait for it.
1451 *
1452 * - If the buffer is not dirty, exit.
1453 * - If there is a previous write going on, wait for it to finish (we can't
1454 * have two writes on the same buffer simultaneously).
1455 * - Submit our write and don't wait on it. We set B_WRITING indicating
1456 * that there is a write in progress.
1457 */
1458static void __write_dirty_buffer(struct dm_buffer *b,
1459 struct list_head *write_list)
1460{
1461 if (!test_bit(B_DIRTY, &b->state))
1462 return;
1463
1464 clear_bit(B_DIRTY, &b->state);
1465 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1466
1467 b->write_start = b->dirty_start;
1468 b->write_end = b->dirty_end;
1469
1470 if (!write_list)
1471 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1472 else
1473 list_add_tail(&b->write_list, write_list);
1474}
1475
1476static void __flush_write_list(struct list_head *write_list)
1477{
1478 struct blk_plug plug;
1479
1480 blk_start_plug(&plug);
1481 while (!list_empty(write_list)) {
1482 struct dm_buffer *b =
1483 list_entry(write_list->next, struct dm_buffer, write_list);
1484 list_del(&b->write_list);
1485 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1486 cond_resched();
1487 }
1488 blk_finish_plug(&plug);
1489}
1490
1491/*
1492 * Wait until any activity on the buffer finishes. Possibly write the
1493 * buffer if it is dirty. When this function finishes, there is no I/O
1494 * running on the buffer and the buffer is not dirty.
1495 */
1496static void __make_buffer_clean(struct dm_buffer *b)
1497{
1498 BUG_ON(atomic_read(&b->hold_count));
1499
1500 /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
1501 if (!smp_load_acquire(&b->state)) /* fast case */
1502 return;
1503
1504 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1505 __write_dirty_buffer(b, NULL);
1506 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1507}
1508
1509static enum evict_result is_clean(struct dm_buffer *b, void *context)
1510{
1511 struct dm_bufio_client *c = context;
1512
1513 /* These should never happen */
1514 if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
1515 return ER_DONT_EVICT;
1516 if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
1517 return ER_DONT_EVICT;
1518 if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
1519 return ER_DONT_EVICT;
1520
1521 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
1522 unlikely(test_bit(B_READING, &b->state)))
1523 return ER_DONT_EVICT;
1524
1525 return ER_EVICT;
1526}
1527
1528static enum evict_result is_dirty(struct dm_buffer *b, void *context)
1529{
1530 /* These should never happen */
1531 if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1532 return ER_DONT_EVICT;
1533 if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
1534 return ER_DONT_EVICT;
1535
1536 return ER_EVICT;
1537}
1538
1539/*
1540 * Find some buffer that is not held by anybody, clean it, unlink it and
1541 * return it.
1542 */
1543static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
1544{
1545 struct dm_buffer *b;
1546
1547 b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
1548 if (b) {
1549 /* this also waits for pending reads */
1550 __make_buffer_clean(b);
1551 return b;
1552 }
1553
1554 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1555 return NULL;
1556
1557 b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
1558 if (b) {
1559 __make_buffer_clean(b);
1560 return b;
1561 }
1562
1563 return NULL;
1564}
1565
1566/*
1567 * Wait until some other threads free some buffer or release hold count on
1568 * some buffer.
1569 *
1570 * This function is entered with c->lock held, drops it and regains it
1571 * before exiting.
1572 */
1573static void __wait_for_free_buffer(struct dm_bufio_client *c)
1574{
1575 DECLARE_WAITQUEUE(wait, current);
1576
1577 add_wait_queue(&c->free_buffer_wait, &wait);
1578 set_current_state(TASK_UNINTERRUPTIBLE);
1579 dm_bufio_unlock(c);
1580
1581 /*
1582	 * It's possible to miss a wake-up event since we don't always
1583	 * hold c->lock when wake_up is called. So we have a timeout here,
1584 * just in case.
1585 */
1586 io_schedule_timeout(5 * HZ);
1587
1588 remove_wait_queue(&c->free_buffer_wait, &wait);
1589
1590 dm_bufio_lock(c);
1591}
1592
1593enum new_flag {
1594 NF_FRESH = 0,
1595 NF_READ = 1,
1596 NF_GET = 2,
1597 NF_PREFETCH = 3
1598};
1599
1600/*
1601 * Allocate a new buffer. If the allocation is not possible, wait until
1602 * some other thread frees a buffer.
1603 *
1604 * May drop the lock and regain it.
1605 */
1606static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
1607{
1608 struct dm_buffer *b;
1609 bool tried_noio_alloc = false;
1610
1611 /*
1612 * dm-bufio is resistant to allocation failures (it just keeps
1613 * one buffer reserved in cases all the allocations fail).
1614 * So set flags to not try too hard:
1615 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
1616 * mutex and wait ourselves.
1617 * __GFP_NORETRY: don't retry and rather return failure
1618 * __GFP_NOMEMALLOC: don't use emergency reserves
1619 * __GFP_NOWARN: don't print a warning in case of failure
1620 *
1621 * For debugging, if we set the cache size to 1, no new buffers will
1622 * be allocated.
1623 */
1624 while (1) {
1625 if (dm_bufio_cache_size_latch != 1) {
1626 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1627 if (b)
1628 return b;
1629 }
1630
1631 if (nf == NF_PREFETCH)
1632 return NULL;
1633
1634 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
1635 dm_bufio_unlock(c);
1636 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1637 dm_bufio_lock(c);
1638 if (b)
1639 return b;
1640 tried_noio_alloc = true;
1641 }
1642
1643 if (!list_empty(&c->reserved_buffers)) {
1644 b = list_to_buffer(c->reserved_buffers.next);
1645 list_del(&b->lru.list);
1646 c->need_reserved_buffers++;
1647
1648 return b;
1649 }
1650
1651 b = __get_unclaimed_buffer(c);
1652 if (b)
1653 return b;
1654
1655 __wait_for_free_buffer(c);
1656 }
1657}
1658
1659static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
1660{
1661 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
1662
1663 if (!b)
1664 return NULL;
1665
1666 if (c->alloc_callback)
1667 c->alloc_callback(b);
1668
1669 return b;
1670}
1671
1672/*
1673 * Free a buffer and wake other threads waiting for free buffers.
1674 */
1675static void __free_buffer_wake(struct dm_buffer *b)
1676{
1677 struct dm_bufio_client *c = b->c;
1678
1679 b->block = -1;
1680 if (!c->need_reserved_buffers)
1681 free_buffer(b);
1682 else {
1683 list_add(&b->lru.list, &c->reserved_buffers);
1684 c->need_reserved_buffers--;
1685 }
1686
1687 /*
1688 * We hold the bufio lock here, so no one can add entries to the
1689 * wait queue anyway.
1690 */
1691 if (unlikely(waitqueue_active(&c->free_buffer_wait)))
1692 wake_up(&c->free_buffer_wait);
1693}
1694
1695static enum evict_result cleaned(struct dm_buffer *b, void *context)
1696{
1697 if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1698 return ER_DONT_EVICT; /* should never happen */
1699
1700 if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
1701 return ER_DONT_EVICT;
1702 else
1703 return ER_EVICT;
1704}
1705
1706static void __move_clean_buffers(struct dm_bufio_client *c)
1707{
1708 cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
1709}
1710
1711struct write_context {
1712 int no_wait;
1713 struct list_head *write_list;
1714};
1715
1716static enum it_action write_one(struct dm_buffer *b, void *context)
1717{
1718 struct write_context *wc = context;
1719
1720 if (wc->no_wait && test_bit(B_WRITING, &b->state))
1721 return IT_COMPLETE;
1722
1723 __write_dirty_buffer(b, wc->write_list);
1724 return IT_NEXT;
1725}
1726
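/*
 * Move buffers whose writes have completed back to the clean list, then
 * start (or queue) writeback for every buffer that is still dirty. With
 * no_wait set, stop at the first buffer that already has a write in
 * flight.
 */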
1727static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
1728 struct list_head *write_list)
1729{
1730 struct write_context wc = {.no_wait = no_wait, .write_list = write_list};
1731
1732 __move_clean_buffers(c);
1733 cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
1734}
1735
1736/*
1737 * Check if we're over the watermark.
1738 * If there are many more dirty buffers than clean ones, start writing
1739 * them back asynchronously.
1740 */
1741static void __check_watermark(struct dm_bufio_client *c,
1742 struct list_head *write_list)
1743{
1744 if (cache_count(&c->cache, LIST_DIRTY) >
1745 cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
1746 __write_dirty_buffers_async(c, 1, write_list);
1747}
1748
1749/*
1750 *--------------------------------------------------------------
1751 * Getting a buffer
1752 *--------------------------------------------------------------
1753 */
1754
1755static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
1756{
1757 /*
1758	 * Relying on waitqueue_active() is racy, but we sleep
1759	 * with io_schedule_timeout anyway.
1760 */
1761 if (cache_put(&c->cache, b) &&
1762 unlikely(waitqueue_active(&c->free_buffer_wait)))
1763 wake_up(&c->free_buffer_wait);
1764}
1765
1766/*
1767 * This assumes you have already checked the cache to see if the buffer
1768 * is already present (it will recheck after dropping the lock for allocation).
1769 */
1770static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1771 enum new_flag nf, int *need_submit,
1772 struct list_head *write_list)
1773{
1774 struct dm_buffer *b, *new_b = NULL;
1775
1776 *need_submit = 0;
1777
1778 /* This can't be called with NF_GET */
1779 if (WARN_ON_ONCE(nf == NF_GET))
1780 return NULL;
1781
1782 new_b = __alloc_buffer_wait(c, nf);
1783 if (!new_b)
1784 return NULL;
1785
1786 /*
1787	 * We've had a period where the mutex was unlocked, so we need to
1788 * recheck the buffer tree.
1789 */
1790 b = cache_get(&c->cache, block);
1791 if (b) {
1792 __free_buffer_wake(new_b);
1793 goto found_buffer;
1794 }
1795
1796 __check_watermark(c, write_list);
1797
1798 b = new_b;
1799 atomic_set(&b->hold_count, 1);
1800 WRITE_ONCE(b->last_accessed, jiffies);
1801 b->block = block;
1802 b->read_error = 0;
1803 b->write_error = 0;
1804 b->list_mode = LIST_CLEAN;
1805
1806 if (nf == NF_FRESH)
1807 b->state = 0;
1808 else {
1809 b->state = 1 << B_READING;
1810 *need_submit = 1;
1811 }
1812
1813 /*
1814 * We mustn't insert into the cache until the B_READING state
1815 * is set. Otherwise another thread could get it and use
1816 * it before it had been read.
1817 */
1818 cache_insert(&c->cache, b);
1819
1820 return b;
1821
1822found_buffer:
1823 if (nf == NF_PREFETCH) {
1824 cache_put_and_wake(c, b);
1825 return NULL;
1826 }
1827
1828 /*
1829 * Note: it is essential that we don't wait for the buffer to be
1830 * read if dm_bufio_get function is used. Both dm_bufio_get and
1831 * dm_bufio_prefetch can be used in the driver request routine.
1832 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1833 * the same buffer, it would deadlock if we waited.
1834 */
1835 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1836 cache_put_and_wake(c, b);
1837 return NULL;
1838 }
1839
1840 return b;
1841}
1842
1843/*
1844 * The endio routine for reading: set the error, clear the bit and wake up
1845 * anyone waiting on the buffer.
1846 */
1847static void read_endio(struct dm_buffer *b, blk_status_t status)
1848{
1849 b->read_error = status;
1850
1851 BUG_ON(!test_bit(B_READING, &b->state));
1852
1853 smp_mb__before_atomic();
1854 clear_bit(B_READING, &b->state);
1855 smp_mb__after_atomic();
1856
1857 wake_up_bit(&b->state, B_READING);
1858}
1859
1860/*
1861 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1862 * functions is similar except that dm_bufio_new doesn't read the
1863 * buffer from the disk (assuming that the caller overwrites all the data
1864 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1865 */
1866static void *new_read(struct dm_bufio_client *c, sector_t block,
1867 enum new_flag nf, struct dm_buffer **bp,
1868 unsigned short ioprio)
1869{
1870 int need_submit = 0;
1871 struct dm_buffer *b;
1872
1873 LIST_HEAD(write_list);
1874
1875 *bp = NULL;
1876
1877 /*
1878	 * Fast path: hopefully the block is already in the cache. No need
1879 * to get the client lock for this.
1880 */
1881 b = cache_get(&c->cache, block);
1882 if (b) {
1883 if (nf == NF_PREFETCH) {
1884 cache_put_and_wake(c, b);
1885 return NULL;
1886 }
1887
1888 /*
1889 * Note: it is essential that we don't wait for the buffer to be
1890 * read if dm_bufio_get function is used. Both dm_bufio_get and
1891 * dm_bufio_prefetch can be used in the driver request routine.
1892 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1893 * the same buffer, it would deadlock if we waited.
1894 */
1895 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1896 cache_put_and_wake(c, b);
1897 return NULL;
1898 }
1899 }
1900
1901 if (!b) {
1902 if (nf == NF_GET)
1903 return NULL;
1904
1905 dm_bufio_lock(c);
1906 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1907 dm_bufio_unlock(c);
1908 }
1909
1910#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1911 if (b && (atomic_read(&b->hold_count) == 1))
1912 buffer_record_stack(b);
1913#endif
1914
1915 __flush_write_list(&write_list);
1916
1917 if (!b)
1918 return NULL;
1919
1920 if (need_submit)
1921 submit_io(b, REQ_OP_READ, ioprio, read_endio);
1922
1923 if (nf != NF_GET) /* we already tested this condition above */
1924 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1925
1926 if (b->read_error) {
1927 int error = blk_status_to_errno(b->read_error);
1928
1929 dm_bufio_release(b);
1930
1931 return ERR_PTR(error);
1932 }
1933
1934 *bp = b;
1935
1936 return b->data;
1937}
1938
1939void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1940 struct dm_buffer **bp)
1941{
1942 return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
1943}
1944EXPORT_SYMBOL_GPL(dm_bufio_get);
1945
1946static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1947 struct dm_buffer **bp, unsigned short ioprio)
1948{
1949 if (WARN_ON_ONCE(dm_bufio_in_request()))
1950 return ERR_PTR(-EINVAL);
1951
1952 return new_read(c, block, NF_READ, bp, ioprio);
1953}
1954
1955void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1956 struct dm_buffer **bp)
1957{
1958 return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT);
1959}
1960EXPORT_SYMBOL_GPL(dm_bufio_read);
1961
1962void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
1963 struct dm_buffer **bp, unsigned short ioprio)
1964{
1965 return __dm_bufio_read(c, block, bp, ioprio);
1966}
1967EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);
1968
1969void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1970 struct dm_buffer **bp)
1971{
1972 if (WARN_ON_ONCE(dm_bufio_in_request()))
1973 return ERR_PTR(-EINVAL);
1974
1975 return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
1976}
1977EXPORT_SYMBOL_GPL(dm_bufio_new);
1978
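/*
 * Read-ahead: start reads for blocks that are not already cached and
 * release the buffers immediately; the caller never waits for the I/O.
 */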
1979static void __dm_bufio_prefetch(struct dm_bufio_client *c,
1980 sector_t block, unsigned int n_blocks,
1981 unsigned short ioprio)
1982{
1983 struct blk_plug plug;
1984
1985 LIST_HEAD(write_list);
1986
1987 if (WARN_ON_ONCE(dm_bufio_in_request()))
1988 return; /* should never happen */
1989
1990 blk_start_plug(&plug);
1991
1992 for (; n_blocks--; block++) {
1993 int need_submit;
1994 struct dm_buffer *b;
1995
1996 b = cache_get(&c->cache, block);
1997 if (b) {
1998 /* already in cache */
1999 cache_put_and_wake(c, b);
2000 continue;
2001 }
2002
2003 dm_bufio_lock(c);
2004 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
2005 &write_list);
2006 if (unlikely(!list_empty(&write_list))) {
2007 dm_bufio_unlock(c);
2008 blk_finish_plug(&plug);
2009 __flush_write_list(&write_list);
2010 blk_start_plug(&plug);
2011 dm_bufio_lock(c);
2012 }
2013 if (unlikely(b != NULL)) {
2014 dm_bufio_unlock(c);
2015
2016 if (need_submit)
2017 submit_io(b, REQ_OP_READ, ioprio, read_endio);
2018 dm_bufio_release(b);
2019
2020 cond_resched();
2021
2022 if (!n_blocks)
2023 goto flush_plug;
2024 dm_bufio_lock(c);
2025 }
2026 dm_bufio_unlock(c);
2027 }
2028
2029flush_plug:
2030 blk_finish_plug(&plug);
2031}
2032
2033void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
2034{
2035 return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT);
2036}
2037EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
2038
2039void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
2040 unsigned int n_blocks, unsigned short ioprio)
2041{
2042 return __dm_bufio_prefetch(c, block, n_blocks, ioprio);
2043}
2044EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);
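
/*
 * Illustrative usage sketch (hypothetical caller; use() is a placeholder):
 * prefetch issues the reads without blocking, so a later lookup will usually
 * find the blocks already cached. dm_bufio_get() is safe in request context
 * because it never waits; it returns NULL if the block is not cached (or is
 * still being read) and an ERR_PTR() value if a previous read failed.
 *
 *	struct dm_buffer *b;
 *	void *data;
 *
 *	dm_bufio_prefetch(client, block, 8);
 *	...
 *	data = dm_bufio_get(client, block, &b);
 *	if (!IS_ERR_OR_NULL(data)) {
 *		use(data);
 *		dm_bufio_release(b);
 *	}
 */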
2045
2046void dm_bufio_release(struct dm_buffer *b)
2047{
2048 struct dm_bufio_client *c = b->c;
2049
2050 /*
2051 * If there were errors on the buffer, and the buffer is not
2052 * to be written, free the buffer. There is no point in caching
2053 * an invalid buffer.
2054 */
2055 if ((b->read_error || b->write_error) &&
2056 !test_bit_acquire(B_READING, &b->state) &&
2057 !test_bit(B_WRITING, &b->state) &&
2058 !test_bit(B_DIRTY, &b->state)) {
2059 dm_bufio_lock(c);
2060
2061 /* cache remove can fail if there are other holders */
2062 if (cache_remove(&c->cache, b)) {
2063 __free_buffer_wake(b);
2064 dm_bufio_unlock(c);
2065 return;
2066 }
2067
2068 dm_bufio_unlock(c);
2069 }
2070
2071 cache_put_and_wake(c, b);
2072}
2073EXPORT_SYMBOL_GPL(dm_bufio_release);
2074
2075void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
2076 unsigned int start, unsigned int end)
2077{
2078 struct dm_bufio_client *c = b->c;
2079
2080 BUG_ON(start >= end);
2081 BUG_ON(end > b->c->block_size);
2082
2083 dm_bufio_lock(c);
2084
2085 BUG_ON(test_bit(B_READING, &b->state));
2086
2087 if (!test_and_set_bit(B_DIRTY, &b->state)) {
2088 b->dirty_start = start;
2089 b->dirty_end = end;
2090 cache_mark(&c->cache, b, LIST_DIRTY);
2091 } else {
2092 if (start < b->dirty_start)
2093 b->dirty_start = start;
2094 if (end > b->dirty_end)
2095 b->dirty_end = end;
2096 }
2097
2098 dm_bufio_unlock(c);
2099}
2100EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
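
/*
 * Illustrative usage sketch (hypothetical caller; 'offset' and 'len' are the
 * caller's, and offset + len must not exceed the block size): when only part
 * of a block changed, dirtying just that byte range can shrink the eventual
 * write.
 *
 *	memcpy(data + offset, src, len);
 *	dm_bufio_mark_partial_buffer_dirty(b, offset, offset + len);
 *	dm_bufio_release(b);
 */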
2101
2102void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
2103{
2104 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
2105}
2106EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
2107
2108void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
2109{
2110 LIST_HEAD(write_list);
2111
2112 if (WARN_ON_ONCE(dm_bufio_in_request()))
2113 return; /* should never happen */
2114
2115 dm_bufio_lock(c);
2116 __write_dirty_buffers_async(c, 0, &write_list);
2117 dm_bufio_unlock(c);
2118 __flush_write_list(&write_list);
2119}
2120EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
2121
2122/*
2123 * For performance, it is essential that the buffers are written asynchronously
2124 * and simultaneously (so that the block layer can merge the writes) and then
2125 * waited upon.
2126 *
2127 * Finally, we flush the hardware disk cache.
2128 */
2129static bool is_writing(struct lru_entry *e, void *context)
2130{
2131 struct dm_buffer *b = le_to_buffer(e);
2132
2133 return test_bit(B_WRITING, &b->state);
2134}
2135
2136int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
2137{
2138 int a, f;
2139 unsigned long nr_buffers;
2140 struct lru_entry *e;
2141 struct lru_iter it;
2142
2143 LIST_HEAD(write_list);
2144
2145 dm_bufio_lock(c);
2146 __write_dirty_buffers_async(c, 0, &write_list);
2147 dm_bufio_unlock(c);
2148 __flush_write_list(&write_list);
2149 dm_bufio_lock(c);
2150
2151 nr_buffers = cache_count(&c->cache, LIST_DIRTY);
2152 lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
2153 while ((e = lru_iter_next(&it, is_writing, c))) {
2154 struct dm_buffer *b = le_to_buffer(e);
2155 __cache_inc_buffer(b);
2156
2157 BUG_ON(test_bit(B_READING, &b->state));
2158
2159 if (nr_buffers) {
2160 nr_buffers--;
2161 dm_bufio_unlock(c);
2162 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2163 dm_bufio_lock(c);
2164 } else {
2165 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2166 }
2167
2168 if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
2169 cache_mark(&c->cache, b, LIST_CLEAN);
2170
2171 cache_put_and_wake(c, b);
2172
2173 cond_resched();
2174 }
2175 lru_iter_end(&it);
2176
2177 wake_up(&c->free_buffer_wait);
2178 dm_bufio_unlock(c);
2179
2180 a = xchg(&c->async_write_error, 0);
2181 f = dm_bufio_issue_flush(c);
2182 if (a)
2183 return a;
2184
2185 return f;
2186}
2187EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
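
/*
 * Illustrative usage sketch (hypothetical commit path): dirty buffers are
 * released first and then written back in one batch, so the block layer can
 * merge the writes; dm_bufio_write_dirty_buffers() waits for them and then
 * flushes the disk cache.
 *
 *	dm_bufio_mark_buffer_dirty(b);
 *	dm_bufio_release(b);
 *	...
 *	r = dm_bufio_write_dirty_buffers(client);
 *	if (r)
 *		return r;
 */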
2188
2189/*
2190 * Use dm-io to send an empty barrier to flush the device.
2191 */
2192int dm_bufio_issue_flush(struct dm_bufio_client *c)
2193{
2194 struct dm_io_request io_req = {
2195 .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
2196 .mem.type = DM_IO_KMEM,
2197 .mem.ptr.addr = NULL,
2198 .client = c->dm_io,
2199 };
2200 struct dm_io_region io_reg = {
2201 .bdev = c->bdev,
2202 .sector = 0,
2203 .count = 0,
2204 };
2205
2206 if (WARN_ON_ONCE(dm_bufio_in_request()))
2207 return -EINVAL;
2208
2209 return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2210}
2211EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
2212
2213/*
2214 * Use dm-io to send a discard request to flush the device.
2215 */
2216int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
2217{
2218 struct dm_io_request io_req = {
2219 .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
2220 .mem.type = DM_IO_KMEM,
2221 .mem.ptr.addr = NULL,
2222 .client = c->dm_io,
2223 };
2224 struct dm_io_region io_reg = {
2225 .bdev = c->bdev,
2226 .sector = block_to_sector(c, block),
2227 .count = block_to_sector(c, count),
2228 };
2229
2230 if (WARN_ON_ONCE(dm_bufio_in_request()))
2231 return -EINVAL; /* discards are optional */
2232
2233 return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2234}
2235EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
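
/*
 * Illustrative usage sketch (hypothetical caller): 'block' and 'count' are
 * given in blocks, not sectors; block_to_sector() above does the conversion.
 * A caller will typically also drop any cached copies of the discarded
 * range:
 *
 *	r = dm_bufio_issue_discard(client, block, count);
 *	if (!r)
 *		dm_bufio_forget_buffers(client, block, count);
 */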
2236
2237static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
2238{
2239 struct dm_buffer *b;
2240
2241 b = cache_get(&c->cache, block);
2242 if (b) {
2243 if (likely(!smp_load_acquire(&b->state))) {
2244 if (cache_remove(&c->cache, b))
2245 __free_buffer_wake(b);
2246 else
2247 cache_put_and_wake(c, b);
2248 } else {
2249 cache_put_and_wake(c, b);
2250 }
2251 }
2252
2253 return b ? true : false;
2254}
2255
2256/*
2257 * Free the given buffer.
2258 *
2259 * This is just a hint; if the buffer is in use or dirty, this function
2260 * does nothing.
2261 */
2262void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
2263{
2264 dm_bufio_lock(c);
2265 forget_buffer(c, block);
2266 dm_bufio_unlock(c);
2267}
2268EXPORT_SYMBOL_GPL(dm_bufio_forget);
2269
2270static enum evict_result idle(struct dm_buffer *b, void *context)
2271{
2272 return b->state ? ER_DONT_EVICT : ER_EVICT;
2273}
2274
2275void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
2276{
2277 dm_bufio_lock(c);
2278 cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
2279 dm_bufio_unlock(c);
2280}
2281EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
2282
2283void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
2284{
2285 c->minimum_buffers = n;
2286}
2287EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
2288
2289unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
2290{
2291 return c->block_size;
2292}
2293EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
2294
2295sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
2296{
2297 sector_t s = bdev_nr_sectors(c->bdev);
2298
2299 if (s >= c->start)
2300 s -= c->start;
2301 else
2302 s = 0;
2303 if (likely(c->sectors_per_block_bits >= 0))
2304 s >>= c->sectors_per_block_bits;
2305 else
2306 sector_div(s, c->block_size >> SECTOR_SHIFT);
2307 return s;
2308}
2309EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
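
/*
 * Worked example (illustrative): with 4 KiB blocks, sectors_per_block_bits
 * is 3, so a 1 GiB device (2097152 sectors) with no start offset reports
 * 2097152 >> 3 = 262144 blocks.
 */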
2310
2311struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
2312{
2313 return c->dm_io;
2314}
2315EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
2316
2317sector_t dm_bufio_get_block_number(struct dm_buffer *b)
2318{
2319 return b->block;
2320}
2321EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
2322
2323void *dm_bufio_get_block_data(struct dm_buffer *b)
2324{
2325 return b->data;
2326}
2327EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
2328
2329void *dm_bufio_get_aux_data(struct dm_buffer *b)
2330{
2331 return b + 1;
2332}
2333EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
2334
2335struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
2336{
2337 return b->c;
2338}
2339EXPORT_SYMBOL_GPL(dm_bufio_get_client);
2340
2341static enum it_action warn_leak(struct dm_buffer *b, void *context)
2342{
2343 bool *warned = context;
2344
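	/* WARN only once, for the first leaked buffer; every leak is still logged below */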
2345 WARN_ON(!(*warned));
2346 *warned = true;
2347 DMERR("leaked buffer %llx, hold count %u, list %d",
2348 (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode);
2349#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2350 stack_trace_print(b->stack_entries, b->stack_len, 1);
2351 /* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
2352 atomic_set(&b->hold_count, 0);
2353#endif
2354 return IT_NEXT;
2355}
2356
2357static void drop_buffers(struct dm_bufio_client *c)
2358{
2359 int i;
2360 struct dm_buffer *b;
2361
2362 if (WARN_ON(dm_bufio_in_request()))
2363 return; /* should never happen */
2364
2365 /*
2366 * An optimization so that the buffers are not written one-by-one.
2367 */
2368 dm_bufio_write_dirty_buffers_async(c);
2369
2370 dm_bufio_lock(c);
2371
2372 while ((b = __get_unclaimed_buffer(c)))
2373 __free_buffer_wake(b);
2374
2375 for (i = 0; i < LIST_SIZE; i++) {
2376 bool warned = false;
2377
2378 cache_iterate(&c->cache, i, warn_leak, &warned);
2379 }
2380
2381#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2382 while ((b = __get_unclaimed_buffer(c)))
2383 __free_buffer_wake(b);
2384#endif
2385
2386 for (i = 0; i < LIST_SIZE; i++)
2387 WARN_ON(cache_count(&c->cache, i));
2388
2389 dm_bufio_unlock(c);
2390}
2391
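/*
 * Worked example (illustrative): get_retain_buffers() converts the
 * retain_bytes setting into a buffer count; e.g. with retain_bytes at
 * 256 KiB and 4 KiB blocks (sectors_per_block_bits == 3) it returns
 * 262144 >> (3 + 9) = 64 buffers.
 */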
2392static unsigned long get_retain_buffers(struct dm_bufio_client *c)
2393{
2394 unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
2395
2396 if (likely(c->sectors_per_block_bits >= 0))
2397 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
2398 else
2399 retain_bytes /= c->block_size;
2400
2401 return retain_bytes;
2402}
2403
2404static void __scan(struct dm_bufio_client *c)
2405{
2406 int l;
2407 struct dm_buffer *b;
2408 unsigned long freed = 0;
2409 unsigned long retain_target = get_retain_buffers(c);
2410 unsigned long count = cache_total(&c->cache);
2411
2412 for (l = 0; l < LIST_SIZE; l++) {
2413 while (true) {
2414 if (count - freed <= retain_target)
2415 atomic_long_set(&c->need_shrink, 0);
2416 if (!atomic_long_read(&c->need_shrink))
2417 break;
2418
2419 b = cache_evict(&c->cache, l,
2420 l == LIST_CLEAN ? is_clean : is_dirty, c);
2421 if (!b)
2422 break;
2423
2424 __make_buffer_clean(b);
2425 __free_buffer_wake(b);
2426
2427 atomic_long_dec(&c->need_shrink);
2428 freed++;
2429 cond_resched();
2430 }
2431 }
2432}
2433
2434static void shrink_work(struct work_struct *w)
2435{
2436 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
2437
2438 dm_bufio_lock(c);
2439 __scan(c);
2440 dm_bufio_unlock(c);
2441}
2442
2443static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2444{
2445 struct dm_bufio_client *c;
2446
2447 c = shrink->private_data;
2448 atomic_long_add(sc->nr_to_scan, &c->need_shrink);
2449 queue_work(dm_bufio_wq, &c->shrink_work);
2450
2451 return sc->nr_to_scan;
2452}
2453
2454static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
2455{
2456 struct dm_bufio_client *c = shrink->private_data;
2457 unsigned long count = cache_total(&c->cache);
2458 unsigned long retain_target = get_retain_buffers(c);
2459 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
2460
2461 if (unlikely(count < retain_target))
2462 count = 0;
2463 else
2464 count -= retain_target;
2465
2466 if (unlikely(count < queued_for_cleanup))
2467 count = 0;
2468 else
2469 count -= queued_for_cleanup;
2470
2471 return count;
2472}
2473
2474/*
2475 * Create the buffering interface
2476 */
2477struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
2478 unsigned int reserved_buffers, unsigned int aux_size,
2479 void (*alloc_callback)(struct dm_buffer *),
2480 void (*write_callback)(struct dm_buffer *),
2481 unsigned int flags)
2482{
2483 int r;
2484 unsigned int num_locks;
2485 struct dm_bufio_client *c;
2486 char slab_name[64];
2487 static atomic_t seqno = ATOMIC_INIT(0);
2488
2489 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
2490 DMERR("%s: block size not specified or is not multiple of 512b", __func__);
2491 r = -EINVAL;
2492 goto bad_client;
2493 }
2494
2495 num_locks = dm_num_hash_locks();
2496 c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2497 if (!c) {
2498 r = -ENOMEM;
2499 goto bad_client;
2500 }
2501 cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
2502
2503 c->bdev = bdev;
2504 c->block_size = block_size;
2505 if (is_power_of_2(block_size))
2506 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
2507 else
2508 c->sectors_per_block_bits = -1;
2509
2510 c->alloc_callback = alloc_callback;
2511 c->write_callback = write_callback;
2512
2513 if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
2514 c->no_sleep = true;
2515 static_branch_inc(&no_sleep_enabled);
2516 }
2517
2518 mutex_init(&c->lock);
2519 spin_lock_init(&c->spinlock);
2520 INIT_LIST_HEAD(&c->reserved_buffers);
2521 c->need_reserved_buffers = reserved_buffers;
2522
2523 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
2524
2525 init_waitqueue_head(&c->free_buffer_wait);
2526 c->async_write_error = 0;
2527
2528 c->dm_io = dm_io_client_create();
2529 if (IS_ERR(c->dm_io)) {
2530 r = PTR_ERR(c->dm_io);
2531 goto bad_dm_io;
2532 }
2533
2534 if (block_size <= KMALLOC_MAX_SIZE && !is_power_of_2(block_size)) {
2535 unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
2536
2537 snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u",
2538 block_size, atomic_inc_return(&seqno));
2539 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
2540 SLAB_RECLAIM_ACCOUNT, NULL);
2541 if (!c->slab_cache) {
2542 r = -ENOMEM;
2543 goto bad;
2544 }
2545 }
2546 if (aux_size)
2547 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u",
2548 aux_size, atomic_inc_return(&seqno));
2549 else
2550 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u",
2551 atomic_inc_return(&seqno));
2552 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
2553 0, SLAB_RECLAIM_ACCOUNT, NULL);
2554 if (!c->slab_buffer) {
2555 r = -ENOMEM;
2556 goto bad;
2557 }
2558
2559 while (c->need_reserved_buffers) {
2560 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
2561
2562 if (!b) {
2563 r = -ENOMEM;
2564 goto bad;
2565 }
2566 __free_buffer_wake(b);
2567 }
2568
2569 INIT_WORK(&c->shrink_work, shrink_work);
2570 atomic_long_set(&c->need_shrink, 0);
2571
2572 c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
2573 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2574 if (!c->shrinker) {
2575 r = -ENOMEM;
2576 goto bad;
2577 }
2578
2579 c->shrinker->count_objects = dm_bufio_shrink_count;
2580 c->shrinker->scan_objects = dm_bufio_shrink_scan;
2581 c->shrinker->seeks = 1;
2582 c->shrinker->batch = 0;
2583 c->shrinker->private_data = c;
2584
2585 shrinker_register(c->shrinker);
2586
2587 mutex_lock(&dm_bufio_clients_lock);
2588 dm_bufio_client_count++;
2589 list_add(&c->client_list, &dm_bufio_all_clients);
2590 __cache_size_refresh();
2591 mutex_unlock(&dm_bufio_clients_lock);
2592
2593 return c;
2594
2595bad:
2596 while (!list_empty(&c->reserved_buffers)) {
2597 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2598
2599 list_del(&b->lru.list);
2600 free_buffer(b);
2601 }
2602 kmem_cache_destroy(c->slab_cache);
2603 kmem_cache_destroy(c->slab_buffer);
2604 dm_io_client_destroy(c->dm_io);
2605bad_dm_io:
2606 mutex_destroy(&c->lock);
2607 if (c->no_sleep)
2608 static_branch_dec(&no_sleep_enabled);
2609 kfree(c);
2610bad_client:
2611 return ERR_PTR(r);
2612}
2613EXPORT_SYMBOL_GPL(dm_bufio_client_create);
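
/*
 * Illustrative sketch (hypothetical target ctr/dtr; the 4 KiB block size,
 * single reserved buffer and absence of callbacks are assumptions):
 *
 *	c = dm_bufio_client_create(dev->bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */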
2614
2615/*
2616 * Free the buffering interface.
2617 * It is required that there are no outstanding references to any buffers.
2618 */
2619void dm_bufio_client_destroy(struct dm_bufio_client *c)
2620{
2621 unsigned int i;
2622
2623 drop_buffers(c);
2624
2625 shrinker_free(c->shrinker);
2626 flush_work(&c->shrink_work);
2627
2628 mutex_lock(&dm_bufio_clients_lock);
2629
2630 list_del(&c->client_list);
2631 dm_bufio_client_count--;
2632 __cache_size_refresh();
2633
2634 mutex_unlock(&dm_bufio_clients_lock);
2635
2636 WARN_ON(c->need_reserved_buffers);
2637
2638 while (!list_empty(&c->reserved_buffers)) {
2639 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2640
2641 list_del(&b->lru.list);
2642 free_buffer(b);
2643 }
2644
2645 for (i = 0; i < LIST_SIZE; i++)
2646 if (cache_count(&c->cache, i))
2647 DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
2648
2649 for (i = 0; i < LIST_SIZE; i++)
2650 WARN_ON(cache_count(&c->cache, i));
2651
2652 cache_destroy(&c->cache);
2653 kmem_cache_destroy(c->slab_cache);
2654 kmem_cache_destroy(c->slab_buffer);
2655 dm_io_client_destroy(c->dm_io);
2656 mutex_destroy(&c->lock);
2657 if (c->no_sleep)
2658 static_branch_dec(&no_sleep_enabled);
2659 kfree(c);
2660}
2661EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
2662
2663void dm_bufio_client_reset(struct dm_bufio_client *c)
2664{
2665 drop_buffers(c);
2666 flush_work(&c->shrink_work);
2667}
2668EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
2669
2670void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
2671{
2672 c->start = start;
2673}
2674EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
2675
2676/*--------------------------------------------------------------*/
2677
2678static unsigned int get_max_age_hz(void)
2679{
2680 unsigned int max_age = READ_ONCE(dm_bufio_max_age);
2681
2682 if (max_age > UINT_MAX / HZ)
2683 max_age = UINT_MAX / HZ;
2684
2685 return max_age * HZ;
2686}
2687
2688static bool older_than(struct dm_buffer *b, unsigned long age_hz)
2689{
2690 return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
2691}
2692
2693struct evict_params {
2694 gfp_t gfp;
2695 unsigned long age_hz;
2696
2697 /*
2698 * This gets updated with the largest last_accessed (i.e. most
2699 * recently used) of the evicted buffers. It will not be reinitialised
2700 * by __evict_many(), so you can use it across multiple invocations.
2701 */
2702 unsigned long last_accessed;
2703};
2704
2705/*
2706 * We may not be able to evict this buffer if I/O is pending or the client
2707 * is still using it.
2708 *
2709 * And if GFP_NOFS is used, we must not do any I/O because we hold
2710 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
2711 * rerouted to a different bufio client.
2712 */
2713static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2714{
2715 struct evict_params *params = context;
2716
2717 if (!(params->gfp & __GFP_FS) ||
2718 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
2719 if (test_bit_acquire(B_READING, &b->state) ||
2720 test_bit(B_WRITING, &b->state) ||
2721 test_bit(B_DIRTY, &b->state))
2722 return ER_DONT_EVICT;
2723 }
2724
2725 return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
2726}
2727
2728static unsigned long __evict_many(struct dm_bufio_client *c,
2729 struct evict_params *params,
2730 int list_mode, unsigned long max_count)
2731{
2732 unsigned long count;
2733 unsigned long last_accessed;
2734 struct dm_buffer *b;
2735
2736 for (count = 0; count < max_count; count++) {
2737 b = cache_evict(&c->cache, list_mode, select_for_evict, params);
2738 if (!b)
2739 break;
2740
2741 last_accessed = READ_ONCE(b->last_accessed);
2742 if (time_after_eq(params->last_accessed, last_accessed))
2743 params->last_accessed = last_accessed;
2744
2745 __make_buffer_clean(b);
2746 __free_buffer_wake(b);
2747
2748 cond_resched();
2749 }
2750
2751 return count;
2752}
2753
2754static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
2755{
2756 struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
2757 unsigned long retain = get_retain_buffers(c);
2758 unsigned long count;
2759 LIST_HEAD(write_list);
2760
2761 dm_bufio_lock(c);
2762
2763 __check_watermark(c, &write_list);
2764 if (unlikely(!list_empty(&write_list))) {
2765 dm_bufio_unlock(c);
2766 __flush_write_list(&write_list);
2767 dm_bufio_lock(c);
2768 }
2769
2770 count = cache_total(&c->cache);
2771 if (count > retain)
2772 __evict_many(c, &params, LIST_CLEAN, count - retain);
2773
2774 dm_bufio_unlock(c);
2775}
2776
2777static void cleanup_old_buffers(void)
2778{
2779 unsigned long max_age_hz = get_max_age_hz();
2780 struct dm_bufio_client *c;
2781
2782 mutex_lock(&dm_bufio_clients_lock);
2783
2784 __cache_size_refresh();
2785
2786 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2787 evict_old_buffers(c, max_age_hz);
2788
2789 mutex_unlock(&dm_bufio_clients_lock);
2790}
2791
2792static void work_fn(struct work_struct *w)
2793{
2794 cleanup_old_buffers();
2795
2796 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2797 DM_BUFIO_WORK_TIMER_SECS * HZ);
2798}
2799
2800/*--------------------------------------------------------------*/
2801
2802/*
2803 * Global cleanup tries to evict the oldest buffers from across _all_
2804 * the clients. It does this by repeatedly evicting a few buffers from
2805 * the client that holds the oldest buffer. It's approximate, but hopefully
2806 * good enough.
2807 */
2808static struct dm_bufio_client *__pop_client(void)
2809{
2810 struct list_head *h;
2811
2812 if (list_empty(&dm_bufio_all_clients))
2813 return NULL;
2814
2815 h = dm_bufio_all_clients.next;
2816 list_del(h);
2817 return container_of(h, struct dm_bufio_client, client_list);
2818}
2819
2820/*
2821 * Inserts the client in the global client list based on its
2822 * 'oldest_buffer' field.
2823 */
2824static void __insert_client(struct dm_bufio_client *new_client)
2825{
2826 struct dm_bufio_client *c;
2827 struct list_head *h = dm_bufio_all_clients.next;
2828
2829 while (h != &dm_bufio_all_clients) {
2830 c = container_of(h, struct dm_bufio_client, client_list);
2831 if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
2832 break;
2833 h = h->next;
2834 }
2835
2836 list_add_tail(&new_client->client_list, h);
2837}
2838
2839static unsigned long __evict_a_few(unsigned long nr_buffers)
2840{
2841 unsigned long count;
2842 struct dm_bufio_client *c;
2843 struct evict_params params = {
2844 .gfp = GFP_KERNEL,
2845 .age_hz = 0,
2846 /* set to jiffies in case there are no buffers in this client */
2847 .last_accessed = jiffies
2848 };
2849
2850 c = __pop_client();
2851 if (!c)
2852 return 0;
2853
2854 dm_bufio_lock(c);
2855 count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
2856 dm_bufio_unlock(c);
2857
2858 if (count)
2859 c->oldest_buffer = params.last_accessed;
2860 __insert_client(c);
2861
2862 return count;
2863}
2864
2865static void check_watermarks(void)
2866{
2867 LIST_HEAD(write_list);
2868 struct dm_bufio_client *c;
2869
2870 mutex_lock(&dm_bufio_clients_lock);
2871 list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
2872 dm_bufio_lock(c);
2873 __check_watermark(c, &write_list);
2874 dm_bufio_unlock(c);
2875 }
2876 mutex_unlock(&dm_bufio_clients_lock);
2877
2878 __flush_write_list(&write_list);
2879}
2880
2881static void evict_old(void)
2882{
2883 unsigned long threshold = dm_bufio_cache_size -
2884 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
2885
2886 mutex_lock(&dm_bufio_clients_lock);
2887 while (dm_bufio_current_allocated > threshold) {
2888 if (!__evict_a_few(64))
2889 break;
2890 cond_resched();
2891 }
2892 mutex_unlock(&dm_bufio_clients_lock);
2893}
2894
2895static void do_global_cleanup(struct work_struct *w)
2896{
2897 check_watermarks();
2898 evict_old();
2899}
2900
2901/*
2902 *--------------------------------------------------------------
2903 * Module setup
2904 *--------------------------------------------------------------
2905 */
2906
2907/*
2908 * This is called only once for the whole dm_bufio module.
2910 * It initializes the memory limit.
2910 */
2911static int __init dm_bufio_init(void)
2912{
2913 __u64 mem;
2914
2915 dm_bufio_allocated_kmem_cache = 0;
2916 dm_bufio_allocated_kmalloc = 0;
2917 dm_bufio_allocated_get_free_pages = 0;
2918 dm_bufio_allocated_vmalloc = 0;
2919 dm_bufio_current_allocated = 0;
2920
2921 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2922 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2923
2924 if (mem > ULONG_MAX)
2925 mem = ULONG_MAX;
2926
2927#ifdef CONFIG_MMU
2928 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2929 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2930#endif
2931
2932 dm_bufio_default_cache_size = mem;
2933
2934 mutex_lock(&dm_bufio_clients_lock);
2935 __cache_size_refresh();
2936 mutex_unlock(&dm_bufio_clients_lock);
2937
2938 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2939 if (!dm_bufio_wq)
2940 return -ENOMEM;
2941
2942 INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2943 INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2944 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2945 DM_BUFIO_WORK_TIMER_SECS * HZ);
2946
2947 return 0;
2948}
2949
2950/*
2951 * This is called once when unloading the dm_bufio module.
2952 */
2953static void __exit dm_bufio_exit(void)
2954{
2955 int bug = 0;
2956
2957 cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2958 destroy_workqueue(dm_bufio_wq);
2959
2960 if (dm_bufio_client_count) {
2961 DMCRIT("%s: dm_bufio_client_count leaked: %d",
2962 __func__, dm_bufio_client_count);
2963 bug = 1;
2964 }
2965
2966 if (dm_bufio_current_allocated) {
2967 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2968 __func__, dm_bufio_current_allocated);
2969 bug = 1;
2970 }
2971
2972 if (dm_bufio_allocated_get_free_pages) {
2973 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2974 __func__, dm_bufio_allocated_get_free_pages);
2975 bug = 1;
2976 }
2977
2978 if (dm_bufio_allocated_vmalloc) {
2979 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
2980 __func__, dm_bufio_allocated_vmalloc);
2981 bug = 1;
2982 }
2983
2984 WARN_ON(bug); /* leaks are not worth crashing the system */
2985}
2986
2987module_init(dm_bufio_init)
2988module_exit(dm_bufio_exit)
2989
2990module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
2991MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2992
2993module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2994MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2995
2996module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
2997MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2998
2999module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
3000MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
3001
3002module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
3003MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
3004
3005module_param_named(allocated_kmalloc_bytes, dm_bufio_allocated_kmalloc, ulong, 0444);
3006MODULE_PARM_DESC(allocated_kmalloc_bytes, "Memory allocated with kmalloc");
3007
3008module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
3009MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
3010
3011module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
3012MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
3013
3014module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
3015MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
3016
3017MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
3018MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
3019MODULE_LICENSE("GPL");