1/*
2 * Compressed RAM block device
3 *
4 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
5 * 2012, 2013 Minchan Kim
6 *
7 * This code is released using a dual license strategy: BSD/GPL
8 * You can choose the licence that better fits your requirements.
9 *
10 * Released under the terms of 3-clause BSD License
11 * Released under the terms of GNU General Public License Version 2.0
12 *
13 */
14
15#define KMSG_COMPONENT "zram"
16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18#ifdef CONFIG_ZRAM_DEBUG
19#define DEBUG
20#endif
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/bio.h>
25#include <linux/bitops.h>
26#include <linux/blkdev.h>
27#include <linux/buffer_head.h>
28#include <linux/device.h>
29#include <linux/genhd.h>
30#include <linux/highmem.h>
31#include <linux/slab.h>
32#include <linux/string.h>
33#include <linux/vmalloc.h>
34#include <linux/err.h>
35
36#include "zram_drv.h"
37
38/* Globals */
39static int zram_major;
40static struct zram *zram_devices;
41static const char *default_compressor = "lzo";
42
43/* Module params (documentation at end) */
44static unsigned int num_devices = 1;
45
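/*
 * ZRAM_ATTR_RO(name) generates a read-only sysfs attribute show handler
 * that prints the atomic64 counter zram->stats.<name>; for example,
 * ZRAM_ATTR_RO(num_reads) backs /sys/block/zram<id>/num_reads.
 */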
46#define ZRAM_ATTR_RO(name) \
47static ssize_t zram_attr_##name##_show(struct device *d, \
48 struct device_attribute *attr, char *b) \
49{ \
50 struct zram *zram = dev_to_zram(d); \
51 return scnprintf(b, PAGE_SIZE, "%llu\n", \
52 (u64)atomic64_read(&zram->stats.name)); \
53} \
54static struct device_attribute dev_attr_##name = \
55 __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
56
57static inline int init_done(struct zram *zram)
58{
59 return zram->meta != NULL;
60}
61
62static inline struct zram *dev_to_zram(struct device *dev)
63{
64 return (struct zram *)dev_to_disk(dev)->private_data;
65}
66
67static ssize_t disksize_show(struct device *dev,
68 struct device_attribute *attr, char *buf)
69{
70 struct zram *zram = dev_to_zram(dev);
71
72 return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
73}
74
75static ssize_t initstate_show(struct device *dev,
76 struct device_attribute *attr, char *buf)
77{
78 u32 val;
79 struct zram *zram = dev_to_zram(dev);
80
81 down_read(&zram->init_lock);
82 val = init_done(zram);
83 up_read(&zram->init_lock);
84
85 return scnprintf(buf, PAGE_SIZE, "%u\n", val);
86}
87
88static ssize_t orig_data_size_show(struct device *dev,
89 struct device_attribute *attr, char *buf)
90{
91 struct zram *zram = dev_to_zram(dev);
92
93 return scnprintf(buf, PAGE_SIZE, "%llu\n",
94 (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
95}
96
97static ssize_t mem_used_total_show(struct device *dev,
98 struct device_attribute *attr, char *buf)
99{
100 u64 val = 0;
101 struct zram *zram = dev_to_zram(dev);
102 struct zram_meta *meta = zram->meta;
103
104 down_read(&zram->init_lock);
105 if (init_done(zram))
106 val = zs_get_total_size_bytes(meta->mem_pool);
107 up_read(&zram->init_lock);
108
109 return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
110}
111
112static ssize_t max_comp_streams_show(struct device *dev,
113 struct device_attribute *attr, char *buf)
114{
115 int val;
116 struct zram *zram = dev_to_zram(dev);
117
118 down_read(&zram->init_lock);
119 val = zram->max_comp_streams;
120 up_read(&zram->init_lock);
121
122 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
123}
124
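/*
 * Usage note (sketch): the number of per-device compression streams can be
 * tuned at runtime via sysfs, e.g. "echo 4 > /sys/block/zram0/max_comp_streams".
 */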
125static ssize_t max_comp_streams_store(struct device *dev,
126 struct device_attribute *attr, const char *buf, size_t len)
127{
128 int num;
129 struct zram *zram = dev_to_zram(dev);
130 int ret;
131
132 ret = kstrtoint(buf, 0, &num);
133 if (ret < 0)
134 return ret;
135 if (num < 1)
136 return -EINVAL;
137
138 down_write(&zram->init_lock);
139 if (init_done(zram)) {
140 if (!zcomp_set_max_streams(zram->comp, num)) {
141 pr_info("Cannot change max compression streams\n");
142 ret = -EINVAL;
143 goto out;
144 }
145 }
146
147 zram->max_comp_streams = num;
148 ret = len;
149out:
150 up_write(&zram->init_lock);
151 return ret;
152}
153
154static ssize_t comp_algorithm_show(struct device *dev,
155 struct device_attribute *attr, char *buf)
156{
157 size_t sz;
158 struct zram *zram = dev_to_zram(dev);
159
160 down_read(&zram->init_lock);
161 sz = zcomp_available_show(zram->compressor, buf);
162 up_read(&zram->init_lock);
163
164 return sz;
165}
166
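/*
 * Usage note (sketch): the compression backend must be selected before the
 * disksize is set, e.g. "echo lz4 > /sys/block/zram0/comp_algorithm"
 * (assuming the lz4 backend is available in the running kernel).
 */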
167static ssize_t comp_algorithm_store(struct device *dev,
168 struct device_attribute *attr, const char *buf, size_t len)
169{
170 struct zram *zram = dev_to_zram(dev);
171 down_write(&zram->init_lock);
172 if (init_done(zram)) {
173 up_write(&zram->init_lock);
174 pr_info("Can't change algorithm for initialized device\n");
175 return -EBUSY;
176 }
177 strlcpy(zram->compressor, buf, sizeof(zram->compressor));
178 up_write(&zram->init_lock);
179 return len;
180}
181
182/* flag operations need meta->tb_lock */
183static int zram_test_flag(struct zram_meta *meta, u32 index,
184 enum zram_pageflags flag)
185{
186 return meta->table[index].flags & BIT(flag);
187}
188
189static void zram_set_flag(struct zram_meta *meta, u32 index,
190 enum zram_pageflags flag)
191{
192 meta->table[index].flags |= BIT(flag);
193}
194
195static void zram_clear_flag(struct zram_meta *meta, u32 index,
196 enum zram_pageflags flag)
197{
198 meta->table[index].flags &= ~BIT(flag);
199}
200
201static inline int is_partial_io(struct bio_vec *bvec)
202{
203 return bvec->bv_len != PAGE_SIZE;
204}
205
206/*
207 * Check if request is within bounds and aligned on zram logical blocks.
208 */
209static inline int valid_io_request(struct zram *zram, struct bio *bio)
210{
211 u64 start, end, bound;
212
213 /* unaligned request */
214 if (unlikely(bio->bi_iter.bi_sector &
215 (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
216 return 0;
217 if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
218 return 0;
219
220 start = bio->bi_iter.bi_sector;
221 end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
222 bound = zram->disksize >> SECTOR_SHIFT;
 223	/* out of range */
224 if (unlikely(start >= bound || end > bound || start > end))
225 return 0;
226
227 /* I/O request is valid */
228 return 1;
229}
230
231static void zram_meta_free(struct zram_meta *meta)
232{
233 zs_destroy_pool(meta->mem_pool);
234 vfree(meta->table);
235 kfree(meta);
236}
237
238static struct zram_meta *zram_meta_alloc(u64 disksize)
239{
240 size_t num_pages;
241 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
242 if (!meta)
243 goto out;
244
245 num_pages = disksize >> PAGE_SHIFT;
246 meta->table = vzalloc(num_pages * sizeof(*meta->table));
247 if (!meta->table) {
248 pr_err("Error allocating zram address table\n");
249 goto free_meta;
250 }
251
252 meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
253 if (!meta->mem_pool) {
254 pr_err("Error creating memory pool\n");
255 goto free_table;
256 }
257
258 rwlock_init(&meta->tb_lock);
259 return meta;
260
261free_table:
262 vfree(meta->table);
263free_meta:
264 kfree(meta);
265 meta = NULL;
266out:
267 return meta;
268}
269
270static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
271{
272 if (*offset + bvec->bv_len >= PAGE_SIZE)
273 (*index)++;
274 *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
275}
276
277static int page_zero_filled(void *ptr)
278{
279 unsigned int pos;
280 unsigned long *page;
281
282 page = (unsigned long *)ptr;
283
284 for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
285 if (page[pos])
286 return 0;
287 }
288
289 return 1;
290}
291
292static void handle_zero_page(struct bio_vec *bvec)
293{
294 struct page *page = bvec->bv_page;
295 void *user_mem;
296
297 user_mem = kmap_atomic(page);
298 if (is_partial_io(bvec))
299 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
300 else
301 clear_page(user_mem);
302 kunmap_atomic(user_mem);
303
304 flush_dcache_page(page);
305}
306
307/* NOTE: caller should hold meta->tb_lock for writing */
308static void zram_free_page(struct zram *zram, size_t index)
309{
310 struct zram_meta *meta = zram->meta;
311 unsigned long handle = meta->table[index].handle;
312
313 if (unlikely(!handle)) {
314 /*
315 * No memory is allocated for zero filled pages.
316 * Simply clear zero page flag.
317 */
318 if (zram_test_flag(meta, index, ZRAM_ZERO)) {
319 zram_clear_flag(meta, index, ZRAM_ZERO);
320 atomic64_dec(&zram->stats.zero_pages);
321 }
322 return;
323 }
324
325 zs_free(meta->mem_pool, handle);
326
327 atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
328 atomic64_dec(&zram->stats.pages_stored);
329
330 meta->table[index].handle = 0;
331 meta->table[index].size = 0;
332}
333
334static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
335{
336 int ret = 0;
337 unsigned char *cmem;
338 struct zram_meta *meta = zram->meta;
339 unsigned long handle;
340 u16 size;
341
342 read_lock(&meta->tb_lock);
343 handle = meta->table[index].handle;
344 size = meta->table[index].size;
345
346 if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
347 read_unlock(&meta->tb_lock);
348 clear_page(mem);
349 return 0;
350 }
351
352 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
353 if (size == PAGE_SIZE)
354 copy_page(mem, cmem);
355 else
356 ret = zcomp_decompress(zram->comp, cmem, size, mem);
357 zs_unmap_object(meta->mem_pool, handle);
358 read_unlock(&meta->tb_lock);
359
360 /* Should NEVER happen. Return bio error if it does. */
361 if (unlikely(ret)) {
362 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
363 atomic64_inc(&zram->stats.failed_reads);
364 return ret;
365 }
366
367 return 0;
368}
369
370static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
371 u32 index, int offset, struct bio *bio)
372{
373 int ret;
374 struct page *page;
375 unsigned char *user_mem, *uncmem = NULL;
376 struct zram_meta *meta = zram->meta;
377 page = bvec->bv_page;
378
379 read_lock(&meta->tb_lock);
380 if (unlikely(!meta->table[index].handle) ||
381 zram_test_flag(meta, index, ZRAM_ZERO)) {
382 read_unlock(&meta->tb_lock);
383 handle_zero_page(bvec);
384 return 0;
385 }
386 read_unlock(&meta->tb_lock);
387
388 if (is_partial_io(bvec))
389 /* Use a temporary buffer to decompress the page */
390 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
391
392 user_mem = kmap_atomic(page);
393 if (!is_partial_io(bvec))
394 uncmem = user_mem;
395
396 if (!uncmem) {
397 pr_info("Unable to allocate temp memory\n");
398 ret = -ENOMEM;
399 goto out_cleanup;
400 }
401
402 ret = zram_decompress_page(zram, uncmem, index);
403 /* Should NEVER happen. Return bio error if it does. */
404 if (unlikely(ret))
405 goto out_cleanup;
406
407 if (is_partial_io(bvec))
408 memcpy(user_mem + bvec->bv_offset, uncmem + offset,
409 bvec->bv_len);
410
411 flush_dcache_page(page);
412 ret = 0;
413out_cleanup:
414 kunmap_atomic(user_mem);
415 if (is_partial_io(bvec))
416 kfree(uncmem);
417 return ret;
418}
419
420static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
421 int offset)
422{
423 int ret = 0;
424 size_t clen;
425 unsigned long handle;
426 struct page *page;
427 unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
428 struct zram_meta *meta = zram->meta;
429 struct zcomp_strm *zstrm;
430 bool locked = false;
431
432 page = bvec->bv_page;
433 if (is_partial_io(bvec)) {
434 /*
435 * This is a partial IO. We need to read the full page
 436		 * before writing the changes.
437 */
438 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
439 if (!uncmem) {
440 ret = -ENOMEM;
441 goto out;
442 }
443 ret = zram_decompress_page(zram, uncmem, index);
444 if (ret)
445 goto out;
446 }
447
448 zstrm = zcomp_strm_find(zram->comp);
449 locked = true;
450 user_mem = kmap_atomic(page);
451
452 if (is_partial_io(bvec)) {
453 memcpy(uncmem + offset, user_mem + bvec->bv_offset,
454 bvec->bv_len);
455 kunmap_atomic(user_mem);
456 user_mem = NULL;
457 } else {
458 uncmem = user_mem;
459 }
460
461 if (page_zero_filled(uncmem)) {
462 kunmap_atomic(user_mem);
463 /* Free memory associated with this sector now. */
464 write_lock(&zram->meta->tb_lock);
465 zram_free_page(zram, index);
466 zram_set_flag(meta, index, ZRAM_ZERO);
467 write_unlock(&zram->meta->tb_lock);
468
469 atomic64_inc(&zram->stats.zero_pages);
470 ret = 0;
471 goto out;
472 }
473
474 ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
475 if (!is_partial_io(bvec)) {
476 kunmap_atomic(user_mem);
477 user_mem = NULL;
478 uncmem = NULL;
479 }
480
481 if (unlikely(ret)) {
482 pr_err("Compression failed! err=%d\n", ret);
483 goto out;
484 }
485 src = zstrm->buffer;
486 if (unlikely(clen > max_zpage_size)) {
487 clen = PAGE_SIZE;
488 if (is_partial_io(bvec))
489 src = uncmem;
490 }
491
492 handle = zs_malloc(meta->mem_pool, clen);
493 if (!handle) {
494 pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
495 index, clen);
496 ret = -ENOMEM;
497 goto out;
498 }
499 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
500
501 if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
502 src = kmap_atomic(page);
503 copy_page(cmem, src);
504 kunmap_atomic(src);
505 } else {
506 memcpy(cmem, src, clen);
507 }
508
509 zcomp_strm_release(zram->comp, zstrm);
510 locked = false;
511 zs_unmap_object(meta->mem_pool, handle);
512
513 /*
514 * Free memory associated with this sector
515 * before overwriting unused sectors.
516 */
517 write_lock(&zram->meta->tb_lock);
518 zram_free_page(zram, index);
519
520 meta->table[index].handle = handle;
521 meta->table[index].size = clen;
522 write_unlock(&zram->meta->tb_lock);
523
524 /* Update stats */
525 atomic64_add(clen, &zram->stats.compr_data_size);
526 atomic64_inc(&zram->stats.pages_stored);
527out:
528 if (locked)
529 zcomp_strm_release(zram->comp, zstrm);
530 if (is_partial_io(bvec))
531 kfree(uncmem);
532 if (ret)
533 atomic64_inc(&zram->stats.failed_writes);
534 return ret;
535}
536
537static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
538 int offset, struct bio *bio)
539{
540 int ret;
541 int rw = bio_data_dir(bio);
542
543 if (rw == READ) {
544 atomic64_inc(&zram->stats.num_reads);
545 ret = zram_bvec_read(zram, bvec, index, offset, bio);
546 } else {
547 atomic64_inc(&zram->stats.num_writes);
548 ret = zram_bvec_write(zram, bvec, index, offset);
549 }
550
551 return ret;
552}
553
554/*
 555 * zram_bio_discard - handler for discard requests
556 * @index: physical block index in PAGE_SIZE units
557 * @offset: byte offset within physical block
558 */
559static void zram_bio_discard(struct zram *zram, u32 index,
560 int offset, struct bio *bio)
561{
562 size_t n = bio->bi_iter.bi_size;
563
564 /*
 565	 * zram manages data in physical block size units. Because the logical
 566	 * block size isn't identical to the physical block size on some
 567	 * architectures, we could get a discard request pointing to a specific
 568	 * offset within a certain physical block. Although we could handle such a
 569	 * request by reading that physical block, decompressing it, partially
 570	 * zeroing it, re-compressing it and then re-storing it, that isn't
 571	 * reasonable because our intent with a discard request is to save
 572	 * memory. So skipping this logical block is appropriate here.
573 */
574 if (offset) {
575 if (n < offset)
576 return;
577
578 n -= offset;
579 index++;
580 }
581
582 while (n >= PAGE_SIZE) {
583 /*
 584		 * Discard requests can be large, so the lock hold times could be
 585		 * lengthy. Take the lock once per page instead.
586 */
587 write_lock(&zram->meta->tb_lock);
588 zram_free_page(zram, index);
589 write_unlock(&zram->meta->tb_lock);
590 index++;
591 n -= PAGE_SIZE;
592 }
593}
594
595static void zram_reset_device(struct zram *zram, bool reset_capacity)
596{
597 size_t index;
598 struct zram_meta *meta;
599
600 down_write(&zram->init_lock);
601 if (!init_done(zram)) {
602 up_write(&zram->init_lock);
603 return;
604 }
605
606 meta = zram->meta;
607 /* Free all pages that are still in this zram device */
608 for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
609 unsigned long handle = meta->table[index].handle;
610 if (!handle)
611 continue;
612
613 zs_free(meta->mem_pool, handle);
614 }
615
616 zcomp_destroy(zram->comp);
617 zram->max_comp_streams = 1;
618
619 zram_meta_free(zram->meta);
620 zram->meta = NULL;
621 /* Reset stats */
622 memset(&zram->stats, 0, sizeof(zram->stats));
623
624 zram->disksize = 0;
625 if (reset_capacity)
626 set_capacity(zram->disk, 0);
627 up_write(&zram->init_lock);
628}
629
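/*
 * Usage note (sketch): disksize accepts memparse() suffixes, e.g.
 * "echo 1G > /sys/block/zram0/disksize" creates a 1 GiB device; the value
 * is rounded up to a PAGE_SIZE multiple below.
 */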
630static ssize_t disksize_store(struct device *dev,
631 struct device_attribute *attr, const char *buf, size_t len)
632{
633 u64 disksize;
634 struct zcomp *comp;
635 struct zram_meta *meta;
636 struct zram *zram = dev_to_zram(dev);
637 int err;
638
639 disksize = memparse(buf, NULL);
640 if (!disksize)
641 return -EINVAL;
642
643 disksize = PAGE_ALIGN(disksize);
644 meta = zram_meta_alloc(disksize);
645 if (!meta)
646 return -ENOMEM;
647
648 comp = zcomp_create(zram->compressor, zram->max_comp_streams);
649 if (IS_ERR(comp)) {
650 pr_info("Cannot initialise %s compressing backend\n",
651 zram->compressor);
652 err = PTR_ERR(comp);
653 goto out_free_meta;
654 }
655
656 down_write(&zram->init_lock);
657 if (init_done(zram)) {
658 pr_info("Cannot change disksize for initialized device\n");
659 err = -EBUSY;
660 goto out_destroy_comp;
661 }
662
663 zram->meta = meta;
664 zram->comp = comp;
665 zram->disksize = disksize;
666 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
667 up_write(&zram->init_lock);
668 return len;
669
670out_destroy_comp:
671 up_write(&zram->init_lock);
672 zcomp_destroy(comp);
673out_free_meta:
674 zram_meta_free(meta);
675 return err;
676}
677
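/*
 * Usage note (sketch): "echo 1 > /sys/block/zram0/reset" frees all stored
 * pages and returns the device to the uninitialized state; it fails with
 * -EBUSY while the device is still claimed (e.g. mounted or swapped on).
 */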
678static ssize_t reset_store(struct device *dev,
679 struct device_attribute *attr, const char *buf, size_t len)
680{
681 int ret;
682 unsigned short do_reset;
683 struct zram *zram;
684 struct block_device *bdev;
685
686 zram = dev_to_zram(dev);
687 bdev = bdget_disk(zram->disk, 0);
688
689 if (!bdev)
690 return -ENOMEM;
691
692 /* Do not reset an active device! */
693 if (bdev->bd_holders) {
694 ret = -EBUSY;
695 goto out;
696 }
697
698 ret = kstrtou16(buf, 10, &do_reset);
699 if (ret)
700 goto out;
701
702 if (!do_reset) {
703 ret = -EINVAL;
704 goto out;
705 }
706
707 /* Make sure all pending I/O is finished */
708 fsync_bdev(bdev);
709 bdput(bdev);
710
711 zram_reset_device(zram, true);
712 return len;
713
714out:
715 bdput(bdev);
716 return ret;
717}
718
719static void __zram_make_request(struct zram *zram, struct bio *bio)
720{
721 int offset;
722 u32 index;
723 struct bio_vec bvec;
724 struct bvec_iter iter;
725
726 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
727 offset = (bio->bi_iter.bi_sector &
728 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
729
730 if (unlikely(bio->bi_rw & REQ_DISCARD)) {
731 zram_bio_discard(zram, index, offset, bio);
732 bio_endio(bio, 0);
733 return;
734 }
735
736 bio_for_each_segment(bvec, bio, iter) {
737 int max_transfer_size = PAGE_SIZE - offset;
738
739 if (bvec.bv_len > max_transfer_size) {
740 /*
 741			 * zram_bvec_rw() can only operate on a single
742 * zram page. Split the bio vector.
743 */
744 struct bio_vec bv;
745
746 bv.bv_page = bvec.bv_page;
747 bv.bv_len = max_transfer_size;
748 bv.bv_offset = bvec.bv_offset;
749
750 if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
751 goto out;
752
753 bv.bv_len = bvec.bv_len - max_transfer_size;
754 bv.bv_offset += max_transfer_size;
755 if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
756 goto out;
757 } else
758 if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
759 goto out;
760
761 update_position(&index, &offset, &bvec);
762 }
763
764 set_bit(BIO_UPTODATE, &bio->bi_flags);
765 bio_endio(bio, 0);
766 return;
767
768out:
769 bio_io_error(bio);
770}
771
772/*
773 * Handler function for all zram I/O requests.
774 */
775static void zram_make_request(struct request_queue *queue, struct bio *bio)
776{
777 struct zram *zram = queue->queuedata;
778
779 down_read(&zram->init_lock);
780 if (unlikely(!init_done(zram)))
781 goto error;
782
783 if (!valid_io_request(zram, bio)) {
784 atomic64_inc(&zram->stats.invalid_io);
785 goto error;
786 }
787
788 __zram_make_request(zram, bio);
789 up_read(&zram->init_lock);
790
791 return;
792
793error:
794 up_read(&zram->init_lock);
795 bio_io_error(bio);
796}
797
798static void zram_slot_free_notify(struct block_device *bdev,
799 unsigned long index)
800{
801 struct zram *zram;
802 struct zram_meta *meta;
803
804 zram = bdev->bd_disk->private_data;
805 meta = zram->meta;
806
807 write_lock(&meta->tb_lock);
808 zram_free_page(zram, index);
809 write_unlock(&meta->tb_lock);
810 atomic64_inc(&zram->stats.notify_free);
811}
812
813static const struct block_device_operations zram_devops = {
814 .swap_slot_free_notify = zram_slot_free_notify,
815 .owner = THIS_MODULE
816};
817
818static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
819 disksize_show, disksize_store);
820static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
821static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
822static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
823static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
824static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
825 max_comp_streams_show, max_comp_streams_store);
826static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
827 comp_algorithm_show, comp_algorithm_store);
828
829ZRAM_ATTR_RO(num_reads);
830ZRAM_ATTR_RO(num_writes);
831ZRAM_ATTR_RO(failed_reads);
832ZRAM_ATTR_RO(failed_writes);
833ZRAM_ATTR_RO(invalid_io);
834ZRAM_ATTR_RO(notify_free);
835ZRAM_ATTR_RO(zero_pages);
836ZRAM_ATTR_RO(compr_data_size);
837
838static struct attribute *zram_disk_attrs[] = {
839 &dev_attr_disksize.attr,
840 &dev_attr_initstate.attr,
841 &dev_attr_reset.attr,
842 &dev_attr_num_reads.attr,
843 &dev_attr_num_writes.attr,
844 &dev_attr_failed_reads.attr,
845 &dev_attr_failed_writes.attr,
846 &dev_attr_invalid_io.attr,
847 &dev_attr_notify_free.attr,
848 &dev_attr_zero_pages.attr,
849 &dev_attr_orig_data_size.attr,
850 &dev_attr_compr_data_size.attr,
851 &dev_attr_mem_used_total.attr,
852 &dev_attr_max_comp_streams.attr,
853 &dev_attr_comp_algorithm.attr,
854 NULL,
855};
856
857static struct attribute_group zram_disk_attr_group = {
858 .attrs = zram_disk_attrs,
859};
860
861static int create_device(struct zram *zram, int device_id)
862{
863 int ret = -ENOMEM;
864
865 init_rwsem(&zram->init_lock);
866
867 zram->queue = blk_alloc_queue(GFP_KERNEL);
868 if (!zram->queue) {
869 pr_err("Error allocating disk queue for device %d\n",
870 device_id);
871 goto out;
872 }
873
874 blk_queue_make_request(zram->queue, zram_make_request);
875 zram->queue->queuedata = zram;
876
877 /* gendisk structure */
878 zram->disk = alloc_disk(1);
879 if (!zram->disk) {
880 pr_warn("Error allocating disk structure for device %d\n",
881 device_id);
882 goto out_free_queue;
883 }
884
885 zram->disk->major = zram_major;
886 zram->disk->first_minor = device_id;
887 zram->disk->fops = &zram_devops;
888 zram->disk->queue = zram->queue;
889 zram->disk->private_data = zram;
890 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
891
 892	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
893 set_capacity(zram->disk, 0);
 894	/* zram devices sort of resemble non-rotational disks */
895 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
896 /*
 897	 * To ensure that we always get PAGE_SIZE-aligned
 898	 * and n*PAGE_SIZE-sized I/O requests.
899 */
900 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
901 blk_queue_logical_block_size(zram->disk->queue,
902 ZRAM_LOGICAL_BLOCK_SIZE);
903 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
904 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
905 zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
906 zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
907 /*
 908	 * zram_bio_discard() will clear all logical blocks if the logical block
 909	 * size is identical to the physical block size (PAGE_SIZE). But if it is
 910	 * different, we will skip discarding some parts of logical blocks in
 911	 * the part of the request range which isn't aligned to the physical
 912	 * block size. So we can't ensure that all discarded logical blocks are
 913	 * zeroed.
914 */
915 if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
916 zram->disk->queue->limits.discard_zeroes_data = 1;
917 else
918 zram->disk->queue->limits.discard_zeroes_data = 0;
919 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
920
921 add_disk(zram->disk);
922
923 ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
924 &zram_disk_attr_group);
925 if (ret < 0) {
 926		pr_warn("Error creating sysfs group\n");
927 goto out_free_disk;
928 }
929 strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
930 zram->meta = NULL;
931 zram->max_comp_streams = 1;
932 return 0;
933
934out_free_disk:
935 del_gendisk(zram->disk);
936 put_disk(zram->disk);
937out_free_queue:
938 blk_cleanup_queue(zram->queue);
939out:
940 return ret;
941}
942
943static void destroy_device(struct zram *zram)
944{
945 sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
946 &zram_disk_attr_group);
947
948 del_gendisk(zram->disk);
949 put_disk(zram->disk);
950
951 blk_cleanup_queue(zram->queue);
952}
953
954static int __init zram_init(void)
955{
956 int ret, dev_id;
957
958 if (num_devices > max_num_devices) {
959 pr_warn("Invalid value for num_devices: %u\n",
960 num_devices);
961 ret = -EINVAL;
962 goto out;
963 }
964
965 zram_major = register_blkdev(0, "zram");
966 if (zram_major <= 0) {
967 pr_warn("Unable to get major number\n");
968 ret = -EBUSY;
969 goto out;
970 }
971
972 /* Allocate the device array and initialize each one */
973 zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
974 if (!zram_devices) {
975 ret = -ENOMEM;
976 goto unregister;
977 }
978
979 for (dev_id = 0; dev_id < num_devices; dev_id++) {
980 ret = create_device(&zram_devices[dev_id], dev_id);
981 if (ret)
982 goto free_devices;
983 }
984
985 pr_info("Created %u device(s) ...\n", num_devices);
986
987 return 0;
988
989free_devices:
990 while (dev_id)
991 destroy_device(&zram_devices[--dev_id]);
992 kfree(zram_devices);
993unregister:
994 unregister_blkdev(zram_major, "zram");
995out:
996 return ret;
997}
998
999static void __exit zram_exit(void)
1000{
1001 int i;
1002 struct zram *zram;
1003
1004 for (i = 0; i < num_devices; i++) {
1005 zram = &zram_devices[i];
1006
1007 destroy_device(zram);
1008 /*
1009 * Shouldn't access zram->disk after destroy_device
1010 * because destroy_device already released zram->disk.
1011 */
1012 zram_reset_device(zram, false);
1013 }
1014
1015 unregister_blkdev(zram_major, "zram");
1016
1017 kfree(zram_devices);
1018 pr_debug("Cleanup done!\n");
1019}
1020
1021module_init(zram_init);
1022module_exit(zram_exit);
1023
1024module_param(num_devices, uint, 0);
1025MODULE_PARM_DESC(num_devices, "Number of zram devices");
1026
1027MODULE_LICENSE("Dual BSD/GPL");
1028MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
1029MODULE_DESCRIPTION("Compressed RAM Block Device");
1/*
2 * Compressed RAM block device
3 *
4 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
5 * 2012, 2013 Minchan Kim
6 *
7 * This code is released using a dual license strategy: BSD/GPL
8 * You can choose the licence that better fits your requirements.
9 *
10 * Released under the terms of 3-clause BSD License
11 * Released under the terms of GNU General Public License Version 2.0
12 *
13 */
14
15#define KMSG_COMPONENT "zram"
16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/bio.h>
21#include <linux/bitops.h>
22#include <linux/blkdev.h>
23#include <linux/buffer_head.h>
24#include <linux/device.h>
25#include <linux/highmem.h>
26#include <linux/slab.h>
27#include <linux/backing-dev.h>
28#include <linux/string.h>
29#include <linux/vmalloc.h>
30#include <linux/err.h>
31#include <linux/idr.h>
32#include <linux/sysfs.h>
33#include <linux/debugfs.h>
34#include <linux/cpuhotplug.h>
35#include <linux/part_stat.h>
36#include <linux/kernel_read_file.h>
37
38#include "zram_drv.h"
39
40static DEFINE_IDR(zram_index_idr);
41/* idr index must be protected */
42static DEFINE_MUTEX(zram_index_mutex);
43
44static int zram_major;
45static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
46
47/* Module params (documentation at end) */
48static unsigned int num_devices = 1;
49/*
 50 * Pages that compress to sizes equal to or greater than this are stored
51 * uncompressed in memory.
52 */
53static size_t huge_class_size;
54
55static const struct block_device_operations zram_devops;
56
57static void zram_free_page(struct zram *zram, size_t index);
58static int zram_read_page(struct zram *zram, struct page *page, u32 index,
59 struct bio *parent);
60
61static int zram_slot_trylock(struct zram *zram, u32 index)
62{
63 return spin_trylock(&zram->table[index].lock);
64}
65
66static void zram_slot_lock(struct zram *zram, u32 index)
67{
68 spin_lock(&zram->table[index].lock);
69}
70
71static void zram_slot_unlock(struct zram *zram, u32 index)
72{
73 spin_unlock(&zram->table[index].lock);
74}
75
76static inline bool init_done(struct zram *zram)
77{
78 return zram->disksize;
79}
80
81static inline struct zram *dev_to_zram(struct device *dev)
82{
83 return (struct zram *)dev_to_disk(dev)->private_data;
84}
85
86static unsigned long zram_get_handle(struct zram *zram, u32 index)
87{
88 return zram->table[index].handle;
89}
90
91static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
92{
93 zram->table[index].handle = handle;
94}
95
 96/* flag operations require the per-entry lock (zram_slot_lock()) being held */
97static bool zram_test_flag(struct zram *zram, u32 index,
98 enum zram_pageflags flag)
99{
100 return zram->table[index].flags & BIT(flag);
101}
102
103static void zram_set_flag(struct zram *zram, u32 index,
104 enum zram_pageflags flag)
105{
106 zram->table[index].flags |= BIT(flag);
107}
108
109static void zram_clear_flag(struct zram *zram, u32 index,
110 enum zram_pageflags flag)
111{
112 zram->table[index].flags &= ~BIT(flag);
113}
114
115static inline void zram_set_element(struct zram *zram, u32 index,
116 unsigned long element)
117{
118 zram->table[index].element = element;
119}
120
121static unsigned long zram_get_element(struct zram *zram, u32 index)
122{
123 return zram->table[index].element;
124}
125
126static size_t zram_get_obj_size(struct zram *zram, u32 index)
127{
128 return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
129}
130
131static void zram_set_obj_size(struct zram *zram,
132 u32 index, size_t size)
133{
134 unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
135
136 zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
137}
138
139static inline bool zram_allocated(struct zram *zram, u32 index)
140{
141 return zram_get_obj_size(zram, index) ||
142 zram_test_flag(zram, index, ZRAM_SAME) ||
143 zram_test_flag(zram, index, ZRAM_WB);
144}
145
146#if PAGE_SIZE != 4096
147static inline bool is_partial_io(struct bio_vec *bvec)
148{
149 return bvec->bv_len != PAGE_SIZE;
150}
151#define ZRAM_PARTIAL_IO 1
152#else
153static inline bool is_partial_io(struct bio_vec *bvec)
154{
155 return false;
156}
157#endif
158
159static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
160{
161 prio &= ZRAM_COMP_PRIORITY_MASK;
162 /*
 163	 * Clear the previous priority value first, in case we recompress
 164	 * an already recompressed page further
165 */
166 zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
167 ZRAM_COMP_PRIORITY_BIT1);
168 zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
169}
170
171static inline u32 zram_get_priority(struct zram *zram, u32 index)
172{
173 u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
174
175 return prio & ZRAM_COMP_PRIORITY_MASK;
176}
177
178static void zram_accessed(struct zram *zram, u32 index)
179{
180 zram_clear_flag(zram, index, ZRAM_IDLE);
181 zram_clear_flag(zram, index, ZRAM_PP_SLOT);
182#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
183 zram->table[index].ac_time = ktime_get_boottime();
184#endif
185}
186
187#if defined CONFIG_ZRAM_WRITEBACK || defined CONFIG_ZRAM_MULTI_COMP
188struct zram_pp_slot {
189 unsigned long index;
190 struct list_head entry;
191};
192
193/*
 194 * A post-processing bucket is, essentially, a size class; this defines
 195 * the range (in bytes) of pp-slot sizes in a particular bucket.
196 */
197#define PP_BUCKET_SIZE_RANGE 64
198#define NUM_PP_BUCKETS ((PAGE_SIZE / PP_BUCKET_SIZE_RANGE) + 1)
199
200struct zram_pp_ctl {
201 struct list_head pp_buckets[NUM_PP_BUCKETS];
202};
203
204static struct zram_pp_ctl *init_pp_ctl(void)
205{
206 struct zram_pp_ctl *ctl;
207 u32 idx;
208
209 ctl = kmalloc(sizeof(*ctl), GFP_KERNEL);
210 if (!ctl)
211 return NULL;
212
213 for (idx = 0; idx < NUM_PP_BUCKETS; idx++)
214 INIT_LIST_HEAD(&ctl->pp_buckets[idx]);
215 return ctl;
216}
217
218static void release_pp_slot(struct zram *zram, struct zram_pp_slot *pps)
219{
220 list_del_init(&pps->entry);
221
222 zram_slot_lock(zram, pps->index);
223 zram_clear_flag(zram, pps->index, ZRAM_PP_SLOT);
224 zram_slot_unlock(zram, pps->index);
225
226 kfree(pps);
227}
228
229static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl)
230{
231 u32 idx;
232
233 if (!ctl)
234 return;
235
236 for (idx = 0; idx < NUM_PP_BUCKETS; idx++) {
237 while (!list_empty(&ctl->pp_buckets[idx])) {
238 struct zram_pp_slot *pps;
239
240 pps = list_first_entry(&ctl->pp_buckets[idx],
241 struct zram_pp_slot,
242 entry);
243 release_pp_slot(zram, pps);
244 }
245 }
246
247 kfree(ctl);
248}
249
250static void place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
251 struct zram_pp_slot *pps)
252{
253 u32 idx;
254
255 idx = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
256 list_add(&pps->entry, &ctl->pp_buckets[idx]);
257
258 zram_set_flag(zram, pps->index, ZRAM_PP_SLOT);
259}
260
261static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
262{
263 struct zram_pp_slot *pps = NULL;
264 s32 idx = NUM_PP_BUCKETS - 1;
265
 266	/* The higher the bucket id, the better the slot is for post-processing */
267 while (idx >= 0) {
268 pps = list_first_entry_or_null(&ctl->pp_buckets[idx],
269 struct zram_pp_slot,
270 entry);
271 if (pps)
272 break;
273
274 idx--;
275 }
276 return pps;
277}
278#endif
279
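/* Track the high watermark of zsmalloc pool pages with a lock-free cmpxchg loop. */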
280static inline void update_used_max(struct zram *zram,
281 const unsigned long pages)
282{
283 unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
284
285 do {
286 if (cur_max >= pages)
287 return;
288 } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
289 &cur_max, pages));
290}
291
292static inline void zram_fill_page(void *ptr, unsigned long len,
293 unsigned long value)
294{
295 WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
296 memset_l(ptr, value, len / sizeof(unsigned long));
297}
298
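/*
 * Detect pages whose content is a single repeating machine word (a superset
 * of zero-filled pages); such pages are stored as just that word instead of
 * a zsmalloc-backed object.
 */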
299static bool page_same_filled(void *ptr, unsigned long *element)
300{
301 unsigned long *page;
302 unsigned long val;
303 unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
304
305 page = (unsigned long *)ptr;
306 val = page[0];
307
308 if (val != page[last_pos])
309 return false;
310
311 for (pos = 1; pos < last_pos; pos++) {
312 if (val != page[pos])
313 return false;
314 }
315
316 *element = val;
317
318 return true;
319}
320
321static ssize_t initstate_show(struct device *dev,
322 struct device_attribute *attr, char *buf)
323{
324 u32 val;
325 struct zram *zram = dev_to_zram(dev);
326
327 down_read(&zram->init_lock);
328 val = init_done(zram);
329 up_read(&zram->init_lock);
330
331 return scnprintf(buf, PAGE_SIZE, "%u\n", val);
332}
333
334static ssize_t disksize_show(struct device *dev,
335 struct device_attribute *attr, char *buf)
336{
337 struct zram *zram = dev_to_zram(dev);
338
339 return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
340}
341
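/*
 * Usage note (sketch): "echo 256M > /sys/block/zram0/mem_limit" caps growth
 * of the zsmalloc pool; "echo 0" is expected to remove the limit.
 */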
342static ssize_t mem_limit_store(struct device *dev,
343 struct device_attribute *attr, const char *buf, size_t len)
344{
345 u64 limit;
346 char *tmp;
347 struct zram *zram = dev_to_zram(dev);
348
349 limit = memparse(buf, &tmp);
350 if (buf == tmp) /* no chars parsed, invalid input */
351 return -EINVAL;
352
353 down_write(&zram->init_lock);
354 zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
355 up_write(&zram->init_lock);
356
357 return len;
358}
359
360static ssize_t mem_used_max_store(struct device *dev,
361 struct device_attribute *attr, const char *buf, size_t len)
362{
363 int err;
364 unsigned long val;
365 struct zram *zram = dev_to_zram(dev);
366
367 err = kstrtoul(buf, 10, &val);
368 if (err || val != 0)
369 return -EINVAL;
370
371 down_read(&zram->init_lock);
372 if (init_done(zram)) {
373 atomic_long_set(&zram->stats.max_used_pages,
374 zs_get_total_pages(zram->mem_pool));
375 }
376 up_read(&zram->init_lock);
377
378 return len;
379}
380
381/*
382 * Mark all pages which are older than or equal to cutoff as IDLE.
383 * Callers should hold the zram init lock in read mode
384 */
385static void mark_idle(struct zram *zram, ktime_t cutoff)
386{
387 int is_idle = 1;
388 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
389 int index;
390
391 for (index = 0; index < nr_pages; index++) {
392 /*
393 * Do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
394 * post-processing (recompress, writeback) happens to the
395 * ZRAM_SAME slot.
396 *
397 * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
398 */
399 zram_slot_lock(zram, index);
400 if (!zram_allocated(zram, index) ||
401 zram_test_flag(zram, index, ZRAM_WB) ||
402 zram_test_flag(zram, index, ZRAM_SAME)) {
403 zram_slot_unlock(zram, index);
404 continue;
405 }
406
407#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
408 is_idle = !cutoff ||
409 ktime_after(cutoff, zram->table[index].ac_time);
410#endif
411 if (is_idle)
412 zram_set_flag(zram, index, ZRAM_IDLE);
413 else
414 zram_clear_flag(zram, index, ZRAM_IDLE);
415 zram_slot_unlock(zram, index);
416 }
417}
418
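/*
 * Usage note (sketch): "echo all > /sys/block/zram0/idle" marks every
 * eligible slot idle; with CONFIG_ZRAM_TRACK_ENTRY_ACTIME an age in seconds
 * can be given instead, e.g. "echo 3600" marks slots not accessed in the
 * last hour.
 */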
419static ssize_t idle_store(struct device *dev,
420 struct device_attribute *attr, const char *buf, size_t len)
421{
422 struct zram *zram = dev_to_zram(dev);
423 ktime_t cutoff_time = 0;
424 ssize_t rv = -EINVAL;
425
426 if (!sysfs_streq(buf, "all")) {
427 /*
428 * If it did not parse as 'all' try to treat it as an integer
429 * when we have memory tracking enabled.
430 */
431 u64 age_sec;
432
433 if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
434 cutoff_time = ktime_sub(ktime_get_boottime(),
435 ns_to_ktime(age_sec * NSEC_PER_SEC));
436 else
437 goto out;
438 }
439
440 down_read(&zram->init_lock);
441 if (!init_done(zram))
442 goto out_unlock;
443
444 /*
 445	 * A cutoff_time of 0 marks everything as idle; this is the
446 * "all" behavior.
447 */
448 mark_idle(zram, cutoff_time);
449 rv = len;
450
451out_unlock:
452 up_read(&zram->init_lock);
453out:
454 return rv;
455}
456
457#ifdef CONFIG_ZRAM_WRITEBACK
458static ssize_t writeback_limit_enable_store(struct device *dev,
459 struct device_attribute *attr, const char *buf, size_t len)
460{
461 struct zram *zram = dev_to_zram(dev);
462 u64 val;
463 ssize_t ret = -EINVAL;
464
465 if (kstrtoull(buf, 10, &val))
466 return ret;
467
468 down_read(&zram->init_lock);
469 spin_lock(&zram->wb_limit_lock);
470 zram->wb_limit_enable = val;
471 spin_unlock(&zram->wb_limit_lock);
472 up_read(&zram->init_lock);
473 ret = len;
474
475 return ret;
476}
477
478static ssize_t writeback_limit_enable_show(struct device *dev,
479 struct device_attribute *attr, char *buf)
480{
481 bool val;
482 struct zram *zram = dev_to_zram(dev);
483
484 down_read(&zram->init_lock);
485 spin_lock(&zram->wb_limit_lock);
486 val = zram->wb_limit_enable;
487 spin_unlock(&zram->wb_limit_lock);
488 up_read(&zram->init_lock);
489
490 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
491}
492
493static ssize_t writeback_limit_store(struct device *dev,
494 struct device_attribute *attr, const char *buf, size_t len)
495{
496 struct zram *zram = dev_to_zram(dev);
497 u64 val;
498 ssize_t ret = -EINVAL;
499
500 if (kstrtoull(buf, 10, &val))
501 return ret;
502
503 down_read(&zram->init_lock);
504 spin_lock(&zram->wb_limit_lock);
505 zram->bd_wb_limit = val;
506 spin_unlock(&zram->wb_limit_lock);
507 up_read(&zram->init_lock);
508 ret = len;
509
510 return ret;
511}
512
513static ssize_t writeback_limit_show(struct device *dev,
514 struct device_attribute *attr, char *buf)
515{
516 u64 val;
517 struct zram *zram = dev_to_zram(dev);
518
519 down_read(&zram->init_lock);
520 spin_lock(&zram->wb_limit_lock);
521 val = zram->bd_wb_limit;
522 spin_unlock(&zram->wb_limit_lock);
523 up_read(&zram->init_lock);
524
525 return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
526}
527
528static void reset_bdev(struct zram *zram)
529{
530 if (!zram->backing_dev)
531 return;
532
 533	/* hope filp_close() flushes all of the IO */
534 filp_close(zram->backing_dev, NULL);
535 zram->backing_dev = NULL;
536 zram->bdev = NULL;
537 zram->disk->fops = &zram_devops;
538 kvfree(zram->bitmap);
539 zram->bitmap = NULL;
540}
541
542static ssize_t backing_dev_show(struct device *dev,
543 struct device_attribute *attr, char *buf)
544{
545 struct file *file;
546 struct zram *zram = dev_to_zram(dev);
547 char *p;
548 ssize_t ret;
549
550 down_read(&zram->init_lock);
551 file = zram->backing_dev;
552 if (!file) {
553 memcpy(buf, "none\n", 5);
554 up_read(&zram->init_lock);
555 return 5;
556 }
557
558 p = file_path(file, buf, PAGE_SIZE - 1);
559 if (IS_ERR(p)) {
560 ret = PTR_ERR(p);
561 goto out;
562 }
563
564 ret = strlen(p);
565 memmove(buf, p, ret);
566 buf[ret++] = '\n';
567out:
568 up_read(&zram->init_lock);
569 return ret;
570}
571
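/*
 * Usage note (sketch): "echo /dev/sdX4 > /sys/block/zram0/backing_dev"
 * (partition name is illustrative) attaches a block device as writeback
 * storage; this must be done before disksize is set.
 */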
572static ssize_t backing_dev_store(struct device *dev,
573 struct device_attribute *attr, const char *buf, size_t len)
574{
575 char *file_name;
576 size_t sz;
577 struct file *backing_dev = NULL;
578 struct inode *inode;
579 unsigned int bitmap_sz;
580 unsigned long nr_pages, *bitmap = NULL;
581 int err;
582 struct zram *zram = dev_to_zram(dev);
583
584 file_name = kmalloc(PATH_MAX, GFP_KERNEL);
585 if (!file_name)
586 return -ENOMEM;
587
588 down_write(&zram->init_lock);
589 if (init_done(zram)) {
590 pr_info("Can't setup backing device for initialized device\n");
591 err = -EBUSY;
592 goto out;
593 }
594
595 strscpy(file_name, buf, PATH_MAX);
596 /* ignore trailing newline */
597 sz = strlen(file_name);
598 if (sz > 0 && file_name[sz - 1] == '\n')
599 file_name[sz - 1] = 0x00;
600
601 backing_dev = filp_open(file_name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
602 if (IS_ERR(backing_dev)) {
603 err = PTR_ERR(backing_dev);
604 backing_dev = NULL;
605 goto out;
606 }
607
608 inode = backing_dev->f_mapping->host;
609
 610	/* Only block devices are supported at the moment */
611 if (!S_ISBLK(inode->i_mode)) {
612 err = -ENOTBLK;
613 goto out;
614 }
615
616 nr_pages = i_size_read(inode) >> PAGE_SHIFT;
617 /* Refuse to use zero sized device (also prevents self reference) */
618 if (!nr_pages) {
619 err = -EINVAL;
620 goto out;
621 }
622
623 bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
624 bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
625 if (!bitmap) {
626 err = -ENOMEM;
627 goto out;
628 }
629
630 reset_bdev(zram);
631
632 zram->bdev = I_BDEV(inode);
633 zram->backing_dev = backing_dev;
634 zram->bitmap = bitmap;
635 zram->nr_pages = nr_pages;
636 up_write(&zram->init_lock);
637
638 pr_info("setup backing device %s\n", file_name);
639 kfree(file_name);
640
641 return len;
642out:
643 kvfree(bitmap);
644
645 if (backing_dev)
646 filp_close(backing_dev, NULL);
647
648 up_write(&zram->init_lock);
649
650 kfree(file_name);
651
652 return err;
653}
654
655static unsigned long alloc_block_bdev(struct zram *zram)
656{
657 unsigned long blk_idx = 1;
658retry:
 659	/* skip bit 0 to avoid confusion with zram.handle == 0 */
660 blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
661 if (blk_idx == zram->nr_pages)
662 return 0;
663
664 if (test_and_set_bit(blk_idx, zram->bitmap))
665 goto retry;
666
667 atomic64_inc(&zram->stats.bd_count);
668 return blk_idx;
669}
670
671static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
672{
673 int was_set;
674
675 was_set = test_and_clear_bit(blk_idx, zram->bitmap);
676 WARN_ON_ONCE(!was_set);
677 atomic64_dec(&zram->stats.bd_count);
678}
679
680static void read_from_bdev_async(struct zram *zram, struct page *page,
681 unsigned long entry, struct bio *parent)
682{
683 struct bio *bio;
684
685 bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
686 bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
687 __bio_add_page(bio, page, PAGE_SIZE, 0);
688 bio_chain(bio, parent);
689 submit_bio(bio);
690}
691
692#define PAGE_WB_SIG "page_index="
693
694#define PAGE_WRITEBACK 0
695#define HUGE_WRITEBACK (1<<0)
696#define IDLE_WRITEBACK (1<<1)
697#define INCOMPRESSIBLE_WRITEBACK (1<<2)
698
699static int scan_slots_for_writeback(struct zram *zram, u32 mode,
700 unsigned long nr_pages,
701 unsigned long index,
702 struct zram_pp_ctl *ctl)
703{
704 struct zram_pp_slot *pps = NULL;
705
706 for (; nr_pages != 0; index++, nr_pages--) {
707 if (!pps)
708 pps = kmalloc(sizeof(*pps), GFP_KERNEL);
709 if (!pps)
710 return -ENOMEM;
711
712 INIT_LIST_HEAD(&pps->entry);
713
714 zram_slot_lock(zram, index);
715 if (!zram_allocated(zram, index))
716 goto next;
717
718 if (zram_test_flag(zram, index, ZRAM_WB) ||
719 zram_test_flag(zram, index, ZRAM_SAME))
720 goto next;
721
722 if (mode & IDLE_WRITEBACK &&
723 !zram_test_flag(zram, index, ZRAM_IDLE))
724 goto next;
725 if (mode & HUGE_WRITEBACK &&
726 !zram_test_flag(zram, index, ZRAM_HUGE))
727 goto next;
728 if (mode & INCOMPRESSIBLE_WRITEBACK &&
729 !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
730 goto next;
731
732 pps->index = index;
733 place_pp_slot(zram, ctl, pps);
734 pps = NULL;
735next:
736 zram_slot_unlock(zram, index);
737 }
738
739 kfree(pps);
740 return 0;
741}
742
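/*
 * Usage note (sketch): "echo idle > /sys/block/zram0/writeback" writes back
 * idle slots, "echo huge" huge (stored-uncompressed) slots,
 * "echo incompressible" slots flagged by recompression, and
 * "echo page_index=1251" a single page (the index shown is illustrative).
 */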
743static ssize_t writeback_store(struct device *dev,
744 struct device_attribute *attr, const char *buf, size_t len)
745{
746 struct zram *zram = dev_to_zram(dev);
747 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
748 struct zram_pp_ctl *ctl = NULL;
749 struct zram_pp_slot *pps;
750 unsigned long index = 0;
751 struct bio bio;
752 struct bio_vec bio_vec;
753 struct page *page;
754 ssize_t ret = len;
755 int mode, err;
756 unsigned long blk_idx = 0;
757
758 if (sysfs_streq(buf, "idle"))
759 mode = IDLE_WRITEBACK;
760 else if (sysfs_streq(buf, "huge"))
761 mode = HUGE_WRITEBACK;
762 else if (sysfs_streq(buf, "huge_idle"))
763 mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
764 else if (sysfs_streq(buf, "incompressible"))
765 mode = INCOMPRESSIBLE_WRITEBACK;
766 else {
767 if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
768 return -EINVAL;
769
770 if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
771 index >= nr_pages)
772 return -EINVAL;
773
774 nr_pages = 1;
775 mode = PAGE_WRITEBACK;
776 }
777
778 down_read(&zram->init_lock);
779 if (!init_done(zram)) {
780 ret = -EINVAL;
781 goto release_init_lock;
782 }
783
784 /* Do not permit concurrent post-processing actions. */
785 if (atomic_xchg(&zram->pp_in_progress, 1)) {
786 up_read(&zram->init_lock);
787 return -EAGAIN;
788 }
789
790 if (!zram->backing_dev) {
791 ret = -ENODEV;
792 goto release_init_lock;
793 }
794
795 page = alloc_page(GFP_KERNEL);
796 if (!page) {
797 ret = -ENOMEM;
798 goto release_init_lock;
799 }
800
801 ctl = init_pp_ctl();
802 if (!ctl) {
803 ret = -ENOMEM;
804 goto release_init_lock;
805 }
806
807 scan_slots_for_writeback(zram, mode, nr_pages, index, ctl);
808
809 while ((pps = select_pp_slot(ctl))) {
810 spin_lock(&zram->wb_limit_lock);
811 if (zram->wb_limit_enable && !zram->bd_wb_limit) {
812 spin_unlock(&zram->wb_limit_lock);
813 ret = -EIO;
814 break;
815 }
816 spin_unlock(&zram->wb_limit_lock);
817
818 if (!blk_idx) {
819 blk_idx = alloc_block_bdev(zram);
820 if (!blk_idx) {
821 ret = -ENOSPC;
822 break;
823 }
824 }
825
826 index = pps->index;
827 zram_slot_lock(zram, index);
828 /*
 829		 * scan_slots() sets ZRAM_PP_SLOT and releases the slot lock, so
 830		 * slots can change in the meantime. If slots are accessed or
 831		 * freed they lose the ZRAM_PP_SLOT flag and hence we don't
832 * post-process them.
833 */
834 if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
835 goto next;
836 zram_slot_unlock(zram, index);
837
838 if (zram_read_page(zram, page, index, NULL)) {
839 release_pp_slot(zram, pps);
840 continue;
841 }
842
843 bio_init(&bio, zram->bdev, &bio_vec, 1,
844 REQ_OP_WRITE | REQ_SYNC);
845 bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
846 __bio_add_page(&bio, page, PAGE_SIZE, 0);
847
848 /*
 849		 * XXX: A single-page IO would be inefficient for writes,
 850		 * but it is not bad as a starting point.
851 */
852 err = submit_bio_wait(&bio);
853 if (err) {
854 release_pp_slot(zram, pps);
855 /*
856 * BIO errors are not fatal, we continue and simply
857 * attempt to writeback the remaining objects (pages).
858 * At the same time we need to signal user-space that
859 * some writes (at least one, but also could be all of
860 * them) were not successful and we do so by returning
861 * the most recent BIO error.
862 */
863 ret = err;
864 continue;
865 }
866
867 atomic64_inc(&zram->stats.bd_writes);
868 zram_slot_lock(zram, index);
869 /*
 870		 * Same as above, we release the slot lock during writeback so
 871		 * the slot can change under us: it can be freed (slot_free()), or
 872		 * freed and reallocated (zram_write_page()). In both cases the slot
 873		 * loses the ZRAM_PP_SLOT flag. No concurrent post-processing can set
874 * ZRAM_PP_SLOT on such slots until current post-processing
875 * finishes.
876 */
877 if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
878 goto next;
879
880 zram_free_page(zram, index);
881 zram_set_flag(zram, index, ZRAM_WB);
882 zram_set_element(zram, index, blk_idx);
883 blk_idx = 0;
884 atomic64_inc(&zram->stats.pages_stored);
885 spin_lock(&zram->wb_limit_lock);
886 if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
887 zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
888 spin_unlock(&zram->wb_limit_lock);
889next:
890 zram_slot_unlock(zram, index);
891 release_pp_slot(zram, pps);
892 }
893
894 if (blk_idx)
895 free_block_bdev(zram, blk_idx);
896 __free_page(page);
897release_init_lock:
898 release_pp_ctl(zram, ctl);
899 atomic_set(&zram->pp_in_progress, 0);
900 up_read(&zram->init_lock);
901
902 return ret;
903}
904
905struct zram_work {
906 struct work_struct work;
907 struct zram *zram;
908 unsigned long entry;
909 struct page *page;
910 int error;
911};
912
913static void zram_sync_read(struct work_struct *work)
914{
915 struct zram_work *zw = container_of(work, struct zram_work, work);
916 struct bio_vec bv;
917 struct bio bio;
918
919 bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
920 bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
921 __bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
922 zw->error = submit_bio_wait(&bio);
923}
924
925/*
 926 * The block layer wants one ->submit_bio to be active at a time, so if we use
 927 * chained IO with the parent IO in the same context, it deadlocks. To avoid
 928 * that, use a worker thread context.
929 */
930static int read_from_bdev_sync(struct zram *zram, struct page *page,
931 unsigned long entry)
932{
933 struct zram_work work;
934
935 work.page = page;
936 work.zram = zram;
937 work.entry = entry;
938
939 INIT_WORK_ONSTACK(&work.work, zram_sync_read);
940 queue_work(system_unbound_wq, &work.work);
941 flush_work(&work.work);
942 destroy_work_on_stack(&work.work);
943
944 return work.error;
945}
946
947static int read_from_bdev(struct zram *zram, struct page *page,
948 unsigned long entry, struct bio *parent)
949{
950 atomic64_inc(&zram->stats.bd_reads);
951 if (!parent) {
952 if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO)))
953 return -EIO;
954 return read_from_bdev_sync(zram, page, entry);
955 }
956 read_from_bdev_async(zram, page, entry, parent);
957 return 0;
958}
959#else
960static inline void reset_bdev(struct zram *zram) {};
961static int read_from_bdev(struct zram *zram, struct page *page,
962 unsigned long entry, struct bio *parent)
963{
964 return -EIO;
965}
966
967static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
968#endif
969
970#ifdef CONFIG_ZRAM_MEMORY_TRACKING
971
972static struct dentry *zram_debugfs_root;
973
974static void zram_debugfs_create(void)
975{
976 zram_debugfs_root = debugfs_create_dir("zram", NULL);
977}
978
979static void zram_debugfs_destroy(void)
980{
981 debugfs_remove_recursive(zram_debugfs_root);
982}
983
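/*
 * Usage note (sketch): with debugfs mounted (typically /sys/kernel/debug),
 * "cat /sys/kernel/debug/zram/zram0/block_state" prints one line per
 * allocated slot: index, access time and the s/w/h/i/r/n flag characters
 * emitted below.
 */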
984static ssize_t read_block_state(struct file *file, char __user *buf,
985 size_t count, loff_t *ppos)
986{
987 char *kbuf;
988 ssize_t index, written = 0;
989 struct zram *zram = file->private_data;
990 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
991 struct timespec64 ts;
992
993 kbuf = kvmalloc(count, GFP_KERNEL);
994 if (!kbuf)
995 return -ENOMEM;
996
997 down_read(&zram->init_lock);
998 if (!init_done(zram)) {
999 up_read(&zram->init_lock);
1000 kvfree(kbuf);
1001 return -EINVAL;
1002 }
1003
1004 for (index = *ppos; index < nr_pages; index++) {
1005 int copied;
1006
1007 zram_slot_lock(zram, index);
1008 if (!zram_allocated(zram, index))
1009 goto next;
1010
1011 ts = ktime_to_timespec64(zram->table[index].ac_time);
1012 copied = snprintf(kbuf + written, count,
1013 "%12zd %12lld.%06lu %c%c%c%c%c%c\n",
1014 index, (s64)ts.tv_sec,
1015 ts.tv_nsec / NSEC_PER_USEC,
1016 zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
1017 zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
1018 zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
1019 zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
1020 zram_get_priority(zram, index) ? 'r' : '.',
1021 zram_test_flag(zram, index,
1022 ZRAM_INCOMPRESSIBLE) ? 'n' : '.');
1023
1024 if (count <= copied) {
1025 zram_slot_unlock(zram, index);
1026 break;
1027 }
1028 written += copied;
1029 count -= copied;
1030next:
1031 zram_slot_unlock(zram, index);
1032 *ppos += 1;
1033 }
1034
1035 up_read(&zram->init_lock);
1036 if (copy_to_user(buf, kbuf, written))
1037 written = -EFAULT;
1038 kvfree(kbuf);
1039
1040 return written;
1041}
1042
1043static const struct file_operations proc_zram_block_state_op = {
1044 .open = simple_open,
1045 .read = read_block_state,
1046 .llseek = default_llseek,
1047};
1048
1049static void zram_debugfs_register(struct zram *zram)
1050{
1051 if (!zram_debugfs_root)
1052 return;
1053
1054 zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
1055 zram_debugfs_root);
1056 debugfs_create_file("block_state", 0400, zram->debugfs_dir,
1057 zram, &proc_zram_block_state_op);
1058}
1059
1060static void zram_debugfs_unregister(struct zram *zram)
1061{
1062 debugfs_remove_recursive(zram->debugfs_dir);
1063}
1064#else
1065static void zram_debugfs_create(void) {};
1066static void zram_debugfs_destroy(void) {};
1067static void zram_debugfs_register(struct zram *zram) {};
1068static void zram_debugfs_unregister(struct zram *zram) {};
1069#endif
1070
1071/*
1072 * We switched to per-cpu streams and this attr is not needed anymore.
1073 * However, we will keep it around for some time, because:
1074 * a) we may revert per-cpu streams in the future
 1075 * b) it's visible to user space and we need to follow our 2-year
 1076 * retirement rule; but we already have a number of 'soon to be
 1077 * altered' attrs, so max_comp_streams needs to wait for the next
1078 * layoff cycle.
1079 */
1080static ssize_t max_comp_streams_show(struct device *dev,
1081 struct device_attribute *attr, char *buf)
1082{
1083 return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
1084}
1085
1086static ssize_t max_comp_streams_store(struct device *dev,
1087 struct device_attribute *attr, const char *buf, size_t len)
1088{
1089 return len;
1090}
1091
1092static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
1093{
1094 /* Do not free statically defined compression algorithms */
1095 if (zram->comp_algs[prio] != default_compressor)
1096 kfree(zram->comp_algs[prio]);
1097
1098 zram->comp_algs[prio] = alg;
1099}
1100
1101static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
1102{
1103 ssize_t sz;
1104
1105 down_read(&zram->init_lock);
1106 sz = zcomp_available_show(zram->comp_algs[prio], buf);
1107 up_read(&zram->init_lock);
1108
1109 return sz;
1110}
1111
1112static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
1113{
1114 char *compressor;
1115 size_t sz;
1116
1117 sz = strlen(buf);
1118 if (sz >= CRYPTO_MAX_ALG_NAME)
1119 return -E2BIG;
1120
1121 compressor = kstrdup(buf, GFP_KERNEL);
1122 if (!compressor)
1123 return -ENOMEM;
1124
1125 /* ignore trailing newline */
1126 if (sz > 0 && compressor[sz - 1] == '\n')
1127 compressor[sz - 1] = 0x00;
1128
1129 if (!zcomp_available_algorithm(compressor)) {
1130 kfree(compressor);
1131 return -EINVAL;
1132 }
1133
1134 down_write(&zram->init_lock);
1135 if (init_done(zram)) {
1136 up_write(&zram->init_lock);
1137 kfree(compressor);
1138 pr_info("Can't change algorithm for initialized device\n");
1139 return -EBUSY;
1140 }
1141
1142 comp_algorithm_set(zram, prio, compressor);
1143 up_write(&zram->init_lock);
1144 return 0;
1145}
1146
1147static void comp_params_reset(struct zram *zram, u32 prio)
1148{
1149 struct zcomp_params *params = &zram->params[prio];
1150
1151 vfree(params->dict);
1152 params->level = ZCOMP_PARAM_NO_LEVEL;
1153 params->dict_sz = 0;
1154 params->dict = NULL;
1155}
1156
1157static int comp_params_store(struct zram *zram, u32 prio, s32 level,
1158 const char *dict_path)
1159{
1160 ssize_t sz = 0;
1161
1162 comp_params_reset(zram, prio);
1163
1164 if (dict_path) {
1165 sz = kernel_read_file_from_path(dict_path, 0,
1166 &zram->params[prio].dict,
1167 INT_MAX,
1168 NULL,
1169 READING_POLICY);
1170 if (sz < 0)
1171 return -EINVAL;
1172 }
1173
1174 zram->params[prio].dict_sz = sz;
1175 zram->params[prio].level = level;
1176 return 0;
1177}
1178
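/*
 * Usage sketch for the attribute handled below (not authoritative; the
 * device name "zram0", the algorithm name and the dictionary path are
 * assumptions -- only the parameter names "priority", "level", "algo"
 * and "dict" come from the parsing code that follows):
 *
 *	echo "priority=0 level=3" > /sys/block/zram0/algorithm_params
 *	echo "algo=zstd level=9 dict=/path/to/dict" > \
 *		/sys/block/zram0/algorithm_params
 */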
1179static ssize_t algorithm_params_store(struct device *dev,
1180 struct device_attribute *attr,
1181 const char *buf,
1182 size_t len)
1183{
1184 s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL;
1185 char *args, *param, *val, *algo = NULL, *dict_path = NULL;
1186 struct zram *zram = dev_to_zram(dev);
1187 int ret;
1188
1189 args = skip_spaces(buf);
1190 while (*args) {
1191 args = next_arg(args, &param, &val);
1192
1193 if (!val || !*val)
1194 return -EINVAL;
1195
1196 if (!strcmp(param, "priority")) {
1197 ret = kstrtoint(val, 10, &prio);
1198 if (ret)
1199 return ret;
1200 continue;
1201 }
1202
1203 if (!strcmp(param, "level")) {
1204 ret = kstrtoint(val, 10, &level);
1205 if (ret)
1206 return ret;
1207 continue;
1208 }
1209
1210 if (!strcmp(param, "algo")) {
1211 algo = val;
1212 continue;
1213 }
1214
1215 if (!strcmp(param, "dict")) {
1216 dict_path = val;
1217 continue;
1218 }
1219 }
1220
1221 /* Lookup priority by algorithm name */
1222 if (algo) {
1223 s32 p;
1224
1225 prio = -EINVAL;
1226 for (p = ZRAM_PRIMARY_COMP; p < ZRAM_MAX_COMPS; p++) {
1227 if (!zram->comp_algs[p])
1228 continue;
1229
1230 if (!strcmp(zram->comp_algs[p], algo)) {
1231 prio = p;
1232 break;
1233 }
1234 }
1235 }
1236
1237 if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
1238 return -EINVAL;
1239
1240 ret = comp_params_store(zram, prio, level, dict_path);
1241 return ret ? ret : len;
1242}
1243
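/*
 * Usage sketch for primary algorithm selection handled below
 * (illustrative; "zram0" and "zstd" are assumptions -- any name accepted
 * by zcomp_available_algorithm() works):
 *
 *	echo zstd > /sys/block/zram0/comp_algorithm
 *	cat /sys/block/zram0/comp_algorithm
 */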
1244static ssize_t comp_algorithm_show(struct device *dev,
1245 struct device_attribute *attr,
1246 char *buf)
1247{
1248 struct zram *zram = dev_to_zram(dev);
1249
1250 return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
1251}
1252
1253static ssize_t comp_algorithm_store(struct device *dev,
1254 struct device_attribute *attr,
1255 const char *buf,
1256 size_t len)
1257{
1258 struct zram *zram = dev_to_zram(dev);
1259 int ret;
1260
1261 ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf);
1262 return ret ? ret : len;
1263}
1264
1265#ifdef CONFIG_ZRAM_MULTI_COMP
1266static ssize_t recomp_algorithm_show(struct device *dev,
1267 struct device_attribute *attr,
1268 char *buf)
1269{
1270 struct zram *zram = dev_to_zram(dev);
1271 ssize_t sz = 0;
1272 u32 prio;
1273
1274 for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
1275 if (!zram->comp_algs[prio])
1276 continue;
1277
1278 sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
1279 sz += __comp_algorithm_show(zram, prio, buf + sz);
1280 }
1281
1282 return sz;
1283}
1284
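/*
 * Usage sketch for secondary (recompression) algorithm registration
 * handled below (illustrative; "zram0", "zstd" and the priority value
 * are assumptions -- the parameter names "algo" and "priority" come from
 * the parsing code that follows):
 *
 *	echo "algo=zstd priority=1" > /sys/block/zram0/recomp_algorithm
 *	cat /sys/block/zram0/recomp_algorithm
 */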
1285static ssize_t recomp_algorithm_store(struct device *dev,
1286 struct device_attribute *attr,
1287 const char *buf,
1288 size_t len)
1289{
1290 struct zram *zram = dev_to_zram(dev);
1291 int prio = ZRAM_SECONDARY_COMP;
1292 char *args, *param, *val;
1293 char *alg = NULL;
1294 int ret;
1295
1296 args = skip_spaces(buf);
1297 while (*args) {
1298 args = next_arg(args, &param, &val);
1299
1300 if (!val || !*val)
1301 return -EINVAL;
1302
1303 if (!strcmp(param, "algo")) {
1304 alg = val;
1305 continue;
1306 }
1307
1308 if (!strcmp(param, "priority")) {
1309 ret = kstrtoint(val, 10, &prio);
1310 if (ret)
1311 return ret;
1312 continue;
1313 }
1314 }
1315
1316 if (!alg)
1317 return -EINVAL;
1318
1319 if (prio < ZRAM_SECONDARY_COMP || prio >= ZRAM_MAX_COMPS)
1320 return -EINVAL;
1321
1322 ret = __comp_algorithm_store(zram, prio, alg);
1323 return ret ? ret : len;
1324}
1325#endif
1326
1327static ssize_t compact_store(struct device *dev,
1328 struct device_attribute *attr, const char *buf, size_t len)
1329{
1330 struct zram *zram = dev_to_zram(dev);
1331
1332 down_read(&zram->init_lock);
1333 if (!init_done(zram)) {
1334 up_read(&zram->init_lock);
1335 return -EINVAL;
1336 }
1337
1338 zs_compact(zram->mem_pool);
1339 up_read(&zram->init_lock);
1340
1341 return len;
1342}
1343
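/*
 * The io_stat columns emitted below, in order: failed_reads,
 * failed_writes, a constant 0 (the historical third column, kept so the
 * output format stays stable) and notify_free.
 */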
1344static ssize_t io_stat_show(struct device *dev,
1345 struct device_attribute *attr, char *buf)
1346{
1347 struct zram *zram = dev_to_zram(dev);
1348 ssize_t ret;
1349
1350 down_read(&zram->init_lock);
1351 ret = scnprintf(buf, PAGE_SIZE,
1352 "%8llu %8llu 0 %8llu\n",
1353 (u64)atomic64_read(&zram->stats.failed_reads),
1354 (u64)atomic64_read(&zram->stats.failed_writes),
1355 (u64)atomic64_read(&zram->stats.notify_free));
1356 up_read(&zram->init_lock);
1357
1358 return ret;
1359}
1360
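/*
 * The mm_stat columns emitted below, in order: orig_data_size,
 * compr_data_size, mem_used_total, mem_limit, mem_used_max, same_pages,
 * pages_compacted, huge_pages and huge_pages_since (the first five are
 * byte values, the remaining four are page counts).
 */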
1361static ssize_t mm_stat_show(struct device *dev,
1362 struct device_attribute *attr, char *buf)
1363{
1364 struct zram *zram = dev_to_zram(dev);
1365 struct zs_pool_stats pool_stats;
1366 u64 orig_size, mem_used = 0;
1367 long max_used;
1368 ssize_t ret;
1369
1370 memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
1371
1372 down_read(&zram->init_lock);
1373 if (init_done(zram)) {
1374 mem_used = zs_get_total_pages(zram->mem_pool);
1375 zs_pool_stats(zram->mem_pool, &pool_stats);
1376 }
1377
1378 orig_size = atomic64_read(&zram->stats.pages_stored);
1379 max_used = atomic_long_read(&zram->stats.max_used_pages);
1380
1381 ret = scnprintf(buf, PAGE_SIZE,
1382 "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
1383 orig_size << PAGE_SHIFT,
1384 (u64)atomic64_read(&zram->stats.compr_data_size),
1385 mem_used << PAGE_SHIFT,
1386 zram->limit_pages << PAGE_SHIFT,
1387 max_used << PAGE_SHIFT,
1388 (u64)atomic64_read(&zram->stats.same_pages),
1389 atomic_long_read(&pool_stats.pages_compacted),
1390 (u64)atomic64_read(&zram->stats.huge_pages),
1391 (u64)atomic64_read(&zram->stats.huge_pages_since));
1392 up_read(&zram->init_lock);
1393
1394 return ret;
1395}
1396
1397#ifdef CONFIG_ZRAM_WRITEBACK
1398#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
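/*
 * The bd_stat columns emitted below, in order: bd_count, bd_reads and
 * bd_writes, all scaled to 4K units via FOUR_K().
 */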
1399static ssize_t bd_stat_show(struct device *dev,
1400 struct device_attribute *attr, char *buf)
1401{
1402 struct zram *zram = dev_to_zram(dev);
1403 ssize_t ret;
1404
1405 down_read(&zram->init_lock);
1406 ret = scnprintf(buf, PAGE_SIZE,
1407 "%8llu %8llu %8llu\n",
1408 FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
1409 FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
1410 FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
1411 up_read(&zram->init_lock);
1412
1413 return ret;
1414}
1415#endif
1416
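/*
 * The debug_stat columns emitted below (after the "version:" line):
 * writestall and miss_free.
 */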
1417static ssize_t debug_stat_show(struct device *dev,
1418 struct device_attribute *attr, char *buf)
1419{
1420 int version = 1;
1421 struct zram *zram = dev_to_zram(dev);
1422 ssize_t ret;
1423
1424 down_read(&zram->init_lock);
1425 ret = scnprintf(buf, PAGE_SIZE,
1426 "version: %d\n%8llu %8llu\n",
1427 version,
1428 (u64)atomic64_read(&zram->stats.writestall),
1429 (u64)atomic64_read(&zram->stats.miss_free));
1430 up_read(&zram->init_lock);
1431
1432 return ret;
1433}
1434
1435static DEVICE_ATTR_RO(io_stat);
1436static DEVICE_ATTR_RO(mm_stat);
1437#ifdef CONFIG_ZRAM_WRITEBACK
1438static DEVICE_ATTR_RO(bd_stat);
1439#endif
1440static DEVICE_ATTR_RO(debug_stat);
1441
1442static void zram_meta_free(struct zram *zram, u64 disksize)
1443{
1444 size_t num_pages = disksize >> PAGE_SHIFT;
1445 size_t index;
1446
1447 if (!zram->table)
1448 return;
1449
1450 /* Free all pages that are still in this zram device */
1451 for (index = 0; index < num_pages; index++)
1452 zram_free_page(zram, index);
1453
1454 zs_destroy_pool(zram->mem_pool);
1455 vfree(zram->table);
1456 zram->table = NULL;
1457}
1458
1459static bool zram_meta_alloc(struct zram *zram, u64 disksize)
1460{
1461 size_t num_pages, index;
1462
1463 num_pages = disksize >> PAGE_SHIFT;
1464 zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
1465 if (!zram->table)
1466 return false;
1467
1468 zram->mem_pool = zs_create_pool(zram->disk->disk_name);
1469 if (!zram->mem_pool) {
1470 vfree(zram->table);
1471 zram->table = NULL;
1472 return false;
1473 }
1474
1475 if (!huge_class_size)
1476 huge_class_size = zs_huge_class_size(zram->mem_pool);
1477
1478 for (index = 0; index < num_pages; index++)
1479 spin_lock_init(&zram->table[index].lock);
1480 return true;
1481}
1482
1483/*
1484 * To protect concurrent access to the same index entry, the caller
1485 * should hold this table entry's slot lock to indicate that the
1486 * entry is being accessed.
1487 */
1488static void zram_free_page(struct zram *zram, size_t index)
1489{
1490 unsigned long handle;
1491
1492#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
1493 zram->table[index].ac_time = 0;
1494#endif
1495
1496 zram_clear_flag(zram, index, ZRAM_IDLE);
1497 zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1498 zram_clear_flag(zram, index, ZRAM_PP_SLOT);
1499 zram_set_priority(zram, index, 0);
1500
1501 if (zram_test_flag(zram, index, ZRAM_HUGE)) {
1502 zram_clear_flag(zram, index, ZRAM_HUGE);
1503 atomic64_dec(&zram->stats.huge_pages);
1504 }
1505
1506 if (zram_test_flag(zram, index, ZRAM_WB)) {
1507 zram_clear_flag(zram, index, ZRAM_WB);
1508 free_block_bdev(zram, zram_get_element(zram, index));
1509 goto out;
1510 }
1511
1512 /*
1513 * No memory is allocated for same-element-filled pages.
1514 * Simply clear the ZRAM_SAME flag.
1515 */
1516 if (zram_test_flag(zram, index, ZRAM_SAME)) {
1517 zram_clear_flag(zram, index, ZRAM_SAME);
1518 atomic64_dec(&zram->stats.same_pages);
1519 goto out;
1520 }
1521
1522 handle = zram_get_handle(zram, index);
1523 if (!handle)
1524 return;
1525
1526 zs_free(zram->mem_pool, handle);
1527
1528 atomic64_sub(zram_get_obj_size(zram, index),
1529 &zram->stats.compr_data_size);
1530out:
1531 atomic64_dec(&zram->stats.pages_stored);
1532 zram_set_handle(zram, index, 0);
1533 zram_set_obj_size(zram, index, 0);
1534}
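
/*
 * Typical caller pattern for zram_free_page() above, shown for
 * illustration (this is the pattern used by zram_bio_discard() below):
 *
 *	zram_slot_lock(zram, index);
 *	zram_free_page(zram, index);
 *	zram_slot_unlock(zram, index);
 */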
1535
1536/*
1537 * Reads (decompresses if needed) a page from zspool (zsmalloc).
1538 * Corresponding ZRAM slot should be locked.
1539 */
1540static int zram_read_from_zspool(struct zram *zram, struct page *page,
1541 u32 index)
1542{
1543 struct zcomp_strm *zstrm;
1544 unsigned long handle;
1545 unsigned int size;
1546 void *src, *dst;
1547 u32 prio;
1548 int ret;
1549
1550 handle = zram_get_handle(zram, index);
1551 if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
1552 unsigned long value;
1553 void *mem;
1554
1555 value = handle ? zram_get_element(zram, index) : 0;
1556 mem = kmap_local_page(page);
1557 zram_fill_page(mem, PAGE_SIZE, value);
1558 kunmap_local(mem);
1559 return 0;
1560 }
1561
1562 size = zram_get_obj_size(zram, index);
1563
1564 if (size != PAGE_SIZE) {
1565 prio = zram_get_priority(zram, index);
1566 zstrm = zcomp_stream_get(zram->comps[prio]);
1567 }
1568
1569 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
1570 if (size == PAGE_SIZE) {
1571 dst = kmap_local_page(page);
1572 copy_page(dst, src);
1573 kunmap_local(dst);
1574 ret = 0;
1575 } else {
1576 dst = kmap_local_page(page);
1577 ret = zcomp_decompress(zram->comps[prio], zstrm,
1578 src, size, dst);
1579 kunmap_local(dst);
1580 zcomp_stream_put(zram->comps[prio]);
1581 }
1582 zs_unmap_object(zram->mem_pool, handle);
1583 return ret;
1584}
1585
1586static int zram_read_page(struct zram *zram, struct page *page, u32 index,
1587 struct bio *parent)
1588{
1589 int ret;
1590
1591 zram_slot_lock(zram, index);
1592 if (!zram_test_flag(zram, index, ZRAM_WB)) {
1593 /* Slot should be locked throughout the function call */
1594 ret = zram_read_from_zspool(zram, page, index);
1595 zram_slot_unlock(zram, index);
1596 } else {
1597 /*
1598 * The slot should be unlocked before reading from the backing
1599 * device.
1600 */
1601 zram_slot_unlock(zram, index);
1602
1603 ret = read_from_bdev(zram, page, zram_get_element(zram, index),
1604 parent);
1605 }
1606
1607 /* Should NEVER happen. Return bio error if it does. */
1608 if (WARN_ON(ret < 0))
1609 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
1610
1611 return ret;
1612}
1613
1614/*
1615 * Use a temporary buffer to decompress the page, as the decompressor
1616 * always expects a full page for the output.
1617 */
1618static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
1619 u32 index, int offset)
1620{
1621 struct page *page = alloc_page(GFP_NOIO);
1622 int ret;
1623
1624 if (!page)
1625 return -ENOMEM;
1626 ret = zram_read_page(zram, page, index, NULL);
1627 if (likely(!ret))
1628 memcpy_to_bvec(bvec, page_address(page) + offset);
1629 __free_page(page);
1630 return ret;
1631}
1632
1633static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
1634 u32 index, int offset, struct bio *bio)
1635{
1636 if (is_partial_io(bvec))
1637 return zram_bvec_read_partial(zram, bvec, index, offset);
1638 return zram_read_page(zram, bvec->bv_page, index, bio);
1639}
1640
1641static int zram_write_page(struct zram *zram, struct page *page, u32 index)
1642{
1643 int ret = 0;
1644 unsigned long alloced_pages;
1645 unsigned long handle = -ENOMEM;
1646 unsigned int comp_len = 0;
1647 void *src, *dst, *mem;
1648 struct zcomp_strm *zstrm;
1649 unsigned long element = 0;
1650 enum zram_pageflags flags = 0;
1651
1652 mem = kmap_local_page(page);
1653 if (page_same_filled(mem, &element)) {
1654 kunmap_local(mem);
1655 /* Free memory associated with this sector now. */
1656 flags = ZRAM_SAME;
1657 atomic64_inc(&zram->stats.same_pages);
1658 goto out;
1659 }
1660 kunmap_local(mem);
1661
1662compress_again:
1663 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1664 src = kmap_local_page(page);
1665 ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
1666 src, &comp_len);
1667 kunmap_local(src);
1668
1669 if (unlikely(ret)) {
1670 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1671 pr_err("Compression failed! err=%d\n", ret);
1672 zs_free(zram->mem_pool, handle);
1673 return ret;
1674 }
1675
1676 if (comp_len >= huge_class_size)
1677 comp_len = PAGE_SIZE;
1678 /*
1679 * handle allocation has 2 paths:
1680 * a) fast path is executed with preemption disabled (for
1681 * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
1682 * since we can't sleep;
1683 * b) slow path enables preemption and attempts to allocate
1684 * the page with __GFP_DIRECT_RECLAIM bit set. we have to
1685 * put per-cpu compression stream and, thus, to re-do
1686 * the compression once handle is allocated.
1687 *
1688 * if we have a 'non-null' handle here then we are coming
1689 * from the slow path and handle has already been allocated.
1690 */
1691 if (IS_ERR_VALUE(handle))
1692 handle = zs_malloc(zram->mem_pool, comp_len,
1693 __GFP_KSWAPD_RECLAIM |
1694 __GFP_NOWARN |
1695 __GFP_HIGHMEM |
1696 __GFP_MOVABLE);
1697 if (IS_ERR_VALUE(handle)) {
1698 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1699 atomic64_inc(&zram->stats.writestall);
1700 handle = zs_malloc(zram->mem_pool, comp_len,
1701 GFP_NOIO | __GFP_HIGHMEM |
1702 __GFP_MOVABLE);
1703 if (IS_ERR_VALUE(handle))
1704 return PTR_ERR((void *)handle);
1705
1706 if (comp_len != PAGE_SIZE)
1707 goto compress_again;
1708 /*
1709 * If the page is not compressible, we need to re-acquire the
1710 * per-cpu stream before executing the code below. The
1711 * zcomp_stream_get() call disables cpu hotplug and grabs the
1712 * zstrm buffer back, so that the dereference of zstrm below
1713 * remains valid.
1714 */
1715 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1716 }
1717
1718 alloced_pages = zs_get_total_pages(zram->mem_pool);
1719 update_used_max(zram, alloced_pages);
1720
1721 if (zram->limit_pages && alloced_pages > zram->limit_pages) {
1722 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1723 zs_free(zram->mem_pool, handle);
1724 return -ENOMEM;
1725 }
1726
1727 dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
1728
1729 src = zstrm->buffer;
1730 if (comp_len == PAGE_SIZE)
1731 src = kmap_local_page(page);
1732 memcpy(dst, src, comp_len);
1733 if (comp_len == PAGE_SIZE)
1734 kunmap_local(src);
1735
1736 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1737 zs_unmap_object(zram->mem_pool, handle);
1738 atomic64_add(comp_len, &zram->stats.compr_data_size);
1739out:
1740 /*
1741 * Free memory associated with this sector
1742 * before overwriting unused sectors.
1743 */
1744 zram_slot_lock(zram, index);
1745 zram_free_page(zram, index);
1746
1747 if (comp_len == PAGE_SIZE) {
1748 zram_set_flag(zram, index, ZRAM_HUGE);
1749 atomic64_inc(&zram->stats.huge_pages);
1750 atomic64_inc(&zram->stats.huge_pages_since);
1751 }
1752
1753 if (flags) {
1754 zram_set_flag(zram, index, flags);
1755 zram_set_element(zram, index, element);
1756 } else {
1757 zram_set_handle(zram, index, handle);
1758 zram_set_obj_size(zram, index, comp_len);
1759 }
1760 zram_slot_unlock(zram, index);
1761
1762 /* Update stats */
1763 atomic64_inc(&zram->stats.pages_stored);
1764 return ret;
1765}
1766
1767/*
1768 * This is a partial IO. Read the full page before writing the changes.
1769 */
1770static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
1771 u32 index, int offset, struct bio *bio)
1772{
1773 struct page *page = alloc_page(GFP_NOIO);
1774 int ret;
1775
1776 if (!page)
1777 return -ENOMEM;
1778
1779 ret = zram_read_page(zram, page, index, bio);
1780 if (!ret) {
1781 memcpy_from_bvec(page_address(page) + offset, bvec);
1782 ret = zram_write_page(zram, page, index);
1783 }
1784 __free_page(page);
1785 return ret;
1786}
1787
1788static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
1789 u32 index, int offset, struct bio *bio)
1790{
1791 if (is_partial_io(bvec))
1792 return zram_bvec_write_partial(zram, bvec, index, offset, bio);
1793 return zram_write_page(zram, bvec->bv_page, index);
1794}
1795
1796#ifdef CONFIG_ZRAM_MULTI_COMP
1797#define RECOMPRESS_IDLE (1 << 0)
1798#define RECOMPRESS_HUGE (1 << 1)
1799
1800static int scan_slots_for_recompress(struct zram *zram, u32 mode,
1801 struct zram_pp_ctl *ctl)
1802{
1803 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
1804 struct zram_pp_slot *pps = NULL;
1805 unsigned long index;
1806
1807 for (index = 0; index < nr_pages; index++) {
1808 if (!pps)
1809 pps = kmalloc(sizeof(*pps), GFP_KERNEL);
1810 if (!pps)
1811 return -ENOMEM;
1812
1813 INIT_LIST_HEAD(&pps->entry);
1814
1815 zram_slot_lock(zram, index);
1816 if (!zram_allocated(zram, index))
1817 goto next;
1818
1819 if (mode & RECOMPRESS_IDLE &&
1820 !zram_test_flag(zram, index, ZRAM_IDLE))
1821 goto next;
1822
1823 if (mode & RECOMPRESS_HUGE &&
1824 !zram_test_flag(zram, index, ZRAM_HUGE))
1825 goto next;
1826
1827 if (zram_test_flag(zram, index, ZRAM_WB) ||
1828 zram_test_flag(zram, index, ZRAM_SAME) ||
1829 zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1830 goto next;
1831
1832 pps->index = index;
1833 place_pp_slot(zram, ctl, pps);
1834 pps = NULL;
1835next:
1836 zram_slot_unlock(zram, index);
1837 }
1838
1839 kfree(pps);
1840 return 0;
1841}
1842
1843/*
1844 * This function will decompress the page (unless it's ZRAM_HUGE) and then
1845 * attempt to compress it with the algorithm of the provided priority
1846 * (which is potentially more effective).
1847 *
1848 * Corresponding ZRAM slot should be locked.
1849 */
1850static int recompress_slot(struct zram *zram, u32 index, struct page *page,
1851 u64 *num_recomp_pages, u32 threshold, u32 prio,
1852 u32 prio_max)
1853{
1854 struct zcomp_strm *zstrm = NULL;
1855 unsigned long handle_old;
1856 unsigned long handle_new;
1857 unsigned int comp_len_old;
1858 unsigned int comp_len_new;
1859 unsigned int class_index_old;
1860 unsigned int class_index_new;
1861 u32 num_recomps = 0;
1862 void *src, *dst;
1863 int ret;
1864
1865 handle_old = zram_get_handle(zram, index);
1866 if (!handle_old)
1867 return -EINVAL;
1868
1869 comp_len_old = zram_get_obj_size(zram, index);
1870 /*
1871 * Do not recompress objects that are already "small enough".
1872 */
1873 if (comp_len_old < threshold)
1874 return 0;
1875
1876 ret = zram_read_from_zspool(zram, page, index);
1877 if (ret)
1878 return ret;
1879
1880 /*
1881 * We touched this entry so mark it as non-IDLE. This makes sure that
1882 * we don't preserve IDLE flag and don't incorrectly pick this entry
1883 * for different post-processing type (e.g. writeback).
1884 */
1885 zram_clear_flag(zram, index, ZRAM_IDLE);
1886
1887 class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
1888 /*
1889 * Iterate the secondary comp algorithms list (in order of priority)
1890 * and try to recompress the page.
1891 */
1892 for (; prio < prio_max; prio++) {
1893 if (!zram->comps[prio])
1894 continue;
1895
1896 /*
1897 * Skip if the object is already re-compressed with a higher
1898 * priority algorithm (or same algorithm).
1899 */
1900 if (prio <= zram_get_priority(zram, index))
1901 continue;
1902
1903 num_recomps++;
1904 zstrm = zcomp_stream_get(zram->comps[prio]);
1905 src = kmap_local_page(page);
1906 ret = zcomp_compress(zram->comps[prio], zstrm,
1907 src, &comp_len_new);
1908 kunmap_local(src);
1909
1910 if (ret) {
1911 zcomp_stream_put(zram->comps[prio]);
1912 return ret;
1913 }
1914
1915 class_index_new = zs_lookup_class_index(zram->mem_pool,
1916 comp_len_new);
1917
1918 /* Continue until we make progress */
1919 if (class_index_new >= class_index_old ||
1920 (threshold && comp_len_new >= threshold)) {
1921 zcomp_stream_put(zram->comps[prio]);
1922 continue;
1923 }
1924
1925 /* Recompression was successful so break out */
1926 break;
1927 }
1928
1929 /*
1930 * We did not try to recompress, e.g. when we have only one
1931 * secondary algorithm and the page has already been recompressed
1932 * with that algorithm.
1933 */
1934 if (!zstrm)
1935 return 0;
1936
1937 /*
1938 * Decrement the limit (if set) on pages we can recompress, even
1939 * when current recompression was unsuccessful or did not compress
1940 * the page below the threshold, because we still spent resources
1941 * on it.
1942 */
1943 if (*num_recomp_pages)
1944 *num_recomp_pages -= 1;
1945
1946 if (class_index_new >= class_index_old) {
1947 /*
1948 * Secondary algorithms failed to re-compress the page
1949 * in a way that would save memory, mark the object as
1950 * incompressible so that we will not try to compress
1951 * it again.
1952 *
1953 * We need to make sure that all secondary algorithms have
1954 * failed, so we test if the number of recompressions matches
1955 * the number of active secondary algorithms.
1956 */
1957 if (num_recomps == zram->num_active_comps - 1)
1958 zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1959 return 0;
1960 }
1961
1962 /* Successful recompression but above threshold */
1963 if (threshold && comp_len_new >= threshold)
1964 return 0;
1965
1966 /*
1967 * No direct reclaim (slow path) for handle allocation and no
1968 * re-compression attempt (unlike in zram_write_page()) since
1969 * we have already stored that object in zsmalloc. If we cannot
1970 * allocate memory for the recompressed object then we bail out and
1971 * simply keep the old (existing) object in zsmalloc.
1972 */
1973 handle_new = zs_malloc(zram->mem_pool, comp_len_new,
1974 __GFP_KSWAPD_RECLAIM |
1975 __GFP_NOWARN |
1976 __GFP_HIGHMEM |
1977 __GFP_MOVABLE);
1978 if (IS_ERR_VALUE(handle_new)) {
1979 zcomp_stream_put(zram->comps[prio]);
1980 return PTR_ERR((void *)handle_new);
1981 }
1982
1983 dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
1984 memcpy(dst, zstrm->buffer, comp_len_new);
1985 zcomp_stream_put(zram->comps[prio]);
1986
1987 zs_unmap_object(zram->mem_pool, handle_new);
1988
1989 zram_free_page(zram, index);
1990 zram_set_handle(zram, index, handle_new);
1991 zram_set_obj_size(zram, index, comp_len_new);
1992 zram_set_priority(zram, index, prio);
1993
1994 atomic64_add(comp_len_new, &zram->stats.compr_data_size);
1995 atomic64_inc(&zram->stats.pages_stored);
1996
1997 return 0;
1998}
1999
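/*
 * Usage sketch for the recompression trigger handled below (illustrative;
 * "zram0" and the values are assumptions -- the parameter names "type",
 * "max_pages", "threshold", "algo" and "priority" come from the parsing
 * code that follows):
 *
 *	echo "type=huge_idle max_pages=42" > /sys/block/zram0/recompress
 *	echo "type=idle threshold=3000" > /sys/block/zram0/recompress
 *	echo "algo=zstd" > /sys/block/zram0/recompress
 */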
2000static ssize_t recompress_store(struct device *dev,
2001 struct device_attribute *attr,
2002 const char *buf, size_t len)
2003{
2004 u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
2005 struct zram *zram = dev_to_zram(dev);
2006 char *args, *param, *val, *algo = NULL;
2007 u64 num_recomp_pages = ULLONG_MAX;
2008 struct zram_pp_ctl *ctl = NULL;
2009 struct zram_pp_slot *pps;
2010 u32 mode = 0, threshold = 0;
2011 struct page *page;
2012 ssize_t ret;
2013
2014 args = skip_spaces(buf);
2015 while (*args) {
2016 args = next_arg(args, &param, &val);
2017
2018 if (!val || !*val)
2019 return -EINVAL;
2020
2021 if (!strcmp(param, "type")) {
2022 if (!strcmp(val, "idle"))
2023 mode = RECOMPRESS_IDLE;
2024 if (!strcmp(val, "huge"))
2025 mode = RECOMPRESS_HUGE;
2026 if (!strcmp(val, "huge_idle"))
2027 mode = RECOMPRESS_IDLE | RECOMPRESS_HUGE;
2028 continue;
2029 }
2030
2031 if (!strcmp(param, "max_pages")) {
2032 /*
2033 * Limit the number of entries (pages) we attempt to
2034 * recompress.
2035 */
2036 ret = kstrtoull(val, 10, &num_recomp_pages);
2037 if (ret)
2038 return ret;
2039 continue;
2040 }
2041
2042 if (!strcmp(param, "threshold")) {
2043 /*
2044 * We will only re-compress objects of size equal to or
2045 * greater than this watermark.
2046 */
2047 ret = kstrtouint(val, 10, &threshold);
2048 if (ret)
2049 return ret;
2050 continue;
2051 }
2052
2053 if (!strcmp(param, "algo")) {
2054 algo = val;
2055 continue;
2056 }
2057
2058 if (!strcmp(param, "priority")) {
2059 ret = kstrtouint(val, 10, &prio);
2060 if (ret)
2061 return ret;
2062
2063 if (prio == ZRAM_PRIMARY_COMP)
2064 prio = ZRAM_SECONDARY_COMP;
2065
2066 prio_max = min(prio + 1, ZRAM_MAX_COMPS);
2067 continue;
2068 }
2069 }
2070
2071 if (threshold >= huge_class_size)
2072 return -EINVAL;
2073
2074 down_read(&zram->init_lock);
2075 if (!init_done(zram)) {
2076 ret = -EINVAL;
2077 goto release_init_lock;
2078 }
2079
2080 /* Do not permit concurrent post-processing actions. */
2081 if (atomic_xchg(&zram->pp_in_progress, 1)) {
2082 up_read(&zram->init_lock);
2083 return -EAGAIN;
2084 }
2085
2086 if (algo) {
2087 bool found = false;
2088
2089 for (; prio < ZRAM_MAX_COMPS; prio++) {
2090 if (!zram->comp_algs[prio])
2091 continue;
2092
2093 if (!strcmp(zram->comp_algs[prio], algo)) {
2094 prio_max = min(prio + 1, ZRAM_MAX_COMPS);
2095 found = true;
2096 break;
2097 }
2098 }
2099
2100 if (!found) {
2101 ret = -EINVAL;
2102 goto release_init_lock;
2103 }
2104 }
2105
2106 page = alloc_page(GFP_KERNEL);
2107 if (!page) {
2108 ret = -ENOMEM;
2109 goto release_init_lock;
2110 }
2111
2112 ctl = init_pp_ctl();
2113 if (!ctl) {
2114 ret = -ENOMEM;
2115 goto release_init_lock;
2116 }
2117
2118 scan_slots_for_recompress(zram, mode, ctl);
2119
2120 ret = len;
2121 while ((pps = select_pp_slot(ctl))) {
2122 int err = 0;
2123
2124 if (!num_recomp_pages)
2125 break;
2126
2127 zram_slot_lock(zram, pps->index);
2128 if (!zram_test_flag(zram, pps->index, ZRAM_PP_SLOT))
2129 goto next;
2130
2131 err = recompress_slot(zram, pps->index, page,
2132 &num_recomp_pages, threshold,
2133 prio, prio_max);
2134next:
2135 zram_slot_unlock(zram, pps->index);
2136 release_pp_slot(zram, pps);
2137
2138 if (err) {
2139 ret = err;
2140 break;
2141 }
2142
2143 cond_resched();
2144 }
2145
2146 __free_page(page);
2147
2148release_init_lock:
2149 release_pp_ctl(zram, ctl);
2150 atomic_set(&zram->pp_in_progress, 0);
2151 up_read(&zram->init_lock);
2152 return ret;
2153}
2154#endif
2155
2156static void zram_bio_discard(struct zram *zram, struct bio *bio)
2157{
2158 size_t n = bio->bi_iter.bi_size;
2159 u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
2160 u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
2161 SECTOR_SHIFT;
2162
2163 /*
2164 * zram manages data in physical block size units. Because the logical
2165 * block size isn't identical to the physical block size on some
2166 * architectures, we could get a discard request pointing to a specific
2167 * offset within a certain physical block. Although we could handle such
2168 * a request by reading that physical block, decompressing it, partially
2169 * zeroing it, re-compressing it and storing it back, doing so isn't
2170 * reasonable because our intent with a discard request is to save
2171 * memory. So skipping this logical block is appropriate here.
2172 */
2173 if (offset) {
2174 if (n <= (PAGE_SIZE - offset))
2175 return;
2176
2177 n -= (PAGE_SIZE - offset);
2178 index++;
2179 }
2180
2181 while (n >= PAGE_SIZE) {
2182 zram_slot_lock(zram, index);
2183 zram_free_page(zram, index);
2184 zram_slot_unlock(zram, index);
2185 atomic64_inc(&zram->stats.notify_free);
2186 index++;
2187 n -= PAGE_SIZE;
2188 }
2189
2190 bio_endio(bio);
2191}
2192
2193static void zram_bio_read(struct zram *zram, struct bio *bio)
2194{
2195 unsigned long start_time = bio_start_io_acct(bio);
2196 struct bvec_iter iter = bio->bi_iter;
2197
2198 do {
2199 u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
2200 u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
2201 SECTOR_SHIFT;
2202 struct bio_vec bv = bio_iter_iovec(bio, iter);
2203
2204 bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
2205
2206 if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
2207 atomic64_inc(&zram->stats.failed_reads);
2208 bio->bi_status = BLK_STS_IOERR;
2209 break;
2210 }
2211 flush_dcache_page(bv.bv_page);
2212
2213 zram_slot_lock(zram, index);
2214 zram_accessed(zram, index);
2215 zram_slot_unlock(zram, index);
2216
2217 bio_advance_iter_single(bio, &iter, bv.bv_len);
2218 } while (iter.bi_size);
2219
2220 bio_end_io_acct(bio, start_time);
2221 bio_endio(bio);
2222}
2223
2224static void zram_bio_write(struct zram *zram, struct bio *bio)
2225{
2226 unsigned long start_time = bio_start_io_acct(bio);
2227 struct bvec_iter iter = bio->bi_iter;
2228
2229 do {
2230 u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
2231 u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
2232 SECTOR_SHIFT;
2233 struct bio_vec bv = bio_iter_iovec(bio, iter);
2234
2235 bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
2236
2237 if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
2238 atomic64_inc(&zram->stats.failed_writes);
2239 bio->bi_status = BLK_STS_IOERR;
2240 break;
2241 }
2242
2243 zram_slot_lock(zram, index);
2244 zram_accessed(zram, index);
2245 zram_slot_unlock(zram, index);
2246
2247 bio_advance_iter_single(bio, &iter, bv.bv_len);
2248 } while (iter.bi_size);
2249
2250 bio_end_io_acct(bio, start_time);
2251 bio_endio(bio);
2252}
2253
2254/*
2255 * Handler function for all zram I/O requests.
2256 */
2257static void zram_submit_bio(struct bio *bio)
2258{
2259 struct zram *zram = bio->bi_bdev->bd_disk->private_data;
2260
2261 switch (bio_op(bio)) {
2262 case REQ_OP_READ:
2263 zram_bio_read(zram, bio);
2264 break;
2265 case REQ_OP_WRITE:
2266 zram_bio_write(zram, bio);
2267 break;
2268 case REQ_OP_DISCARD:
2269 case REQ_OP_WRITE_ZEROES:
2270 zram_bio_discard(zram, bio);
2271 break;
2272 default:
2273 WARN_ON_ONCE(1);
2274 bio_endio(bio);
2275 }
2276}
2277
2278static void zram_slot_free_notify(struct block_device *bdev,
2279 unsigned long index)
2280{
2281 struct zram *zram;
2282
2283 zram = bdev->bd_disk->private_data;
2284
2285 atomic64_inc(&zram->stats.notify_free);
2286 if (!zram_slot_trylock(zram, index)) {
2287 atomic64_inc(&zram->stats.miss_free);
2288 return;
2289 }
2290
2291 zram_free_page(zram, index);
2292 zram_slot_unlock(zram, index);
2293}
2294
2295static void zram_comp_params_reset(struct zram *zram)
2296{
2297 u32 prio;
2298
2299 for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
2300 comp_params_reset(zram, prio);
2301 }
2302}
2303
2304static void zram_destroy_comps(struct zram *zram)
2305{
2306 u32 prio;
2307
2308 for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
2309 struct zcomp *comp = zram->comps[prio];
2310
2311 zram->comps[prio] = NULL;
2312 if (!comp)
2313 continue;
2314 zcomp_destroy(comp);
2315 zram->num_active_comps--;
2316 }
2317
2318 for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
2319 /* Do not free statically defined compression algorithms */
2320 if (zram->comp_algs[prio] != default_compressor)
2321 kfree(zram->comp_algs[prio]);
2322 zram->comp_algs[prio] = NULL;
2323 }
2324
2325 zram_comp_params_reset(zram);
2326}
2327
2328static void zram_reset_device(struct zram *zram)
2329{
2330 down_write(&zram->init_lock);
2331
2332 zram->limit_pages = 0;
2333
2334 set_capacity_and_notify(zram->disk, 0);
2335 part_stat_set_all(zram->disk->part0, 0);
2336
2337 /* All in-flight I/O on every CPU has completed, so it's safe to free */
2338 zram_meta_free(zram, zram->disksize);
2339 zram->disksize = 0;
2340 zram_destroy_comps(zram);
2341 memset(&zram->stats, 0, sizeof(zram->stats));
2342 atomic_set(&zram->pp_in_progress, 0);
2343 reset_bdev(zram);
2344
2345 comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2346 up_write(&zram->init_lock);
2347}
2348
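/*
 * Usage sketch for the disksize attribute handled below (illustrative;
 * "zram0" and the size are assumptions). The value is parsed with
 * memparse(), so suffixes such as K, M and G are accepted:
 *
 *	echo 1G > /sys/block/zram0/disksize
 */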
2349static ssize_t disksize_store(struct device *dev,
2350 struct device_attribute *attr, const char *buf, size_t len)
2351{
2352 u64 disksize;
2353 struct zcomp *comp;
2354 struct zram *zram = dev_to_zram(dev);
2355 int err;
2356 u32 prio;
2357
2358 disksize = memparse(buf, NULL);
2359 if (!disksize)
2360 return -EINVAL;
2361
2362 down_write(&zram->init_lock);
2363 if (init_done(zram)) {
2364 pr_info("Cannot change disksize for initialized device\n");
2365 err = -EBUSY;
2366 goto out_unlock;
2367 }
2368
2369 disksize = PAGE_ALIGN(disksize);
2370 if (!zram_meta_alloc(zram, disksize)) {
2371 err = -ENOMEM;
2372 goto out_unlock;
2373 }
2374
2375 for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
2376 if (!zram->comp_algs[prio])
2377 continue;
2378
2379 comp = zcomp_create(zram->comp_algs[prio],
2380 &zram->params[prio]);
2381 if (IS_ERR(comp)) {
2382 pr_err("Cannot initialise %s compressing backend\n",
2383 zram->comp_algs[prio]);
2384 err = PTR_ERR(comp);
2385 goto out_free_comps;
2386 }
2387
2388 zram->comps[prio] = comp;
2389 zram->num_active_comps++;
2390 }
2391 zram->disksize = disksize;
2392 set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
2393 up_write(&zram->init_lock);
2394
2395 return len;
2396
2397out_free_comps:
2398 zram_destroy_comps(zram);
2399 zram_meta_free(zram, disksize);
2400out_unlock:
2401 up_write(&zram->init_lock);
2402 return err;
2403}
2404
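/*
 * Usage sketch for the reset attribute handled below (illustrative;
 * "zram0" is an assumption -- any non-zero value triggers the reset):
 *
 *	echo 1 > /sys/block/zram0/reset
 */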
2405static ssize_t reset_store(struct device *dev,
2406 struct device_attribute *attr, const char *buf, size_t len)
2407{
2408 int ret;
2409 unsigned short do_reset;
2410 struct zram *zram;
2411 struct gendisk *disk;
2412
2413 ret = kstrtou16(buf, 10, &do_reset);
2414 if (ret)
2415 return ret;
2416
2417 if (!do_reset)
2418 return -EINVAL;
2419
2420 zram = dev_to_zram(dev);
2421 disk = zram->disk;
2422
2423 mutex_lock(&disk->open_mutex);
2424 /* Do not reset an active device or claimed device */
2425 if (disk_openers(disk) || zram->claim) {
2426 mutex_unlock(&disk->open_mutex);
2427 return -EBUSY;
2428 }
2429
2430 /* From now on, no one can open /dev/zram[0-9] */
2431 zram->claim = true;
2432 mutex_unlock(&disk->open_mutex);
2433
2434 /* Make sure all the pending I/O are finished */
2435 sync_blockdev(disk->part0);
2436 zram_reset_device(zram);
2437
2438 mutex_lock(&disk->open_mutex);
2439 zram->claim = false;
2440 mutex_unlock(&disk->open_mutex);
2441
2442 return len;
2443}
2444
2445static int zram_open(struct gendisk *disk, blk_mode_t mode)
2446{
2447 struct zram *zram = disk->private_data;
2448
2449 WARN_ON(!mutex_is_locked(&disk->open_mutex));
2450
2451 /* zram was claimed for reset, so the open request fails */
2452 if (zram->claim)
2453 return -EBUSY;
2454 return 0;
2455}
2456
2457static const struct block_device_operations zram_devops = {
2458 .open = zram_open,
2459 .submit_bio = zram_submit_bio,
2460 .swap_slot_free_notify = zram_slot_free_notify,
2461 .owner = THIS_MODULE
2462};
2463
2464static DEVICE_ATTR_WO(compact);
2465static DEVICE_ATTR_RW(disksize);
2466static DEVICE_ATTR_RO(initstate);
2467static DEVICE_ATTR_WO(reset);
2468static DEVICE_ATTR_WO(mem_limit);
2469static DEVICE_ATTR_WO(mem_used_max);
2470static DEVICE_ATTR_WO(idle);
2471static DEVICE_ATTR_RW(max_comp_streams);
2472static DEVICE_ATTR_RW(comp_algorithm);
2473#ifdef CONFIG_ZRAM_WRITEBACK
2474static DEVICE_ATTR_RW(backing_dev);
2475static DEVICE_ATTR_WO(writeback);
2476static DEVICE_ATTR_RW(writeback_limit);
2477static DEVICE_ATTR_RW(writeback_limit_enable);
2478#endif
2479#ifdef CONFIG_ZRAM_MULTI_COMP
2480static DEVICE_ATTR_RW(recomp_algorithm);
2481static DEVICE_ATTR_WO(recompress);
2482#endif
2483static DEVICE_ATTR_WO(algorithm_params);
2484
2485static struct attribute *zram_disk_attrs[] = {
2486 &dev_attr_disksize.attr,
2487 &dev_attr_initstate.attr,
2488 &dev_attr_reset.attr,
2489 &dev_attr_compact.attr,
2490 &dev_attr_mem_limit.attr,
2491 &dev_attr_mem_used_max.attr,
2492 &dev_attr_idle.attr,
2493 &dev_attr_max_comp_streams.attr,
2494 &dev_attr_comp_algorithm.attr,
2495#ifdef CONFIG_ZRAM_WRITEBACK
2496 &dev_attr_backing_dev.attr,
2497 &dev_attr_writeback.attr,
2498 &dev_attr_writeback_limit.attr,
2499 &dev_attr_writeback_limit_enable.attr,
2500#endif
2501 &dev_attr_io_stat.attr,
2502 &dev_attr_mm_stat.attr,
2503#ifdef CONFIG_ZRAM_WRITEBACK
2504 &dev_attr_bd_stat.attr,
2505#endif
2506 &dev_attr_debug_stat.attr,
2507#ifdef CONFIG_ZRAM_MULTI_COMP
2508 &dev_attr_recomp_algorithm.attr,
2509 &dev_attr_recompress.attr,
2510#endif
2511 &dev_attr_algorithm_params.attr,
2512 NULL,
2513};
2514
2515ATTRIBUTE_GROUPS(zram_disk);
2516
2517/*
2518 * Allocate and initialize a new zram device. The function returns a
2519 * device_id >= 0 upon success, and a negative value otherwise.
2520 */
2521static int zram_add(void)
2522{
2523 struct queue_limits lim = {
2524 .logical_block_size = ZRAM_LOGICAL_BLOCK_SIZE,
2525 /*
2526 * To ensure that we always get PAGE_SIZE-aligned and
2527 * n*PAGE_SIZE-sized I/O requests.
2528 */
2529 .physical_block_size = PAGE_SIZE,
2530 .io_min = PAGE_SIZE,
2531 .io_opt = PAGE_SIZE,
2532 .max_hw_discard_sectors = UINT_MAX,
2533 /*
2534 * zram_bio_discard() will clear all logical blocks if the logical
2535 * block size is identical to the physical block size (PAGE_SIZE).
2536 * But if they differ, we will skip discarding the parts of logical
2537 * blocks in the request range which aren't aligned to the physical
2538 * block size. So we can't ensure that all discarded logical blocks
2539 * are zeroed.
2540 */
2541#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
2542 .max_write_zeroes_sectors = UINT_MAX,
2543#endif
2544 .features = BLK_FEAT_STABLE_WRITES |
2545 BLK_FEAT_SYNCHRONOUS,
2546 };
2547 struct zram *zram;
2548 int ret, device_id;
2549
2550 zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
2551 if (!zram)
2552 return -ENOMEM;
2553
2554 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
2555 if (ret < 0)
2556 goto out_free_dev;
2557 device_id = ret;
2558
2559 init_rwsem(&zram->init_lock);
2560#ifdef CONFIG_ZRAM_WRITEBACK
2561 spin_lock_init(&zram->wb_limit_lock);
2562#endif
2563
2564 /* gendisk structure */
2565 zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
2566 if (IS_ERR(zram->disk)) {
2567 pr_err("Error allocating disk structure for device %d\n",
2568 device_id);
2569 ret = PTR_ERR(zram->disk);
2570 goto out_free_idr;
2571 }
2572
2573 zram->disk->major = zram_major;
2574 zram->disk->first_minor = device_id;
2575 zram->disk->minors = 1;
2576 zram->disk->flags |= GENHD_FL_NO_PART;
2577 zram->disk->fops = &zram_devops;
2578 zram->disk->private_data = zram;
2579 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
2580 atomic_set(&zram->pp_in_progress, 0);
2581 zram_comp_params_reset(zram);
2582 comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2583
2584 /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
2585 set_capacity(zram->disk, 0);
2586 ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
2587 if (ret)
2588 goto out_cleanup_disk;
2589
2590 zram_debugfs_register(zram);
2591 pr_info("Added device: %s\n", zram->disk->disk_name);
2592 return device_id;
2593
2594out_cleanup_disk:
2595 put_disk(zram->disk);
2596out_free_idr:
2597 idr_remove(&zram_index_idr, device_id);
2598out_free_dev:
2599 kfree(zram);
2600 return ret;
2601}
2602
2603static int zram_remove(struct zram *zram)
2604{
2605 bool claimed;
2606
2607 mutex_lock(&zram->disk->open_mutex);
2608 if (disk_openers(zram->disk)) {
2609 mutex_unlock(&zram->disk->open_mutex);
2610 return -EBUSY;
2611 }
2612
2613 claimed = zram->claim;
2614 if (!claimed)
2615 zram->claim = true;
2616 mutex_unlock(&zram->disk->open_mutex);
2617
2618 zram_debugfs_unregister(zram);
2619
2620 if (claimed) {
2621 /*
2622 * If we were claimed by reset_store(), del_gendisk() will
2623 * wait until reset_store() is done, so there is nothing to do.
2624 */
2625 ;
2626 } else {
2627 /* Make sure all the pending I/O are finished */
2628 sync_blockdev(zram->disk->part0);
2629 zram_reset_device(zram);
2630 }
2631
2632 pr_info("Removed device: %s\n", zram->disk->disk_name);
2633
2634 del_gendisk(zram->disk);
2635
2636 /* del_gendisk drains pending reset_store */
2637 WARN_ON_ONCE(claimed && zram->claim);
2638
2639 /*
2640 * disksize_store() may be called in between zram_reset_device()
2641 * and del_gendisk(), so run the last reset to avoid leaking
2642 * anything allocated with disksize_store()
2643 */
2644 zram_reset_device(zram);
2645
2646 put_disk(zram->disk);
2647 kfree(zram);
2648 return 0;
2649}
2650
2651/* zram-control sysfs attributes */
2652
2653/*
2654 * NOTE: the hot_add attribute is not the usual read-only sysfs attribute,
2655 * in the sense that reading from this file does alter the state of your
2656 * system -- it creates a new un-initialized zram device and returns that
2657 * device's device_id (or an error code if it fails to create a new device).
2658 */
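/*
 * Usage sketch (illustrative; the returned id "4" is an assumption):
 *
 *	cat /sys/class/zram-control/hot_add
 *	4
 *	echo 4 > /sys/class/zram-control/hot_remove
 */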
2659static ssize_t hot_add_show(const struct class *class,
2660 const struct class_attribute *attr,
2661 char *buf)
2662{
2663 int ret;
2664
2665 mutex_lock(&zram_index_mutex);
2666 ret = zram_add();
2667 mutex_unlock(&zram_index_mutex);
2668
2669 if (ret < 0)
2670 return ret;
2671 return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
2672}
2673/* This attribute must be set to 0400, so CLASS_ATTR_RO() cannot be used */
2674static struct class_attribute class_attr_hot_add =
2675 __ATTR(hot_add, 0400, hot_add_show, NULL);
2676
2677static ssize_t hot_remove_store(const struct class *class,
2678 const struct class_attribute *attr,
2679 const char *buf,
2680 size_t count)
2681{
2682 struct zram *zram;
2683 int ret, dev_id;
2684
2685 /* dev_id is gendisk->first_minor, which is `int' */
2686 ret = kstrtoint(buf, 10, &dev_id);
2687 if (ret)
2688 return ret;
2689 if (dev_id < 0)
2690 return -EINVAL;
2691
2692 mutex_lock(&zram_index_mutex);
2693
2694 zram = idr_find(&zram_index_idr, dev_id);
2695 if (zram) {
2696 ret = zram_remove(zram);
2697 if (!ret)
2698 idr_remove(&zram_index_idr, dev_id);
2699 } else {
2700 ret = -ENODEV;
2701 }
2702
2703 mutex_unlock(&zram_index_mutex);
2704 return ret ? ret : count;
2705}
2706static CLASS_ATTR_WO(hot_remove);
2707
2708static struct attribute *zram_control_class_attrs[] = {
2709 &class_attr_hot_add.attr,
2710 &class_attr_hot_remove.attr,
2711 NULL,
2712};
2713ATTRIBUTE_GROUPS(zram_control_class);
2714
2715static struct class zram_control_class = {
2716 .name = "zram-control",
2717 .class_groups = zram_control_class_groups,
2718};
2719
2720static int zram_remove_cb(int id, void *ptr, void *data)
2721{
2722 WARN_ON_ONCE(zram_remove(ptr));
2723 return 0;
2724}
2725
2726static void destroy_devices(void)
2727{
2728 class_unregister(&zram_control_class);
2729 idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
2730 zram_debugfs_destroy();
2731 idr_destroy(&zram_index_idr);
2732 unregister_blkdev(zram_major, "zram");
2733 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2734}
2735
2736static int __init zram_init(void)
2737{
2738 struct zram_table_entry zram_te;
2739 int ret;
2740
2741 BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);
2742
2743 ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
2744 zcomp_cpu_up_prepare, zcomp_cpu_dead);
2745 if (ret < 0)
2746 return ret;
2747
2748 ret = class_register(&zram_control_class);
2749 if (ret) {
2750 pr_err("Unable to register zram-control class\n");
2751 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2752 return ret;
2753 }
2754
2755 zram_debugfs_create();
2756 zram_major = register_blkdev(0, "zram");
2757 if (zram_major <= 0) {
2758 pr_err("Unable to get major number\n");
2759 class_unregister(&zram_control_class);
2760 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2761 return -EBUSY;
2762 }
2763
2764 while (num_devices != 0) {
2765 mutex_lock(&zram_index_mutex);
2766 ret = zram_add();
2767 mutex_unlock(&zram_index_mutex);
2768 if (ret < 0)
2769 goto out_error;
2770 num_devices--;
2771 }
2772
2773 return 0;
2774
2775out_error:
2776 destroy_devices();
2777 return ret;
2778}
2779
2780static void __exit zram_exit(void)
2781{
2782 destroy_devices();
2783}
2784
2785module_init(zram_init);
2786module_exit(zram_exit);
2787
2788module_param(num_devices, uint, 0);
2789MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
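
/*
 * Usage sketch (illustrative): the number of pre-created devices can be
 * chosen at module load time, e.g.:
 *
 *	modprobe zram num_devices=4
 */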
2790
2791MODULE_LICENSE("Dual BSD/GPL");
2792MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
2793MODULE_DESCRIPTION("Compressed RAM Block Device");