v3.15
/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
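
/*
 * Worked example (illustrative, assuming 4 KiB pages): PAGE_SHIFT = 12, so
 * PAGE_SECTORS_SHIFT = 3 and PAGE_SECTORS = 8. Sector 13 then lives in page
 * index 13 >> 3 = 1, at byte offset (13 & 7) << 9 = 2560 within that page,
 * which is exactly the arithmetic used by brd_lookup_page() and the copy
 * helpers below.
 */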

/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int		brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static DEFINE_MUTEX(brd_mutex);
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support XIP and highmem, because our ->direct_access
	 * routine for XIP must return memory that is always addressable.
	 * If XIP was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_XIP
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOMEM;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOMEM;
	}
	return 0;
}
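
/*
 * Example (illustrative, assuming 4 KiB pages): a 1024-byte write starting at
 * sector 7 begins (7 & 7) << 9 = 3584 bytes into page 0, so the first chunk
 * is copy = min(1024, 4096 - 3584) = 512 bytes, and the remaining 512 bytes
 * start at sector 8, which is page 1. A single-page bvec can straddle at most
 * one page boundary, so the two insertions above always suffice.
 */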

static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}
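
/*
 * Note that sectors which were never written have no backing page, so the
 * memset() branches above make reads of them return zeroes.
 */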

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (rw != READ) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (rw == READ) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

static void brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	int rw;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;
	int err = -EIO;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto out;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		err = 0;
		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
		goto out;
	}

	rw = bio_rw(bio);
	if (rw == READA)
		rw = READ;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		err = brd_do_bvec(brd, bvec.bv_page, len,
					bvec.bv_offset, rw, sector);
		if (err)
			break;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio, err);
}

#ifdef CONFIG_BLK_DEV_XIP
static int brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, unsigned long *pfn)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	if (sector & (PAGE_SECTORS-1))
		return -EINVAL;
	if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk))
		return -ERANGE;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOMEM;
	*kaddr = page_address(page);
	*pfn = page_to_pfn(page);

	return 0;
}
#endif

static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics, we want to actually
	 * release and destroy the ramdisk data.
	 */
	mutex_lock(&brd_mutex);
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Kill the cache first, so it isn't written back to the
		 * device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		kill_bdev(bdev);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	mutex_unlock(&brd_mutex);

	return error;
}
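
/*
 * Usage sketch (illustrative userspace code, not part of this driver): the
 * special BLKFLSBUF semantics above can be exercised with a plain ioctl on
 * the device node, assuming /dev/ram0 exists:
 *
 *	int fd = open("/dev/ram0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, BLKFLSBUF, 0) == 0)
 *		close(fd);	// all backing pages of ram0 were freed
 *
 * While another opener holds the device, the ioctl instead fails with EBUSY.
 */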

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.ioctl =		brd_ioctl,
#ifdef CONFIG_BLK_DEV_XIP
	.direct_access =	brd_direct_access,
#endif
};

/*
 * And now the modules code and kernel interface.
 */
static int rd_nr;
int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
static int max_part;
static int part_shift;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif
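
/* e.g. booting with "ramdisk_size=65536" sizes each /dev/ramN at 64 MiB */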

/*
 * The device scheme is derived from loop.c. Keep them in synch where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;
	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);

	disk = brd->brd_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i << part_shift;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
	sprintf(disk->disk_name, "ram%d", i);
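	/* rd_size is in KiB; gendisk capacity counts 512-byte sectors, hence the factor of two */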
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i)
{
	struct brd_device *brd;

	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) >> part_shift);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	*part = 0;
	return kobj;
}

static int __init brd_init(void)
{
	int i, nr;
	unsigned long range;
	struct brd_device *brd, *next;

	/*
	 * brd module now has a feature to instantiate underlying device
	 * structure on-demand, provided that there is an access dev node.
	 * However, this will not work well with user space tool that doesn't
	 * know about such "feature".  In order to not break any existing
	 * tool, we do the following:
	 *
	 * (1) if rd_nr is specified, create that many upfront, and this
	 *     also becomes a hard limit.
	 * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
	 *     (default 16) rd device on module load, user can further
	 *     extend brd device by create dev node themselves and have
	 *     kernel automatically instantiate actual device on-demand.
	 */

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can decide correct minor number
		 * if [s]he want to create more devices.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (rd_nr > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (rd_nr) {
		nr = rd_nr;
		range = rd_nr << part_shift;
	} else {
		nr = CONFIG_BLK_DEV_RAM_COUNT;
		range = 1UL << MINORBITS;
	}

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	for (i = 0; i < nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range,
				  THIS_MODULE, brd_probe, NULL, NULL);

	printk(KERN_INFO "brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	return -ENOMEM;
}
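
/*
 * Example (illustrative): loading with "modprobe brd rd_nr=4 rd_size=16384"
 * creates exactly ram0..ram3 at 16 MiB each, with 4 as the hard device limit;
 * loading with no parameters creates CONFIG_BLK_DEV_RAM_COUNT devices and
 * leaves the rest of the minor range open to on-demand creation via
 * brd_probe().
 */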

static void __exit brd_exit(void)
{
	unsigned long range;
	struct brd_device *brd, *next;

	range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
}

module_init(brd_init);
module_exit(brd_exit);
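
The same driver follows as of v4.10.11. The differences visible against the v3.15 listing above: XIP support has been replaced by DAX (CONFIG_BLK_DEV_RAM_DAX, pfn_t, and a byte-count return from ->direct_access), brd_make_request() now returns blk_qc_t and tests bio_op() against REQ_OP_DISCARD, a ->rw_page operation has been added, copy_to_brd_setup() reports -ENOSPC rather than -ENOMEM, and the part_shift minor-numbering scheme has given way to a flat max_part stride with GENHD_FL_EXT_DEVT.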
v4.10.11
/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
#endif

#include <linux/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int		brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static DEFINE_MUTEX(brd_mutex);
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support DAX and highmem, because our ->direct_access
	 * routine for DAX must return memory that is always addressable.
	 * If DAX was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}

static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (is_write) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (!is_write) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto io_error;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
		    bio->bi_iter.bi_size & ~PAGE_MASK)
			goto io_error;
		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
		goto out;
	}

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
					op_is_write(bio_op(bio)), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
	page_endio(page, is_write, err);
	return err;
}
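
/*
 * ->rw_page lets callers such as bdev_read_page()/bdev_write_page() transfer
 * a single page without constructing a bio; page_endio() then takes care of
 * the page flags and unlocking that a bio completion would otherwise handle.
 */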

#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, pfn_t *pfn, long size)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOSPC;
	*kaddr = page_address(page);
	*pfn = page_to_pfn_t(page);

	return PAGE_SIZE;
}
#else
#define brd_direct_access NULL
#endif
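
/*
 * Compared with the v3.15 XIP variant, ->direct_access now reports how many
 * bytes are addressable (always one PAGE_SIZE here) and fills in a pfn_t;
 * the sector alignment and capacity checks are gone from the driver,
 * presumably validated by the DAX callers instead.
 */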

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
	.direct_access =	brd_direct_access,
};

/*
 * And now the modules code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in synch where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	/* This is so fdisk will align partitions on 4k, because of
	 * direct_access API needing 4k alignment, returning a PFN
	 * (This is only a problem on very small devices <= 4M,
	 *  otherwise fdisk will align on 1M. Regardless this call
	 *  is harmless)
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
#ifdef CONFIG_BLK_DEV_RAM_DAX
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
#endif
	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * brd module now has a feature to instantiate underlying device
	 * structure on-demand, provided that there is an access dev node.
	 *
	 * (1) if rd_nr is specified, create that many upfront. else
	 *     it defaults to CONFIG_BLK_DEV_RAM_COUNT
	 * (2) User can further extend brd devices by create dev node themselves
	 *     and have kernel automatically instantiate actual device
	 *     on-demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If (X / max_part) was not already created it will be created
	 *	dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	if (unlikely(!max_part))
		max_part = 1;

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				  THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);