v3.1
  1/*
  2 * Ram backed block device driver.
  3 *
  4 * Copyright (C) 2007 Nick Piggin
  5 * Copyright (C) 2007 Novell Inc.
  6 *
  7 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
  8 * of their respective owners.
  9 */
 10
 11#include <linux/init.h>
 12#include <linux/module.h>
 13#include <linux/moduleparam.h>
 14#include <linux/major.h>
 15#include <linux/blkdev.h>
 16#include <linux/bio.h>
 17#include <linux/highmem.h>
 18#include <linux/mutex.h>
 19#include <linux/radix-tree.h>
 20#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
 21#include <linux/slab.h>
 22
 23#include <asm/uaccess.h>
 24
 25#define SECTOR_SHIFT		9
 26#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
 27#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
 28
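/*
 * Editorial note, illustrative only: assuming the common 4 KiB PAGE_SIZE,
 * PAGE_SECTORS_SHIFT is 12 - 9 = 3 and PAGE_SECTORS is 8, so one backing
 * page holds eight 512-byte sectors and a sector maps to its radix-tree
 * index with sector >> 3, e.g.:
 *
 *	sectors    0 ..    7  ->  page index 0
 *	sectors    8 ..   15  ->  page index 1
 *	sectors 4096 .. 4103  ->  page index 512
 */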
 29/*
 30 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 31 * the pages containing the block device's contents. A brd page's ->index is
 32 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 33 * with, the kernel's pagecache or buffer cache (which sit above our block
 34 * device).
 35 */
 36struct brd_device {
 37	int		brd_number;
 38
 39	struct request_queue	*brd_queue;
 40	struct gendisk		*brd_disk;
 41	struct list_head	brd_list;
 42
 43	/*
 44	 * Backing store of pages and lock to protect it. This is the contents
 45	 * of the block device.
 46	 */
 47	spinlock_t		brd_lock;
 48	struct radix_tree_root	brd_pages;
 49};
 50
 51/*
 52 * Look up and return a brd's page for a given sector.
 53 */
 54static DEFINE_MUTEX(brd_mutex);
 55static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
 56{
 57	pgoff_t idx;
 58	struct page *page;
 59
 60	/*
 61	 * The page lifetime is protected by the fact that we have opened the
 62	 * device node -- brd pages will never be deleted under us, so we
 63	 * don't need any further locking or refcounting.
 64	 *
 65	 * This is strictly true for the radix-tree nodes as well (ie. we
 66	 * don't actually need the rcu_read_lock()), however that is not a
 67	 * documented feature of the radix-tree API so it is better to be
 68	 * safe here (we don't have total exclusion from radix tree updates
 69	 * here, only deletes).
 70	 */
 71	rcu_read_lock();
 72	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
 73	page = radix_tree_lookup(&brd->brd_pages, idx);
 74	rcu_read_unlock();
 75
 76	BUG_ON(page && page->index != idx);
 77
 78	return page;
 79}
 80
 81/*
 82 * Look up and return a brd's page for a given sector.
 83 * If one does not exist, allocate an empty page, and insert that. Then
 84 * return it.
 85 */
 86static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 87{
 88	pgoff_t idx;
 89	struct page *page;
 90	gfp_t gfp_flags;
 91
 92	page = brd_lookup_page(brd, sector);
 93	if (page)
 94		return page;
 95
 96	/*
 97	 * Must use NOIO because we don't want to recurse back into the
 98	 * block or filesystem layers from page reclaim.
 99	 *
100	 * Cannot support XIP and highmem, because our ->direct_access
101	 * routine for XIP must return memory that is always addressable.
102	 * If XIP was reworked to use pfns and kmap throughout, this
103	 * restriction might be able to be lifted.
104	 */
105	gfp_flags = GFP_NOIO | __GFP_ZERO;
106#ifndef CONFIG_BLK_DEV_XIP
107	gfp_flags |= __GFP_HIGHMEM;
108#endif
109	page = alloc_page(gfp_flags);
110	if (!page)
111		return NULL;
112
113	if (radix_tree_preload(GFP_NOIO)) {
114		__free_page(page);
115		return NULL;
116	}
117
118	spin_lock(&brd->brd_lock);
119	idx = sector >> PAGE_SECTORS_SHIFT;
120	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
121		__free_page(page);
122		page = radix_tree_lookup(&brd->brd_pages, idx);
123		BUG_ON(!page);
124		BUG_ON(page->index != idx);
125	} else
126		page->index = idx;
127	spin_unlock(&brd->brd_lock);
128
129	radix_tree_preload_end();
130
131	return page;
132}
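/*
 * Editorial note: two writers can race in brd_insert_page() after both
 * missing the lookup above. The loser's radix_tree_insert() fails because
 * an entry already exists, so it frees its freshly allocated page and
 * looks up the winner's page instead; either way the caller ends up with
 * the single page backing this sector range.
 */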
133
134static void brd_free_page(struct brd_device *brd, sector_t sector)
135{
136	struct page *page;
137	pgoff_t idx;
138
139	spin_lock(&brd->brd_lock);
140	idx = sector >> PAGE_SECTORS_SHIFT;
141	page = radix_tree_delete(&brd->brd_pages, idx);
142	spin_unlock(&brd->brd_lock);
143	if (page)
144		__free_page(page);
145}
146
147static void brd_zero_page(struct brd_device *brd, sector_t sector)
148{
149	struct page *page;
150
151	page = brd_lookup_page(brd, sector);
152	if (page)
153		clear_highpage(page);
154}
155
156/*
157 * Free all backing store pages and radix tree. This must only be called when
158 * there are no other users of the device.
159 */
160#define FREE_BATCH 16
161static void brd_free_pages(struct brd_device *brd)
162{
163	unsigned long pos = 0;
164	struct page *pages[FREE_BATCH];
165	int nr_pages;
166
167	do {
168		int i;
169
170		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
171				(void **)pages, pos, FREE_BATCH);
172
173		for (i = 0; i < nr_pages; i++) {
174			void *ret;
175
176			BUG_ON(pages[i]->index < pos);
177			pos = pages[i]->index;
178			ret = radix_tree_delete(&brd->brd_pages, pos);
179			BUG_ON(!ret || ret != pages[i]);
180			__free_page(pages[i]);
181		}
182
183		pos++;
184
185		/*
186		 * This assumes radix_tree_gang_lookup always returns as
187		 * many pages as possible. If the radix-tree code changes,
188		 * so will this have to.
189		 */
190	} while (nr_pages == FREE_BATCH);
191}
192
193/*
194 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
195 */
196static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
197{
198	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
199	size_t copy;
200
201	copy = min_t(size_t, n, PAGE_SIZE - offset);
202	if (!brd_insert_page(brd, sector))
203		return -ENOMEM;
204	if (copy < n) {
205		sector += copy >> SECTOR_SHIFT;
206		if (!brd_insert_page(brd, sector))
207			return -ENOMEM;
208	}
209	return 0;
210}
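/*
 * Editorial note, worked example (assuming 4 KiB pages): for sector = 7
 * and n = 1024, offset = (7 & 7) << 9 = 3584, so only copy = 4096 - 3584
 * = 512 bytes fit in the first page; since copy < n, sector advances by
 * 512 >> 9 = 1 into the next page and a second backing page is inserted
 * as well.
 */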
211
212static void discard_from_brd(struct brd_device *brd,
213			sector_t sector, size_t n)
214{
215	while (n >= PAGE_SIZE) {
216		/*
217		 * Don't want to actually discard pages here because
218		 * re-allocating the pages can result in writeback
219		 * deadlocks under heavy load.
220		 */
221		if (0)
222			brd_free_page(brd, sector);
223		else
224			brd_zero_page(brd, sector);
225		sector += PAGE_SIZE >> SECTOR_SHIFT;
226		n -= PAGE_SIZE;
227	}
228}
229
230/*
231 * Copy n bytes from src to the brd starting at sector. Does not sleep.
232 */
233static void copy_to_brd(struct brd_device *brd, const void *src,
234			sector_t sector, size_t n)
235{
236	struct page *page;
237	void *dst;
238	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
239	size_t copy;
240
241	copy = min_t(size_t, n, PAGE_SIZE - offset);
242	page = brd_lookup_page(brd, sector);
243	BUG_ON(!page);
244
245	dst = kmap_atomic(page, KM_USER1);
246	memcpy(dst + offset, src, copy);
247	kunmap_atomic(dst, KM_USER1);
248
249	if (copy < n) {
250		src += copy;
251		sector += copy >> SECTOR_SHIFT;
252		copy = n - copy;
253		page = brd_lookup_page(brd, sector);
254		BUG_ON(!page);
255
256		dst = kmap_atomic(page, KM_USER1);
257		memcpy(dst, src, copy);
258		kunmap_atomic(dst, KM_USER1);
259	}
260}
261
262/*
263 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
264 */
265static void copy_from_brd(void *dst, struct brd_device *brd,
266			sector_t sector, size_t n)
267{
268	struct page *page;
269	void *src;
270	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
271	size_t copy;
272
273	copy = min_t(size_t, n, PAGE_SIZE - offset);
274	page = brd_lookup_page(brd, sector);
275	if (page) {
276		src = kmap_atomic(page, KM_USER1);
277		memcpy(dst, src + offset, copy);
278		kunmap_atomic(src, KM_USER1);
279	} else
280		memset(dst, 0, copy);
281
282	if (copy < n) {
283		dst += copy;
284		sector += copy >> SECTOR_SHIFT;
285		copy = n - copy;
286		page = brd_lookup_page(brd, sector);
287		if (page) {
288			src = kmap_atomic(page, KM_USER1);
289			memcpy(dst, src, copy);
290			kunmap_atomic(src, KM_USER1);
291		} else
292			memset(dst, 0, copy);
293	}
294}
295
296/*
297 * Process a single bvec of a bio.
298 */
299static int brd_do_bvec(struct brd_device *brd, struct page *page,
300			unsigned int len, unsigned int off, int rw,
301			sector_t sector)
302{
303	void *mem;
304	int err = 0;
305
306	if (rw != READ) {
307		err = copy_to_brd_setup(brd, sector, len);
308		if (err)
309			goto out;
310	}
311
312	mem = kmap_atomic(page, KM_USER0);
313	if (rw == READ) {
314		copy_from_brd(mem + off, brd, sector, len);
315		flush_dcache_page(page);
316	} else {
317		flush_dcache_page(page);
318		copy_to_brd(brd, mem + off, sector, len);
319	}
320	kunmap_atomic(mem, KM_USER0);
321
322out:
323	return err;
324}
325
326static int brd_make_request(struct request_queue *q, struct bio *bio)
327{
328	struct block_device *bdev = bio->bi_bdev;
329	struct brd_device *brd = bdev->bd_disk->private_data;
330	int rw;
331	struct bio_vec *bvec;
332	sector_t sector;
333	int i;
334	int err = -EIO;
335
336	sector = bio->bi_sector;
337	if (sector + (bio->bi_size >> SECTOR_SHIFT) >
338						get_capacity(bdev->bd_disk))
339		goto out;
340
341	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
342		err = 0;
343		discard_from_brd(brd, sector, bio->bi_size);
344		goto out;
345	}
346
347	rw = bio_rw(bio);
348	if (rw == READA)
349		rw = READ;
350
351	bio_for_each_segment(bvec, bio, i) {
352		unsigned int len = bvec->bv_len;
353		err = brd_do_bvec(brd, bvec->bv_page, len,
354					bvec->bv_offset, rw, sector);
355		if (err)
356			break;
357		sector += len >> SECTOR_SHIFT;
358	}
359
360out:
361	bio_endio(bio, err);
362
363	return 0;
364}
365
366#ifdef CONFIG_BLK_DEV_XIP
367static int brd_direct_access(struct block_device *bdev, sector_t sector,
368			void **kaddr, unsigned long *pfn)
369{
370	struct brd_device *brd = bdev->bd_disk->private_data;
371	struct page *page;
372
373	if (!brd)
374		return -ENODEV;
375	if (sector & (PAGE_SECTORS-1))
376		return -EINVAL;
377	if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk))
378		return -ERANGE;
379	page = brd_insert_page(brd, sector);
380	if (!page)
381		return -ENOMEM;
382	*kaddr = page_address(page);
383	*pfn = page_to_pfn(page);
384
385	return 0;
386}
387#endif
388
389static int brd_ioctl(struct block_device *bdev, fmode_t mode,
390			unsigned int cmd, unsigned long arg)
391{
392	int error;
393	struct brd_device *brd = bdev->bd_disk->private_data;
394
395	if (cmd != BLKFLSBUF)
396		return -ENOTTY;
397
398	/*
399	 * ram device BLKFLSBUF has special semantics, we want to actually
400	 * release and destroy the ramdisk data.
401	 */
402	mutex_lock(&brd_mutex);
403	mutex_lock(&bdev->bd_mutex);
404	error = -EBUSY;
405	if (bdev->bd_openers <= 1) {
406		/*
407		 * Invalidate the cache first, so it isn't written
408		 * back to the device.
409		 *
410		 * Another thread might instantiate more buffercache here,
411		 * but there is not much we can do to close that race.
412		 */
413		invalidate_bh_lrus();
414		truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
415		brd_free_pages(brd);
416		error = 0;
417	}
418	mutex_unlock(&bdev->bd_mutex);
419	mutex_unlock(&brd_mutex);
420
421	return error;
422}
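/*
 * Editorial sketch, not part of the driver: BLKFLSBUF can be issued from
 * user space to release all pages a ramdisk currently holds. A minimal,
 * untested caller might look like this (hypothetical helper name):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>		// BLKFLSBUF
 *
 *	int free_ramdisk(const char *path)	// e.g. "/dev/ram0"
 *	{
 *		int fd = open(path, O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *		int ret = ioctl(fd, BLKFLSBUF, 0);	// frees backing pages
 *		close(fd);
 *		return ret;
 *	}
 *
 * As the driver code above shows, the ioctl fails with EBUSY if anyone
 * else has the device open (bd_openers > 1).
 */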
423
424static const struct block_device_operations brd_fops = {
425	.owner =		THIS_MODULE,
426	.ioctl =		brd_ioctl,
427#ifdef CONFIG_BLK_DEV_XIP
428	.direct_access =	brd_direct_access,
429#endif
430};
431
432/*
433 * And now the modules code and kernel interface.
434 */
435static int rd_nr;
436int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
437static int max_part;
438static int part_shift;
439module_param(rd_nr, int, S_IRUGO);
440MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
441module_param(rd_size, int, S_IRUGO);
442MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
443module_param(max_part, int, S_IRUGO);
444MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
445MODULE_LICENSE("GPL");
446MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
447MODULE_ALIAS("rd");
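/*
 * Editorial note, illustrative only: rd_size is in KiB (set_capacity()
 * below uses rd_size * 2 sectors of 512 bytes), so loading the module
 * with, say, rd_nr=4 rd_size=16384 max_part=3 creates /dev/ram0..ram3 of
 * 16 MiB each, with part_shift = 2 and thus up to 3 partitions per disk
 * (minors 4*i .. 4*i + 3).
 */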
448
449#ifndef MODULE
450/* Legacy boot options - nonmodular */
451static int __init ramdisk_size(char *str)
452{
453	rd_size = simple_strtol(str, NULL, 0);
454	return 1;
455}
456__setup("ramdisk_size=", ramdisk_size);
457#endif
458
459/*
460 * The device scheme is derived from loop.c. Keep them in synch where possible
461 * (should share code eventually).
462 */
463static LIST_HEAD(brd_devices);
464static DEFINE_MUTEX(brd_devices_mutex);
465
466static struct brd_device *brd_alloc(int i)
467{
468	struct brd_device *brd;
469	struct gendisk *disk;
470
471	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
472	if (!brd)
473		goto out;
474	brd->brd_number		= i;
475	spin_lock_init(&brd->brd_lock);
476	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
477
478	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
479	if (!brd->brd_queue)
480		goto out_free_dev;
481	blk_queue_make_request(brd->brd_queue, brd_make_request);
482	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
483	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
484
485	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
486	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
487	brd->brd_queue->limits.discard_zeroes_data = 1;
488	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
489
490	disk = brd->brd_disk = alloc_disk(1 << part_shift);
491	if (!disk)
492		goto out_free_queue;
493	disk->major		= RAMDISK_MAJOR;
494	disk->first_minor	= i << part_shift;
495	disk->fops		= &brd_fops;
496	disk->private_data	= brd;
497	disk->queue		= brd->brd_queue;
498	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
499	sprintf(disk->disk_name, "ram%d", i);
500	set_capacity(disk, rd_size * 2);
501
502	return brd;
503
504out_free_queue:
505	blk_cleanup_queue(brd->brd_queue);
506out_free_dev:
507	kfree(brd);
508out:
509	return NULL;
510}
511
512static void brd_free(struct brd_device *brd)
513{
514	put_disk(brd->brd_disk);
515	blk_cleanup_queue(brd->brd_queue);
516	brd_free_pages(brd);
517	kfree(brd);
518}
519
520static struct brd_device *brd_init_one(int i)
521{
522	struct brd_device *brd;
523
524	list_for_each_entry(brd, &brd_devices, brd_list) {
525		if (brd->brd_number == i)
526			goto out;
527	}
528
529	brd = brd_alloc(i);
530	if (brd) {
531		add_disk(brd->brd_disk);
532		list_add_tail(&brd->brd_list, &brd_devices);
533	}
534out:
535	return brd;
536}
537
538static void brd_del_one(struct brd_device *brd)
539{
540	list_del(&brd->brd_list);
541	del_gendisk(brd->brd_disk);
542	brd_free(brd);
543}
544
545static struct kobject *brd_probe(dev_t dev, int *part, void *data)
546{
547	struct brd_device *brd;
548	struct kobject *kobj;
549
550	mutex_lock(&brd_devices_mutex);
551	brd = brd_init_one(MINOR(dev) >> part_shift);
552	kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
553	mutex_unlock(&brd_devices_mutex);
554
555	*part = 0;
556	return kobj;
557}
558
559static int __init brd_init(void)
560{
561	int i, nr;
562	unsigned long range;
563	struct brd_device *brd, *next;
564
565	/*
566	 * brd module now has a feature to instantiate underlying device
567	 * structure on-demand, provided that there is an access dev node.
568	 * However, this will not work well with user space tools that don't
569	 * know about this "feature".  In order not to break any existing
570	 * tool, we do the following:
571	 *
572	 * (1) if rd_nr is specified, create that many upfront, and this
573	 *     also becomes a hard limit.
574	 * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
575	 *     (default 16) rd devices on module load; users can further
576	 *     extend the brd devices by creating dev nodes themselves and
577	 *     having the kernel instantiate the actual device on demand.
578	 */
579
580	part_shift = 0;
581	if (max_part > 0) {
582		part_shift = fls(max_part);
583
584		/*
585		 * Adjust max_part according to part_shift as it is exported
586		 * to user space so that user can decide correct minor number
587		 * if [s]he want to create more devices.
588		 *
589		 * Note that -1 is required because partition 0 is reserved
590		 * for the whole disk.
591		 */
592		max_part = (1UL << part_shift) - 1;
593	}
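	/*
	 * Editorial note, worked example: max_part=3 gives part_shift =
	 * fls(3) = 2, so each device spans 1 << 2 = 4 minors, max_part is
	 * rewritten to (1 << 2) - 1 = 3 usable partitions, and device i
	 * owns minors (i << 2) .. (i << 2) + 3.
	 */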
594
595	if ((1UL << part_shift) > DISK_MAX_PARTS)
596		return -EINVAL;
597
598	if (rd_nr > 1UL << (MINORBITS - part_shift))
599		return -EINVAL;
600
601	if (rd_nr) {
602		nr = rd_nr;
603		range = rd_nr << part_shift;
604	} else {
605		nr = CONFIG_BLK_DEV_RAM_COUNT;
606		range = 1UL << MINORBITS;
607	}
608
609	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
610		return -EIO;
611
612	for (i = 0; i < nr; i++) {
613		brd = brd_alloc(i);
614		if (!brd)
615			goto out_free;
616		list_add_tail(&brd->brd_list, &brd_devices);
617	}
618
619	/* point of no return */
620
621	list_for_each_entry(brd, &brd_devices, brd_list)
622		add_disk(brd->brd_disk);
623
624	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range,
625				  THIS_MODULE, brd_probe, NULL, NULL);
626
627	printk(KERN_INFO "brd: module loaded\n");
628	return 0;
629
630out_free:
631	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
632		list_del(&brd->brd_list);
633		brd_free(brd);
634	}
635	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
636
637	return -ENOMEM;
638}
639
640static void __exit brd_exit(void)
641{
642	unsigned long range;
643	struct brd_device *brd, *next;
644
645	range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;
646
647	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
648		brd_del_one(brd);
649
650	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range);
651	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
652}
653
654module_init(brd_init);
655module_exit(brd_exit);
656
v4.6
  1/*
  2 * Ram backed block device driver.
  3 *
  4 * Copyright (C) 2007 Nick Piggin
  5 * Copyright (C) 2007 Novell Inc.
  6 *
  7 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
  8 * of their respective owners.
  9 */
 10
 11#include <linux/init.h>
 12#include <linux/module.h>
 13#include <linux/moduleparam.h>
 14#include <linux/major.h>
 15#include <linux/blkdev.h>
 16#include <linux/bio.h>
 17#include <linux/highmem.h>
 18#include <linux/mutex.h>
 19#include <linux/radix-tree.h>
 20#include <linux/fs.h>
 21#include <linux/slab.h>
 22#ifdef CONFIG_BLK_DEV_RAM_DAX
 23#include <linux/pfn_t.h>
 24#endif
 25
 26#include <asm/uaccess.h>
 27
 28#define SECTOR_SHIFT		9
 29#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
 30#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
 31
 32/*
 33 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 34 * the pages containing the block device's contents. A brd page's ->index is
 35 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 36 * with, the kernel's pagecache or buffer cache (which sit above our block
 37 * device).
 38 */
 39struct brd_device {
 40	int		brd_number;
 41
 42	struct request_queue	*brd_queue;
 43	struct gendisk		*brd_disk;
 44	struct list_head	brd_list;
 45
 46	/*
 47	 * Backing store of pages and lock to protect it. This is the contents
 48	 * of the block device.
 49	 */
 50	spinlock_t		brd_lock;
 51	struct radix_tree_root	brd_pages;
 52};
 53
 54/*
 55 * Look up and return a brd's page for a given sector.
 56 */
 57static DEFINE_MUTEX(brd_mutex);
 58static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
 59{
 60	pgoff_t idx;
 61	struct page *page;
 62
 63	/*
 64	 * The page lifetime is protected by the fact that we have opened the
 65	 * device node -- brd pages will never be deleted under us, so we
 66	 * don't need any further locking or refcounting.
 67	 *
 68	 * This is strictly true for the radix-tree nodes as well (ie. we
 69	 * don't actually need the rcu_read_lock()), however that is not a
 70	 * documented feature of the radix-tree API so it is better to be
 71	 * safe here (we don't have total exclusion from radix tree updates
 72	 * here, only deletes).
 73	 */
 74	rcu_read_lock();
 75	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
 76	page = radix_tree_lookup(&brd->brd_pages, idx);
 77	rcu_read_unlock();
 78
 79	BUG_ON(page && page->index != idx);
 80
 81	return page;
 82}
 83
 84/*
 85 * Look up and return a brd's page for a given sector.
 86 * If one does not exist, allocate an empty page, and insert that. Then
 87 * return it.
 88 */
 89static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 90{
 91	pgoff_t idx;
 92	struct page *page;
 93	gfp_t gfp_flags;
 94
 95	page = brd_lookup_page(brd, sector);
 96	if (page)
 97		return page;
 98
 99	/*
100	 * Must use NOIO because we don't want to recurse back into the
101	 * block or filesystem layers from page reclaim.
102	 *
103	 * Cannot support DAX and highmem, because our ->direct_access
104	 * routine for DAX must return memory that is always addressable.
105	 * If DAX was reworked to use pfns and kmap throughout, this
106	 * restriction might be able to be lifted.
107	 */
108	gfp_flags = GFP_NOIO | __GFP_ZERO;
109#ifndef CONFIG_BLK_DEV_RAM_DAX
110	gfp_flags |= __GFP_HIGHMEM;
111#endif
112	page = alloc_page(gfp_flags);
113	if (!page)
114		return NULL;
115
116	if (radix_tree_preload(GFP_NOIO)) {
117		__free_page(page);
118		return NULL;
119	}
120
121	spin_lock(&brd->brd_lock);
122	idx = sector >> PAGE_SECTORS_SHIFT;
123	page->index = idx;
124	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
125		__free_page(page);
126		page = radix_tree_lookup(&brd->brd_pages, idx);
127		BUG_ON(!page);
128		BUG_ON(page->index != idx);
129	}
130	spin_unlock(&brd->brd_lock);
131
132	radix_tree_preload_end();
133
134	return page;
135}
136
137static void brd_free_page(struct brd_device *brd, sector_t sector)
138{
139	struct page *page;
140	pgoff_t idx;
141
142	spin_lock(&brd->brd_lock);
143	idx = sector >> PAGE_SECTORS_SHIFT;
144	page = radix_tree_delete(&brd->brd_pages, idx);
145	spin_unlock(&brd->brd_lock);
146	if (page)
147		__free_page(page);
148}
149
150static void brd_zero_page(struct brd_device *brd, sector_t sector)
151{
152	struct page *page;
153
154	page = brd_lookup_page(brd, sector);
155	if (page)
156		clear_highpage(page);
157}
158
159/*
160 * Free all backing store pages and radix tree. This must only be called when
161 * there are no other users of the device.
162 */
163#define FREE_BATCH 16
164static void brd_free_pages(struct brd_device *brd)
165{
166	unsigned long pos = 0;
167	struct page *pages[FREE_BATCH];
168	int nr_pages;
169
170	do {
171		int i;
172
173		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
174				(void **)pages, pos, FREE_BATCH);
175
176		for (i = 0; i < nr_pages; i++) {
177			void *ret;
178
179			BUG_ON(pages[i]->index < pos);
180			pos = pages[i]->index;
181			ret = radix_tree_delete(&brd->brd_pages, pos);
182			BUG_ON(!ret || ret != pages[i]);
183			__free_page(pages[i]);
184		}
185
186		pos++;
187
188		/*
189		 * This assumes radix_tree_gang_lookup always returns as
190		 * many pages as possible. If the radix-tree code changes,
191		 * so will this have to.
192		 */
193	} while (nr_pages == FREE_BATCH);
194}
195
196/*
197 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
198 */
199static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
200{
201	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
202	size_t copy;
203
204	copy = min_t(size_t, n, PAGE_SIZE - offset);
205	if (!brd_insert_page(brd, sector))
206		return -ENOSPC;
207	if (copy < n) {
208		sector += copy >> SECTOR_SHIFT;
209		if (!brd_insert_page(brd, sector))
210			return -ENOSPC;
211	}
212	return 0;
213}
214
215static void discard_from_brd(struct brd_device *brd,
216			sector_t sector, size_t n)
217{
218	while (n >= PAGE_SIZE) {
219		/*
220		 * Don't want to actually discard pages here because
221		 * re-allocating the pages can result in writeback
222		 * deadlocks under heavy load.
223		 */
224		if (0)
225			brd_free_page(brd, sector);
226		else
227			brd_zero_page(brd, sector);
228		sector += PAGE_SIZE >> SECTOR_SHIFT;
229		n -= PAGE_SIZE;
230	}
231}
232
233/*
234 * Copy n bytes from src to the brd starting at sector. Does not sleep.
235 */
236static void copy_to_brd(struct brd_device *brd, const void *src,
237			sector_t sector, size_t n)
238{
239	struct page *page;
240	void *dst;
241	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
242	size_t copy;
243
244	copy = min_t(size_t, n, PAGE_SIZE - offset);
245	page = brd_lookup_page(brd, sector);
246	BUG_ON(!page);
247
248	dst = kmap_atomic(page);
249	memcpy(dst + offset, src, copy);
250	kunmap_atomic(dst);
251
252	if (copy < n) {
253		src += copy;
254		sector += copy >> SECTOR_SHIFT;
255		copy = n - copy;
256		page = brd_lookup_page(brd, sector);
257		BUG_ON(!page);
258
259		dst = kmap_atomic(page);
260		memcpy(dst, src, copy);
261		kunmap_atomic(dst);
262	}
263}
264
265/*
266 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
267 */
268static void copy_from_brd(void *dst, struct brd_device *brd,
269			sector_t sector, size_t n)
270{
271	struct page *page;
272	void *src;
273	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
274	size_t copy;
275
276	copy = min_t(size_t, n, PAGE_SIZE - offset);
277	page = brd_lookup_page(brd, sector);
278	if (page) {
279		src = kmap_atomic(page);
280		memcpy(dst, src + offset, copy);
281		kunmap_atomic(src);
282	} else
283		memset(dst, 0, copy);
284
285	if (copy < n) {
286		dst += copy;
287		sector += copy >> SECTOR_SHIFT;
288		copy = n - copy;
289		page = brd_lookup_page(brd, sector);
290		if (page) {
291			src = kmap_atomic(page);
292			memcpy(dst, src, copy);
293			kunmap_atomic(src);
294		} else
295			memset(dst, 0, copy);
296	}
297}
298
299/*
300 * Process a single bvec of a bio.
301 */
302static int brd_do_bvec(struct brd_device *brd, struct page *page,
303			unsigned int len, unsigned int off, int rw,
304			sector_t sector)
305{
306	void *mem;
307	int err = 0;
308
309	if (rw != READ) {
310		err = copy_to_brd_setup(brd, sector, len);
311		if (err)
312			goto out;
313	}
314
315	mem = kmap_atomic(page);
316	if (rw == READ) {
317		copy_from_brd(mem + off, brd, sector, len);
318		flush_dcache_page(page);
319	} else {
320		flush_dcache_page(page);
321		copy_to_brd(brd, mem + off, sector, len);
322	}
323	kunmap_atomic(mem);
324
325out:
326	return err;
327}
328
329static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
330{
331	struct block_device *bdev = bio->bi_bdev;
332	struct brd_device *brd = bdev->bd_disk->private_data;
333	int rw;
334	struct bio_vec bvec;
335	sector_t sector;
336	struct bvec_iter iter;
337
338	sector = bio->bi_iter.bi_sector;
339	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
340		goto io_error;
341
342	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
343		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
344		    bio->bi_iter.bi_size & ~PAGE_MASK)
345			goto io_error;
346		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
347		goto out;
348	}
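	/*
	 * Editorial note: unlike the v3.1 code above, a discard whose start
	 * sector or byte length is not page aligned is rejected with an I/O
	 * error here rather than handled page-by-page.
	 */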
349
350	rw = bio_rw(bio);
351	if (rw == READA)
352		rw = READ;
353
354	bio_for_each_segment(bvec, bio, iter) {
355		unsigned int len = bvec.bv_len;
356		int err;
357
358		err = brd_do_bvec(brd, bvec.bv_page, len,
359					bvec.bv_offset, rw, sector);
360		if (err)
361			goto io_error;
362		sector += len >> SECTOR_SHIFT;
363	}
364
365out:
366	bio_endio(bio);
367	return BLK_QC_T_NONE;
368io_error:
369	bio_io_error(bio);
370	return BLK_QC_T_NONE;
371}
372
373static int brd_rw_page(struct block_device *bdev, sector_t sector,
374		       struct page *page, int rw)
375{
376	struct brd_device *brd = bdev->bd_disk->private_data;
377	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
378	page_endio(page, rw & WRITE, err);
379	return err;
380}
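/*
 * Editorial note: ->rw_page is the synchronous single-page path; callers
 * such as bdev_read_page()/bdev_write_page() (used, for example, by the
 * swap code) reach it so that a full bio does not have to be built for a
 * single page of I/O.
 */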
381
382#ifdef CONFIG_BLK_DEV_RAM_DAX
383static long brd_direct_access(struct block_device *bdev, sector_t sector,
384			void __pmem **kaddr, pfn_t *pfn)
385{
386	struct brd_device *brd = bdev->bd_disk->private_data;
387	struct page *page;
388
389	if (!brd)
390		return -ENODEV;
391	page = brd_insert_page(brd, sector);
392	if (!page)
393		return -ENOSPC;
394	*kaddr = (void __pmem *)page_address(page);
395	*pfn = page_to_pfn_t(page);
396
397	return PAGE_SIZE;
398}
399#else
400#define brd_direct_access NULL
401#endif
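/*
 * Editorial note: compared with the v3.1 XIP variant above, this DAX
 * ->direct_access no longer checks alignment or capacity itself and
 * instead reports how many bytes are addressable at *kaddr (one page).
 * In v4.6 callers are expected to go through bdev_direct_access(), which
 * performs those sanity checks before invoking the driver method.
 */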
402
403static int brd_ioctl(struct block_device *bdev, fmode_t mode,
404			unsigned int cmd, unsigned long arg)
405{
406	int error;
407	struct brd_device *brd = bdev->bd_disk->private_data;
408
409	if (cmd != BLKFLSBUF)
410		return -ENOTTY;
411
412	/*
413	 * ram device BLKFLSBUF has special semantics, we want to actually
414	 * release and destroy the ramdisk data.
415	 */
416	mutex_lock(&brd_mutex);
417	mutex_lock(&bdev->bd_mutex);
418	error = -EBUSY;
419	if (bdev->bd_openers <= 1) {
420		/*
421		 * Kill the cache first, so it isn't written back to the
422		 * device.
423		 *
424		 * Another thread might instantiate more buffercache here,
425		 * but there is not much we can do to close that race.
426		 */
427		kill_bdev(bdev);
428		brd_free_pages(brd);
429		error = 0;
430	}
431	mutex_unlock(&bdev->bd_mutex);
432	mutex_unlock(&brd_mutex);
433
434	return error;
435}
436
437static const struct block_device_operations brd_fops = {
438	.owner =		THIS_MODULE,
439	.rw_page =		brd_rw_page,
440	.ioctl =		brd_ioctl,
441	.direct_access =	brd_direct_access,
442};
443
444/*
445 * And now the modules code and kernel interface.
446 */
447static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
448module_param(rd_nr, int, S_IRUGO);
449MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
450
451int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
452module_param(rd_size, int, S_IRUGO);
453MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
454
455static int max_part = 1;
456module_param(max_part, int, S_IRUGO);
457MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");
458
459MODULE_LICENSE("GPL");
460MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
461MODULE_ALIAS("rd");
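/*
 * Editorial note, illustrative only: in this version max_part is simply
 * the number of minors reserved per device (alloc_disk(max_part) and
 * first_minor = i * max_part below), not a power-of-two shift as in v3.1.
 * E.g. rd_nr=4 max_part=16 yields /dev/ram0..ram3 at minors 0, 16, 32
 * and 48, with partitions beyond that range getting extended dev_t
 * numbers via GENHD_FL_EXT_DEVT.
 */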
462
463#ifndef MODULE
464/* Legacy boot options - nonmodular */
465static int __init ramdisk_size(char *str)
466{
467	rd_size = simple_strtol(str, NULL, 0);
468	return 1;
469}
470__setup("ramdisk_size=", ramdisk_size);
471#endif
472
473/*
474 * The device scheme is derived from loop.c. Keep them in synch where possible
475 * (should share code eventually).
476 */
477static LIST_HEAD(brd_devices);
478static DEFINE_MUTEX(brd_devices_mutex);
479
480static struct brd_device *brd_alloc(int i)
481{
482	struct brd_device *brd;
483	struct gendisk *disk;
484
485	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
486	if (!brd)
487		goto out;
488	brd->brd_number		= i;
489	spin_lock_init(&brd->brd_lock);
490	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
491
492	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
493	if (!brd->brd_queue)
494		goto out_free_dev;
495
496	blk_queue_make_request(brd->brd_queue, brd_make_request);
497	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
498	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
499
500	/* This is so that fdisk will align partitions on 4k, because the
501	 * direct_access API needs 4k alignment when returning a PFN.
502	 * (This is only a problem on very small devices <= 4M;
503	 *  otherwise fdisk will align on 1M. Regardless, this call
504	 *  is harmless.)
505	 */
506	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
507
508	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
509	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
510	brd->brd_queue->limits.discard_zeroes_data = 1;
511	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
512
513	disk = brd->brd_disk = alloc_disk(max_part);
514	if (!disk)
515		goto out_free_queue;
516	disk->major		= RAMDISK_MAJOR;
517	disk->first_minor	= i * max_part;
518	disk->fops		= &brd_fops;
519	disk->private_data	= brd;
520	disk->queue		= brd->brd_queue;
521	disk->flags		= GENHD_FL_EXT_DEVT;
522	sprintf(disk->disk_name, "ram%d", i);
523	set_capacity(disk, rd_size * 2);
524
525	return brd;
526
527out_free_queue:
528	blk_cleanup_queue(brd->brd_queue);
529out_free_dev:
530	kfree(brd);
531out:
532	return NULL;
533}
534
535static void brd_free(struct brd_device *brd)
536{
537	put_disk(brd->brd_disk);
538	blk_cleanup_queue(brd->brd_queue);
539	brd_free_pages(brd);
540	kfree(brd);
541}
542
543static struct brd_device *brd_init_one(int i, bool *new)
544{
545	struct brd_device *brd;
546
547	*new = false;
548	list_for_each_entry(brd, &brd_devices, brd_list) {
549		if (brd->brd_number == i)
550			goto out;
551	}
552
553	brd = brd_alloc(i);
554	if (brd) {
555		add_disk(brd->brd_disk);
556		list_add_tail(&brd->brd_list, &brd_devices);
557	}
558	*new = true;
559out:
560	return brd;
561}
562
563static void brd_del_one(struct brd_device *brd)
564{
565	list_del(&brd->brd_list);
566	del_gendisk(brd->brd_disk);
567	brd_free(brd);
568}
569
570static struct kobject *brd_probe(dev_t dev, int *part, void *data)
571{
572	struct brd_device *brd;
573	struct kobject *kobj;
574	bool new;
575
576	mutex_lock(&brd_devices_mutex);
577	brd = brd_init_one(MINOR(dev) / max_part, &new);
578	kobj = brd ? get_disk(brd->brd_disk) : NULL;
579	mutex_unlock(&brd_devices_mutex);
580
581	if (new)
582		*part = 0;
583
584	return kobj;
585}
586
587static int __init brd_init(void)
588{
589	struct brd_device *brd, *next;
590	int i;
591
592	/*
593	 * brd module now has a feature to instantiate underlying device
594	 * structure on-demand, provided that there is an access dev node.
595	 *
596	 * (1) if rd_nr is specified, create that many upfront; otherwise
597	 *     it defaults to CONFIG_BLK_DEV_RAM_COUNT.
598	 * (2) Users can further extend the brd devices by creating dev nodes
599	 *     themselves and having the kernel instantiate the actual device
600	 *     on demand. Example:
601	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
602	 *		fdisk -l /path/devnod_name
603	 *	If (X / max_part) was not already created, it is created
604	 *	dynamically.
605	 */
606
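	/*
	 * Editorial note, worked example: with the default max_part = 1,
	 * creating /dev/ram5 as "b 1 5" and accessing it makes brd_probe()
	 * call brd_init_one(5 / 1), which allocates and registers ram5 on
	 * the fly; with max_part = 16 that same device would instead sit at
	 * minor 5 * 16 = 80.
	 */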
607	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
608		return -EIO;
609
610	if (unlikely(!max_part))
611		max_part = 1;
612
613	for (i = 0; i < rd_nr; i++) {
614		brd = brd_alloc(i);
615		if (!brd)
616			goto out_free;
617		list_add_tail(&brd->brd_list, &brd_devices);
618	}
619
620	/* point of no return */
621
622	list_for_each_entry(brd, &brd_devices, brd_list)
623		add_disk(brd->brd_disk);
624
625	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
626				  THIS_MODULE, brd_probe, NULL, NULL);
627
628	pr_info("brd: module loaded\n");
629	return 0;
630
631out_free:
632	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
633		list_del(&brd->brd_list);
634		brd_free(brd);
635	}
636	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
637
638	pr_info("brd: module NOT loaded !!!\n");
639	return -ENOMEM;
640}
641
642static void __exit brd_exit(void)
643{
644	struct brd_device *brd, *next;
645
646	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
647		brd_del_one(brd);
648
649	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
650	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
651
652	pr_info("brd: module unloaded\n");
653}
654
655module_init(brd_init);
656module_exit(brd_exit);
657