v5.9 (drivers/dax/super.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright(c) 2017 Intel Corporation. All rights reserved.
  4 */
  5#include <linux/pagemap.h>
  6#include <linux/module.h>
  7#include <linux/mount.h>
  8#include <linux/pseudo_fs.h>
  9#include <linux/magic.h>
 10#include <linux/genhd.h>
 11#include <linux/pfn_t.h>
 12#include <linux/cdev.h>
 13#include <linux/hash.h>
 14#include <linux/slab.h>
 15#include <linux/uio.h>
 16#include <linux/dax.h>
 17#include <linux/fs.h>
 18#include "dax-private.h"
 19
 20static dev_t dax_devt;
 21DEFINE_STATIC_SRCU(dax_srcu);
 22static struct vfsmount *dax_mnt;
 23static DEFINE_IDA(dax_minor_ida);
 24static struct kmem_cache *dax_cache __read_mostly;
 25static struct super_block *dax_superblock __read_mostly;
 26
 27#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
 28static struct hlist_head dax_host_list[DAX_HASH_SIZE];
 29static DEFINE_SPINLOCK(dax_host_lock);
 30
 31int dax_read_lock(void)
 32{
 33	return srcu_read_lock(&dax_srcu);
 34}
 35EXPORT_SYMBOL_GPL(dax_read_lock);
 36
 37void dax_read_unlock(int id)
 38{
 39	srcu_read_unlock(&dax_srcu, id);
 40}
 41EXPORT_SYMBOL_GPL(dax_read_unlock);
 42
 43#ifdef CONFIG_BLOCK
 44#include <linux/blkdev.h>
 45
 46int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
 47		pgoff_t *pgoff)
 48{
 49	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;
 50
 51	if (pgoff)
 52		*pgoff = PHYS_PFN(phys_off);
 53	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
 54		return -EINVAL;
 55	return 0;
 56}
 57EXPORT_SYMBOL(bdev_dax_pgoff);
 58
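As a hedged illustration of the helper above: callers pass a 512-byte sector offset relative to the partition and get back a page offset relative to the whole dax_device (partition start included). Everything named example_* below is hypothetical and not part of this file.

#include <linux/blkdev.h>
#include <linux/dax.h>

/* Hypothetical wrapper: translate a partition-relative sector into a
 * device-relative pgoff, failing with -EINVAL when either the resulting
 * byte offset or the requested size is not PAGE_SIZE aligned (the same
 * checks bdev_dax_pgoff() performs). */
static int example_sector_to_pgoff(struct block_device *bdev, sector_t sector,
		pgoff_t *pgoff)
{
	return bdev_dax_pgoff(bdev, sector, PAGE_SIZE, pgoff);
}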
 59#if IS_ENABLED(CONFIG_FS_DAX)
 60struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
 61{
 62	if (!blk_queue_dax(bdev->bd_disk->queue))
 63		return NULL;
 64	return dax_get_by_host(bdev->bd_disk->disk_name);
 65}
 66EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
 67#endif
 68
 69bool __generic_fsdax_supported(struct dax_device *dax_dev,
 70		struct block_device *bdev, int blocksize, sector_t start,
 71		sector_t sectors)
 72{
 73	bool dax_enabled = false;
 74	pgoff_t pgoff, pgoff_end;
 75	char buf[BDEVNAME_SIZE];
 76	void *kaddr, *end_kaddr;
 77	pfn_t pfn, end_pfn;
 78	sector_t last_page;
 79	long len, len2;
 80	int err, id;
 81
 82	if (blocksize != PAGE_SIZE) {
 83		pr_info("%s: error: unsupported blocksize for dax\n",
 84				bdevname(bdev, buf));
 85		return false;
 86	}
 87
 88	if (!dax_dev) {
 89		pr_debug("%s: error: dax unsupported by block device\n",
 90				bdevname(bdev, buf));
 91		return false;
 92	}
 93
 94	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
 95	if (err) {
 96		pr_info("%s: error: unaligned partition for dax\n",
 97				bdevname(bdev, buf));
 98		return false;
 99	}
100
101	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
102	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
103	if (err) {
104		pr_info("%s: error: unaligned partition for dax\n",
105				bdevname(bdev, buf));
106		return false;
107	}
108
109	id = dax_read_lock();
110	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 111	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
 112
113	if (len < 1 || len2 < 1) {
114		pr_info("%s: error: dax access failed (%ld)\n",
115				bdevname(bdev, buf), len < 1 ? len : len2);
116		dax_read_unlock(id);
117		return false;
118	}
119
120	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
121		/*
122		 * An arch that has enabled the pmem api should also
123		 * have its drivers support pfn_t_devmap()
124		 *
125		 * This is a developer warning and should not trigger in
126		 * production. dax_flush() will crash since it depends
127		 * on being able to do (page_address(pfn_to_page())).
128		 */
129		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
130		dax_enabled = true;
131	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
132		struct dev_pagemap *pgmap, *end_pgmap;
133
134		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
135		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
136		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
137				&& pfn_t_to_page(pfn)->pgmap == pgmap
138				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
139				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
140				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
141			dax_enabled = true;
142		put_dev_pagemap(pgmap);
143		put_dev_pagemap(end_pgmap);
144
145	}
146	dax_read_unlock(id);
147
148	if (!dax_enabled) {
149		pr_info("%s: error: dax support not enabled\n",
150				bdevname(bdev, buf));
151		return false;
152	}
153	return true;
154}
155EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
156
157/**
158 * __bdev_dax_supported() - Check if the device supports dax for filesystem
159 * @bdev: block device to check
160 * @blocksize: The block size of the device
161 *
162 * This is a library function for filesystems to check if the block device
163 * can be mounted with dax option.
164 *
165 * Return: true if supported, false if unsupported
166 */
167bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
168{
169	struct dax_device *dax_dev;
170	struct request_queue *q;
171	char buf[BDEVNAME_SIZE];
172	bool ret;
173	int id;
174
175	q = bdev_get_queue(bdev);
176	if (!q || !blk_queue_dax(q)) {
177		pr_debug("%s: error: request queue doesn't support dax\n",
178				bdevname(bdev, buf));
179		return false;
180	}
181
182	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
183	if (!dax_dev) {
184		pr_debug("%s: error: device does not support dax\n",
185				bdevname(bdev, buf));
186		return false;
187	}
188
189	id = dax_read_lock();
190	ret = dax_supported(dax_dev, bdev, blocksize, 0,
191			i_size_read(bdev->bd_inode) / 512);
192	dax_read_unlock(id);
193
194	put_dax(dax_dev);
195
196	return ret;
197}
198EXPORT_SYMBOL_GPL(__bdev_dax_supported);
199#endif
200
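A hedged sketch of the mount-time flow the kernel-doc above describes: a filesystem grabs the dax_device backing its block device and asks whether DAX can be used at its block size. The example_ names are placeholders; real filesystems (ext4, XFS) have their own wrappers around these calls.

#include <linux/blkdev.h>
#include <linux/dax.h>

/* Hypothetical mount-time probe: return the dax_device backing @bdev if
 * "-o dax" can be honoured at @blocksize, otherwise NULL. On success the
 * caller owns a reference and must eventually put_dax() it. */
static struct dax_device *example_mount_dax_probe(struct block_device *bdev,
		int blocksize)
{
	struct dax_device *dax_dev = fs_dax_get_by_bdev(bdev);

	if (!dax_dev)
		return NULL;

	if (!__bdev_dax_supported(bdev, blocksize)) {
		put_dax(dax_dev);
		return NULL;
	}
	return dax_dev;
}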
201enum dax_device_flags {
202	/* !alive + rcu grace period == no new operations / mappings */
203	DAXDEV_ALIVE,
204	/* gate whether dax_flush() calls the low level flush routine */
205	DAXDEV_WRITE_CACHE,
206	/* flag to check if device supports synchronous flush */
207	DAXDEV_SYNC,
208};
209
210/**
211 * struct dax_device - anchor object for dax services
212 * @inode: core vfs
213 * @cdev: optional character interface for "device dax"
214 * @host: optional name for lookups where the device path is not available
215 * @private: dax driver private data
216 * @flags: state and boolean properties
217 */
218struct dax_device {
219	struct hlist_node list;
220	struct inode inode;
221	struct cdev cdev;
222	const char *host;
223	void *private;
224	unsigned long flags;
225	const struct dax_operations *ops;
226};
227
228static ssize_t write_cache_show(struct device *dev,
229		struct device_attribute *attr, char *buf)
230{
231	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
232	ssize_t rc;
233
234	WARN_ON_ONCE(!dax_dev);
235	if (!dax_dev)
236		return -ENXIO;
237
238	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
239	put_dax(dax_dev);
240	return rc;
241}
242
243static ssize_t write_cache_store(struct device *dev,
244		struct device_attribute *attr, const char *buf, size_t len)
245{
246	bool write_cache;
247	int rc = strtobool(buf, &write_cache);
248	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
249
250	WARN_ON_ONCE(!dax_dev);
251	if (!dax_dev)
252		return -ENXIO;
253
254	if (rc)
255		len = rc;
256	else
257		dax_write_cache(dax_dev, write_cache);
258
259	put_dax(dax_dev);
260	return len;
261}
262static DEVICE_ATTR_RW(write_cache);
263
264static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
265{
266	struct device *dev = container_of(kobj, typeof(*dev), kobj);
267	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
268
269	WARN_ON_ONCE(!dax_dev);
270	if (!dax_dev)
271		return 0;
272
273#ifndef CONFIG_ARCH_HAS_PMEM_API
274	if (a == &dev_attr_write_cache.attr)
275		return 0;
276#endif
277	return a->mode;
278}
279
280static struct attribute *dax_attributes[] = {
281	&dev_attr_write_cache.attr,
282	NULL,
283};
284
285struct attribute_group dax_attribute_group = {
286	.name = "dax",
287	.attrs = dax_attributes,
288	.is_visible = dax_visible,
289};
290EXPORT_SYMBOL_GPL(dax_attribute_group);
291
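dax_attribute_group is exported so that a provider driver can expose the write_cache knob under its own sysfs directory (the group name "dax" above becomes the subdirectory). A minimal, hypothetical registration using the generic sysfs API follows; whether a given driver wires the group up this way or through its device's attribute-group list is an assumption here.

#include <linux/device.h>
#include <linux/sysfs.h>

extern struct attribute_group dax_attribute_group;

/* Hypothetical: attach the "dax" attribute directory (containing
 * write_cache) to a struct device whose name matches the dax_device
 * host, so the dax_get_by_host(dev_name(dev)) lookup in the show/store
 * handlers above succeeds. */
static int example_register_dax_attrs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &dax_attribute_group);
}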
292/**
293 * dax_direct_access() - translate a device pgoff to an absolute pfn
294 * @dax_dev: a dax_device instance representing the logical memory range
295 * @pgoff: offset in pages from the start of the device to translate
296 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
297 * @kaddr: output parameter that returns a virtual address mapping of pfn
298 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
299 *
300 * Return: negative errno if an error occurs, otherwise the number of
301 * pages accessible at the device relative @pgoff.
302 */
303long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
304		void **kaddr, pfn_t *pfn)
305{
306	long avail;
307
308	if (!dax_dev)
309		return -EOPNOTSUPP;
310
311	if (!dax_alive(dax_dev))
312		return -ENXIO;
313
314	if (nr_pages < 0)
315		return nr_pages;
316
317	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
318			kaddr, pfn);
319	if (!avail)
320		return -ERANGE;
321	return min(avail, nr_pages);
322}
323EXPORT_SYMBOL_GPL(dax_direct_access);
324
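A hedged sketch of the calling convention documented above: the returned translation is only valid inside a dax_read_lock()/dax_read_unlock() section, and writes through the returned kernel address can be followed by dax_flush() (a no-op unless the device's write cache is enabled). Only the dax core calls are real; the example_ function is illustrative.

#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/string.h>

/* Hypothetical: copy @len bytes from @src into the page at @pgoff of
 * @dax_dev. Returns 0 on success or a negative errno. */
static int example_dax_write(struct dax_device *dax_dev, pgoff_t pgoff,
		const void *src, size_t len)
{
	void *kaddr;
	pfn_t pfn;
	long avail;
	int id;

	if (len > PAGE_SIZE)
		return -EINVAL;

	id = dax_read_lock();
	avail = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	if (avail < 1) {
		dax_read_unlock(id);
		return avail < 0 ? avail : -ERANGE;
	}

	memcpy(kaddr, src, len);
	dax_flush(dax_dev, kaddr, len);	/* no-op without a write cache */
	dax_read_unlock(id);
	return 0;
}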
325bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
326		int blocksize, sector_t start, sector_t len)
327{
328	if (!dax_dev)
329		return false;
330
331	if (!dax_alive(dax_dev))
332		return false;
333
334	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
335}
336EXPORT_SYMBOL_GPL(dax_supported);
337
338size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
339		size_t bytes, struct iov_iter *i)
340{
341	if (!dax_alive(dax_dev))
342		return 0;
343
344	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
345}
346EXPORT_SYMBOL_GPL(dax_copy_from_iter);
347
348size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
349		size_t bytes, struct iov_iter *i)
350{
351	if (!dax_alive(dax_dev))
352		return 0;
353
354	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
355}
356EXPORT_SYMBOL_GPL(dax_copy_to_iter);
357
358int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
359			size_t nr_pages)
360{
361	if (!dax_alive(dax_dev))
362		return -ENXIO;
363	/*
364	 * There are no callers that want to zero more than one page as of now.
365	 * Once users are there, this check can be removed after the
366	 * device mapper code has been updated to split ranges across targets.
367	 */
368	if (nr_pages != 1)
369		return -EIO;
370
371	return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
372}
373EXPORT_SYMBOL_GPL(dax_zero_page_range);
374
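Given the single-page restriction explained in the comment above, a hedged example of zeroing one page of a dax_device; the wrapper name is made up.

#include <linux/dax.h>

/* Hypothetical: zero the page at @pgoff while holding the srcu read
 * lock, so the device cannot be torn down under the call (dax_alive()
 * asserts the lock is held). */
static int example_dax_zero_one_page(struct dax_device *dax_dev, pgoff_t pgoff)
{
	int id, rc;

	id = dax_read_lock();
	rc = dax_zero_page_range(dax_dev, pgoff, 1);
	dax_read_unlock(id);
	return rc;
}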
375#ifdef CONFIG_ARCH_HAS_PMEM_API
376void arch_wb_cache_pmem(void *addr, size_t size);
377void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
378{
379	if (unlikely(!dax_write_cache_enabled(dax_dev)))
380		return;
381
382	arch_wb_cache_pmem(addr, size);
383}
384#else
385void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
386{
387}
388#endif
389EXPORT_SYMBOL_GPL(dax_flush);
390
391void dax_write_cache(struct dax_device *dax_dev, bool wc)
392{
393	if (wc)
394		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
395	else
396		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
397}
398EXPORT_SYMBOL_GPL(dax_write_cache);
399
400bool dax_write_cache_enabled(struct dax_device *dax_dev)
401{
402	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
403}
404EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
405
406bool __dax_synchronous(struct dax_device *dax_dev)
407{
408	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
409}
410EXPORT_SYMBOL_GPL(__dax_synchronous);
411
412void __set_dax_synchronous(struct dax_device *dax_dev)
413{
414	set_bit(DAXDEV_SYNC, &dax_dev->flags);
415}
416EXPORT_SYMBOL_GPL(__set_dax_synchronous);
417
418bool dax_alive(struct dax_device *dax_dev)
419{
420	lockdep_assert_held(&dax_srcu);
421	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
422}
423EXPORT_SYMBOL_GPL(dax_alive);
424
425static int dax_host_hash(const char *host)
426{
427	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
428}
429
430/*
431 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
432 * that any fault handlers or operations that might have seen
433 * dax_alive(), have completed.  Any operations that start after
434 * synchronize_srcu() has run will abort upon seeing !dax_alive().
435 */
436void kill_dax(struct dax_device *dax_dev)
437{
438	if (!dax_dev)
439		return;
440
441	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
442
443	synchronize_srcu(&dax_srcu);
444
445	spin_lock(&dax_host_lock);
446	hlist_del_init(&dax_dev->list);
447	spin_unlock(&dax_host_lock);
448}
449EXPORT_SYMBOL_GPL(kill_dax);
450
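Per the comment above, a provider shuts a dax_device down by first marking it dead (which waits out any in-flight srcu readers) and only then dropping its reference. A hedged sketch of that unwind order; the function name is illustrative.

#include <linux/dax.h>

/* Hypothetical driver teardown: after kill_dax() returns, no new dax
 * operations can start, so it is safe to drop this caller's reference;
 * the inode-embedded device is freed once the last reference is gone. */
static void example_dax_teardown(struct dax_device *dax_dev)
{
	kill_dax(dax_dev);	/* clear DAXDEV_ALIVE, synchronize_srcu() */
	put_dax(dax_dev);	/* drop the reference taken at alloc_dax() */
}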
451void run_dax(struct dax_device *dax_dev)
452{
453	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
454}
455EXPORT_SYMBOL_GPL(run_dax);
456
457static struct inode *dax_alloc_inode(struct super_block *sb)
458{
459	struct dax_device *dax_dev;
460	struct inode *inode;
461
462	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
463	if (!dax_dev)
464		return NULL;
465
466	inode = &dax_dev->inode;
467	inode->i_rdev = 0;
468	return inode;
469}
470
471static struct dax_device *to_dax_dev(struct inode *inode)
472{
473	return container_of(inode, struct dax_device, inode);
474}
475
476static void dax_free_inode(struct inode *inode)
477{
478	struct dax_device *dax_dev = to_dax_dev(inode);
479	kfree(dax_dev->host);
480	dax_dev->host = NULL;
481	if (inode->i_rdev)
482		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
483	kmem_cache_free(dax_cache, dax_dev);
484}
485
486static void dax_destroy_inode(struct inode *inode)
487{
488	struct dax_device *dax_dev = to_dax_dev(inode);
489	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
490			"kill_dax() must be called before final iput()\n");
491}
492
493static const struct super_operations dax_sops = {
494	.statfs = simple_statfs,
495	.alloc_inode = dax_alloc_inode,
496	.destroy_inode = dax_destroy_inode,
497	.free_inode = dax_free_inode,
498	.drop_inode = generic_delete_inode,
499};
500
501static int dax_init_fs_context(struct fs_context *fc)
502{
503	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);
504	if (!ctx)
505		return -ENOMEM;
506	ctx->ops = &dax_sops;
507	return 0;
508}
509
510static struct file_system_type dax_fs_type = {
511	.name		= "dax",
512	.init_fs_context = dax_init_fs_context,
513	.kill_sb	= kill_anon_super,
514};
515
516static int dax_test(struct inode *inode, void *data)
517{
518	dev_t devt = *(dev_t *) data;
519
520	return inode->i_rdev == devt;
521}
522
523static int dax_set(struct inode *inode, void *data)
524{
525	dev_t devt = *(dev_t *) data;
526
527	inode->i_rdev = devt;
528	return 0;
529}
530
531static struct dax_device *dax_dev_get(dev_t devt)
532{
533	struct dax_device *dax_dev;
534	struct inode *inode;
535
536	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
537			dax_test, dax_set, &devt);
538
539	if (!inode)
540		return NULL;
541
542	dax_dev = to_dax_dev(inode);
543	if (inode->i_state & I_NEW) {
544		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
545		inode->i_cdev = &dax_dev->cdev;
546		inode->i_mode = S_IFCHR;
547		inode->i_flags = S_DAX;
548		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
549		unlock_new_inode(inode);
550	}
551
552	return dax_dev;
553}
554
555static void dax_add_host(struct dax_device *dax_dev, const char *host)
556{
557	int hash;
558
559	/*
560	 * Unconditionally init dax_dev since it's coming from a
561	 * non-zeroed slab cache
562	 */
563	INIT_HLIST_NODE(&dax_dev->list);
564	dax_dev->host = host;
565	if (!host)
566		return;
567
568	hash = dax_host_hash(host);
569	spin_lock(&dax_host_lock);
570	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
571	spin_unlock(&dax_host_lock);
572}
573
574struct dax_device *alloc_dax(void *private, const char *__host,
575		const struct dax_operations *ops, unsigned long flags)
576{
577	struct dax_device *dax_dev;
578	const char *host;
579	dev_t devt;
580	int minor;
581
582	if (ops && !ops->zero_page_range) {
583		pr_debug("%s: error: device does not provide dax"
584			 " operation zero_page_range()\n",
585			 __host ? __host : "Unknown");
586		return ERR_PTR(-EINVAL);
587	}
588
589	host = kstrdup(__host, GFP_KERNEL);
590	if (__host && !host)
591		return ERR_PTR(-ENOMEM);
592
593	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
594	if (minor < 0)
595		goto err_minor;
596
597	devt = MKDEV(MAJOR(dax_devt), minor);
598	dax_dev = dax_dev_get(devt);
599	if (!dax_dev)
600		goto err_dev;
601
602	dax_add_host(dax_dev, host);
603	dax_dev->ops = ops;
604	dax_dev->private = private;
605	if (flags & DAXDEV_F_SYNC)
606		set_dax_synchronous(dax_dev);
607
608	return dax_dev;
609
610 err_dev:
611	ida_simple_remove(&dax_minor_ida, minor);
612 err_minor:
613	kfree(host);
614	return ERR_PTR(-ENOMEM);
615}
616EXPORT_SYMBOL_GPL(alloc_dax);
617
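A hedged sketch of how a provider driver might construct a dax_device with alloc_dax(). The callback signatures are inferred from the ops-> calls in this file; the example_ callbacks, the host name, and the private pointer are placeholders, not real kernel code. Note that alloc_dax() in this version returns an ERR_PTR() on failure, so real callers check IS_ERR().

#include <linux/dax.h>
#include <linux/pfn_t.h>

/* Hypothetical provider callbacks: bodies omitted or simplified. */
static long example_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	return -ENXIO;		/* real driver: translate pgoff to kaddr/pfn */
}

static int example_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages)
{
	return -EIO;		/* real driver: zero the backing pages */
}

static const struct dax_operations example_dax_ops = {
	.direct_access = example_direct_access,
	/* zero_page_range is mandatory, see the check in alloc_dax() */
	.zero_page_range = example_zero_page_range,
	/* .dax_supported / .copy_from_iter / .copy_to_iter as needed */
};

/* Hypothetical setup: @private is driver state, "example0" the host name
 * later usable with dax_get_by_host(). DAXDEV_F_SYNC advertises that CPU
 * stores reach persistence without an explicit flush. */
static struct dax_device *example_dax_setup(void *private)
{
	return alloc_dax(private, "example0", &example_dax_ops, DAXDEV_F_SYNC);
}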
618void put_dax(struct dax_device *dax_dev)
619{
620	if (!dax_dev)
621		return;
622	iput(&dax_dev->inode);
623}
624EXPORT_SYMBOL_GPL(put_dax);
625
626/**
627 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
628 * @host: alternate name for the device registered by a dax driver
629 */
630struct dax_device *dax_get_by_host(const char *host)
631{
632	struct dax_device *dax_dev, *found = NULL;
633	int hash, id;
634
635	if (!host)
636		return NULL;
637
638	hash = dax_host_hash(host);
639
640	id = dax_read_lock();
641	spin_lock(&dax_host_lock);
642	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
643		if (!dax_alive(dax_dev)
644				|| strcmp(host, dax_dev->host) != 0)
645			continue;
646
647		if (igrab(&dax_dev->inode))
648			found = dax_dev;
649		break;
650	}
651	spin_unlock(&dax_host_lock);
652	dax_read_unlock(id);
653
654	return found;
655}
656EXPORT_SYMBOL_GPL(dax_get_by_host);
657
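The lookup documented above returns a referenced dax_device (via igrab()), so every successful call must be balanced with put_dax(). A short, hypothetical use, reading the synchronous flag through the __dax_synchronous() helper defined earlier in this file:

#include <linux/dax.h>

/* Hypothetical: report whether the dax_device registered under @host
 * advertises synchronous (flush-free) persistence. */
static bool example_host_is_synchronous(const char *host)
{
	struct dax_device *dax_dev = dax_get_by_host(host);
	bool sync;

	if (!dax_dev)
		return false;

	sync = __dax_synchronous(dax_dev);
	put_dax(dax_dev);
	return sync;
}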
658/**
659 * inode_dax: convert a public inode into its dax_dev
660 * @inode: An inode with i_cdev pointing to a dax_dev
661 *
662 * Note this is not equivalent to to_dax_dev() which is for private
663 * internal use where we know the inode filesystem type == dax_fs_type.
664 */
665struct dax_device *inode_dax(struct inode *inode)
666{
667	struct cdev *cdev = inode->i_cdev;
668
669	return container_of(cdev, struct dax_device, cdev);
670}
671EXPORT_SYMBOL_GPL(inode_dax);
672
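A hedged sketch of the "public inode" path the comment above describes: a character-device open() for a device-dax node can recover the dax_device from the inode and stash it for later file operations. The fops wiring and field use below are illustrative, not copied from the device-dax driver.

#include <linux/dax.h>
#include <linux/fs.h>

/* Hypothetical open() for a "device dax" char device: i_cdev of @inode
 * is embedded in a struct dax_device, so inode_dax() recovers it. */
static int example_dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);

	if (!dax_get_private(dax_dev))
		return -ENXIO;		/* device already killed */

	filp->private_data = dax_dev;
	return 0;
}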
673struct inode *dax_inode(struct dax_device *dax_dev)
674{
675	return &dax_dev->inode;
676}
677EXPORT_SYMBOL_GPL(dax_inode);
678
679void *dax_get_private(struct dax_device *dax_dev)
680{
681	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
682		return NULL;
683	return dax_dev->private;
684}
685EXPORT_SYMBOL_GPL(dax_get_private);
686
687static void init_once(void *_dax_dev)
688{
689	struct dax_device *dax_dev = _dax_dev;
690	struct inode *inode = &dax_dev->inode;
691
692	memset(dax_dev, 0, sizeof(*dax_dev));
693	inode_init_once(inode);
694}
695
696static int dax_fs_init(void)
697{
698	int rc;
699
700	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
701			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
702			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
703			init_once);
704	if (!dax_cache)
705		return -ENOMEM;
706
707	dax_mnt = kern_mount(&dax_fs_type);
708	if (IS_ERR(dax_mnt)) {
709		rc = PTR_ERR(dax_mnt);
710		goto err_mount;
711	}
712	dax_superblock = dax_mnt->mnt_sb;
713
714	return 0;
715
716 err_mount:
717	kmem_cache_destroy(dax_cache);
718
719	return rc;
720}
721
722static void dax_fs_exit(void)
723{
724	kern_unmount(dax_mnt);
725	kmem_cache_destroy(dax_cache);
726}
727
728static int __init dax_core_init(void)
729{
730	int rc;
731
732	rc = dax_fs_init();
733	if (rc)
734		return rc;
735
736	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
737	if (rc)
738		goto err_chrdev;
739
740	rc = dax_bus_init();
741	if (rc)
742		goto err_bus;
743	return 0;
744
745err_bus:
746	unregister_chrdev_region(dax_devt, MINORMASK+1);
747err_chrdev:
748	dax_fs_exit();
749	return 0;
750}
751
752static void __exit dax_core_exit(void)
753{
754	unregister_chrdev_region(dax_devt, MINORMASK+1);
755	ida_destroy(&dax_minor_ida);
756	dax_fs_exit();
757}
758
759MODULE_AUTHOR("Intel Corporation");
760MODULE_LICENSE("GPL v2");
761subsys_initcall(dax_core_init);
762module_exit(dax_core_exit);
v5.4 (drivers/dax/super.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright(c) 2017 Intel Corporation. All rights reserved.
  4 */
  5#include <linux/pagemap.h>
  6#include <linux/module.h>
  7#include <linux/mount.h>
  8#include <linux/pseudo_fs.h>
  9#include <linux/magic.h>
 10#include <linux/genhd.h>
 11#include <linux/pfn_t.h>
 12#include <linux/cdev.h>
 13#include <linux/hash.h>
 14#include <linux/slab.h>
 15#include <linux/uio.h>
 16#include <linux/dax.h>
 17#include <linux/fs.h>
 18#include "dax-private.h"
 19
 20static dev_t dax_devt;
 21DEFINE_STATIC_SRCU(dax_srcu);
 22static struct vfsmount *dax_mnt;
 23static DEFINE_IDA(dax_minor_ida);
 24static struct kmem_cache *dax_cache __read_mostly;
 25static struct super_block *dax_superblock __read_mostly;
 26
 27#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
 28static struct hlist_head dax_host_list[DAX_HASH_SIZE];
 29static DEFINE_SPINLOCK(dax_host_lock);
 30
 31int dax_read_lock(void)
 32{
 33	return srcu_read_lock(&dax_srcu);
 34}
 35EXPORT_SYMBOL_GPL(dax_read_lock);
 36
 37void dax_read_unlock(int id)
 38{
 39	srcu_read_unlock(&dax_srcu, id);
 40}
 41EXPORT_SYMBOL_GPL(dax_read_unlock);
 42
 43#ifdef CONFIG_BLOCK
 44#include <linux/blkdev.h>
 45
 46int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
 47		pgoff_t *pgoff)
 48{
 49	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;
 50
 51	if (pgoff)
 52		*pgoff = PHYS_PFN(phys_off);
 53	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
 54		return -EINVAL;
 55	return 0;
 56}
 57EXPORT_SYMBOL(bdev_dax_pgoff);
 58
 59#if IS_ENABLED(CONFIG_FS_DAX)
 60struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
 61{
 62	if (!blk_queue_dax(bdev->bd_queue))
 63		return NULL;
 64	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
 65}
 66EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
 67#endif
 68
 69bool __generic_fsdax_supported(struct dax_device *dax_dev,
 70		struct block_device *bdev, int blocksize, sector_t start,
 71		sector_t sectors)
 72{
 73	bool dax_enabled = false;
 74	pgoff_t pgoff, pgoff_end;
 75	char buf[BDEVNAME_SIZE];
 76	void *kaddr, *end_kaddr;
 77	pfn_t pfn, end_pfn;
 78	sector_t last_page;
 79	long len, len2;
 80	int err, id;
 81
 82	if (blocksize != PAGE_SIZE) {
  83		pr_debug("%s: error: unsupported blocksize for dax\n",
  84				bdevname(bdev, buf));
 85		return false;
 86	}
 87
 88	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
 89	if (err) {
 90		pr_debug("%s: error: unaligned partition for dax\n",
 91				bdevname(bdev, buf));
 92		return false;
 93	}
 94
 95	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
 96	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
 97	if (err) {
 98		pr_debug("%s: error: unaligned partition for dax\n",
 99				bdevname(bdev, buf));
100		return false;
101	}
102
103	id = dax_read_lock();
104	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
105	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
106	dax_read_unlock(id);
107
108	if (len < 1 || len2 < 1) {
109		pr_debug("%s: error: dax access failed (%ld)\n",
 110				bdevname(bdev, buf), len < 1 ? len : len2);
 111		return false;
112	}
113
114	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
115		/*
116		 * An arch that has enabled the pmem api should also
117		 * have its drivers support pfn_t_devmap()
118		 *
119		 * This is a developer warning and should not trigger in
120		 * production. dax_flush() will crash since it depends
121		 * on being able to do (page_address(pfn_to_page())).
122		 */
123		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
124		dax_enabled = true;
125	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
126		struct dev_pagemap *pgmap, *end_pgmap;
127
128		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
129		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
130		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
131				&& pfn_t_to_page(pfn)->pgmap == pgmap
132				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
133				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
134				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
135			dax_enabled = true;
136		put_dev_pagemap(pgmap);
137		put_dev_pagemap(end_pgmap);
138
 139	}
 140
141	if (!dax_enabled) {
142		pr_debug("%s: error: dax support not enabled\n",
143				bdevname(bdev, buf));
144		return false;
145	}
146	return true;
147}
148EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
149
150/**
151 * __bdev_dax_supported() - Check if the device supports dax for filesystem
152 * @bdev: block device to check
153 * @blocksize: The block size of the device
154 *
155 * This is a library function for filesystems to check if the block device
156 * can be mounted with dax option.
157 *
158 * Return: true if supported, false if unsupported
159 */
160bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
161{
162	struct dax_device *dax_dev;
163	struct request_queue *q;
164	char buf[BDEVNAME_SIZE];
165	bool ret;
166	int id;
167
168	q = bdev_get_queue(bdev);
169	if (!q || !blk_queue_dax(q)) {
170		pr_debug("%s: error: request queue doesn't support dax\n",
171				bdevname(bdev, buf));
172		return false;
173	}
174
175	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
176	if (!dax_dev) {
177		pr_debug("%s: error: device does not support dax\n",
178				bdevname(bdev, buf));
179		return false;
180	}
181
182	id = dax_read_lock();
183	ret = dax_supported(dax_dev, bdev, blocksize, 0,
184			i_size_read(bdev->bd_inode) / 512);
185	dax_read_unlock(id);
186
187	put_dax(dax_dev);
188
189	return ret;
190}
191EXPORT_SYMBOL_GPL(__bdev_dax_supported);
192#endif
193
194enum dax_device_flags {
195	/* !alive + rcu grace period == no new operations / mappings */
196	DAXDEV_ALIVE,
197	/* gate whether dax_flush() calls the low level flush routine */
198	DAXDEV_WRITE_CACHE,
199	/* flag to check if device supports synchronous flush */
200	DAXDEV_SYNC,
201};
202
203/**
204 * struct dax_device - anchor object for dax services
205 * @inode: core vfs
206 * @cdev: optional character interface for "device dax"
207 * @host: optional name for lookups where the device path is not available
208 * @private: dax driver private data
209 * @flags: state and boolean properties
210 */
211struct dax_device {
212	struct hlist_node list;
213	struct inode inode;
214	struct cdev cdev;
215	const char *host;
216	void *private;
217	unsigned long flags;
218	const struct dax_operations *ops;
219};
220
221static ssize_t write_cache_show(struct device *dev,
222		struct device_attribute *attr, char *buf)
223{
224	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
225	ssize_t rc;
226
227	WARN_ON_ONCE(!dax_dev);
228	if (!dax_dev)
229		return -ENXIO;
230
231	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
232	put_dax(dax_dev);
233	return rc;
234}
235
236static ssize_t write_cache_store(struct device *dev,
237		struct device_attribute *attr, const char *buf, size_t len)
238{
239	bool write_cache;
240	int rc = strtobool(buf, &write_cache);
241	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
242
243	WARN_ON_ONCE(!dax_dev);
244	if (!dax_dev)
245		return -ENXIO;
246
247	if (rc)
248		len = rc;
249	else
250		dax_write_cache(dax_dev, write_cache);
251
252	put_dax(dax_dev);
253	return len;
254}
255static DEVICE_ATTR_RW(write_cache);
256
257static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
258{
259	struct device *dev = container_of(kobj, typeof(*dev), kobj);
260	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
261
262	WARN_ON_ONCE(!dax_dev);
263	if (!dax_dev)
264		return 0;
265
266#ifndef CONFIG_ARCH_HAS_PMEM_API
267	if (a == &dev_attr_write_cache.attr)
268		return 0;
269#endif
270	return a->mode;
271}
272
273static struct attribute *dax_attributes[] = {
274	&dev_attr_write_cache.attr,
275	NULL,
276};
277
278struct attribute_group dax_attribute_group = {
279	.name = "dax",
280	.attrs = dax_attributes,
281	.is_visible = dax_visible,
282};
283EXPORT_SYMBOL_GPL(dax_attribute_group);
284
285/**
286 * dax_direct_access() - translate a device pgoff to an absolute pfn
287 * @dax_dev: a dax_device instance representing the logical memory range
288 * @pgoff: offset in pages from the start of the device to translate
289 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
290 * @kaddr: output parameter that returns a virtual address mapping of pfn
291 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
292 *
293 * Return: negative errno if an error occurs, otherwise the number of
294 * pages accessible at the device relative @pgoff.
295 */
296long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
297		void **kaddr, pfn_t *pfn)
298{
299	long avail;
300
301	if (!dax_dev)
302		return -EOPNOTSUPP;
303
304	if (!dax_alive(dax_dev))
305		return -ENXIO;
306
307	if (nr_pages < 0)
308		return nr_pages;
309
310	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
311			kaddr, pfn);
312	if (!avail)
313		return -ERANGE;
314	return min(avail, nr_pages);
315}
316EXPORT_SYMBOL_GPL(dax_direct_access);
317
318bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
319		int blocksize, sector_t start, sector_t len)
 320{
 321	if (!dax_alive(dax_dev))
322		return false;
323
324	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
 325}
 326
327size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
328		size_t bytes, struct iov_iter *i)
329{
330	if (!dax_alive(dax_dev))
331		return 0;
332
333	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
334}
335EXPORT_SYMBOL_GPL(dax_copy_from_iter);
336
337size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
338		size_t bytes, struct iov_iter *i)
339{
340	if (!dax_alive(dax_dev))
341		return 0;
342
343	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
344}
345EXPORT_SYMBOL_GPL(dax_copy_to_iter);
 346
 347#ifdef CONFIG_ARCH_HAS_PMEM_API
348void arch_wb_cache_pmem(void *addr, size_t size);
349void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
350{
351	if (unlikely(!dax_write_cache_enabled(dax_dev)))
352		return;
353
354	arch_wb_cache_pmem(addr, size);
355}
356#else
357void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
358{
359}
360#endif
361EXPORT_SYMBOL_GPL(dax_flush);
362
363void dax_write_cache(struct dax_device *dax_dev, bool wc)
364{
365	if (wc)
366		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
367	else
368		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
369}
370EXPORT_SYMBOL_GPL(dax_write_cache);
371
372bool dax_write_cache_enabled(struct dax_device *dax_dev)
373{
374	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
375}
376EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
377
378bool __dax_synchronous(struct dax_device *dax_dev)
379{
380	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
381}
382EXPORT_SYMBOL_GPL(__dax_synchronous);
383
384void __set_dax_synchronous(struct dax_device *dax_dev)
385{
386	set_bit(DAXDEV_SYNC, &dax_dev->flags);
387}
388EXPORT_SYMBOL_GPL(__set_dax_synchronous);
389
390bool dax_alive(struct dax_device *dax_dev)
391{
392	lockdep_assert_held(&dax_srcu);
393	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
394}
395EXPORT_SYMBOL_GPL(dax_alive);
396
397static int dax_host_hash(const char *host)
398{
399	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
400}
401
402/*
403 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
404 * that any fault handlers or operations that might have seen
405 * dax_alive(), have completed.  Any operations that start after
406 * synchronize_srcu() has run will abort upon seeing !dax_alive().
407 */
408void kill_dax(struct dax_device *dax_dev)
409{
410	if (!dax_dev)
411		return;
412
413	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
414
415	synchronize_srcu(&dax_srcu);
416
417	spin_lock(&dax_host_lock);
418	hlist_del_init(&dax_dev->list);
419	spin_unlock(&dax_host_lock);
420}
421EXPORT_SYMBOL_GPL(kill_dax);
422
423void run_dax(struct dax_device *dax_dev)
424{
425	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
426}
427EXPORT_SYMBOL_GPL(run_dax);
428
429static struct inode *dax_alloc_inode(struct super_block *sb)
430{
431	struct dax_device *dax_dev;
432	struct inode *inode;
433
434	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
435	if (!dax_dev)
436		return NULL;
437
438	inode = &dax_dev->inode;
439	inode->i_rdev = 0;
440	return inode;
441}
442
443static struct dax_device *to_dax_dev(struct inode *inode)
444{
445	return container_of(inode, struct dax_device, inode);
446}
447
448static void dax_free_inode(struct inode *inode)
449{
450	struct dax_device *dax_dev = to_dax_dev(inode);
451	kfree(dax_dev->host);
452	dax_dev->host = NULL;
453	if (inode->i_rdev)
454		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
455	kmem_cache_free(dax_cache, dax_dev);
456}
457
458static void dax_destroy_inode(struct inode *inode)
459{
460	struct dax_device *dax_dev = to_dax_dev(inode);
461	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
462			"kill_dax() must be called before final iput()\n");
463}
464
465static const struct super_operations dax_sops = {
466	.statfs = simple_statfs,
467	.alloc_inode = dax_alloc_inode,
468	.destroy_inode = dax_destroy_inode,
469	.free_inode = dax_free_inode,
470	.drop_inode = generic_delete_inode,
471};
472
473static int dax_init_fs_context(struct fs_context *fc)
474{
475	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);
476	if (!ctx)
477		return -ENOMEM;
478	ctx->ops = &dax_sops;
479	return 0;
480}
481
482static struct file_system_type dax_fs_type = {
483	.name		= "dax",
484	.init_fs_context = dax_init_fs_context,
485	.kill_sb	= kill_anon_super,
486};
487
488static int dax_test(struct inode *inode, void *data)
489{
490	dev_t devt = *(dev_t *) data;
491
492	return inode->i_rdev == devt;
493}
494
495static int dax_set(struct inode *inode, void *data)
496{
497	dev_t devt = *(dev_t *) data;
498
499	inode->i_rdev = devt;
500	return 0;
501}
502
503static struct dax_device *dax_dev_get(dev_t devt)
504{
505	struct dax_device *dax_dev;
506	struct inode *inode;
507
508	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
509			dax_test, dax_set, &devt);
510
511	if (!inode)
512		return NULL;
513
514	dax_dev = to_dax_dev(inode);
515	if (inode->i_state & I_NEW) {
516		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
517		inode->i_cdev = &dax_dev->cdev;
518		inode->i_mode = S_IFCHR;
519		inode->i_flags = S_DAX;
520		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
521		unlock_new_inode(inode);
522	}
523
524	return dax_dev;
525}
526
527static void dax_add_host(struct dax_device *dax_dev, const char *host)
528{
529	int hash;
530
531	/*
532	 * Unconditionally init dax_dev since it's coming from a
533	 * non-zeroed slab cache
534	 */
535	INIT_HLIST_NODE(&dax_dev->list);
536	dax_dev->host = host;
537	if (!host)
538		return;
539
540	hash = dax_host_hash(host);
541	spin_lock(&dax_host_lock);
542	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
543	spin_unlock(&dax_host_lock);
544}
545
546struct dax_device *alloc_dax(void *private, const char *__host,
547		const struct dax_operations *ops, unsigned long flags)
548{
549	struct dax_device *dax_dev;
550	const char *host;
551	dev_t devt;
552	int minor;
 553
 554	host = kstrdup(__host, GFP_KERNEL);
555	if (__host && !host)
556		return NULL;
557
558	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
559	if (minor < 0)
560		goto err_minor;
561
562	devt = MKDEV(MAJOR(dax_devt), minor);
563	dax_dev = dax_dev_get(devt);
564	if (!dax_dev)
565		goto err_dev;
566
567	dax_add_host(dax_dev, host);
568	dax_dev->ops = ops;
569	dax_dev->private = private;
570	if (flags & DAXDEV_F_SYNC)
571		set_dax_synchronous(dax_dev);
572
573	return dax_dev;
574
575 err_dev:
576	ida_simple_remove(&dax_minor_ida, minor);
577 err_minor:
578	kfree(host);
579	return NULL;
580}
581EXPORT_SYMBOL_GPL(alloc_dax);
582
583void put_dax(struct dax_device *dax_dev)
584{
585	if (!dax_dev)
586		return;
587	iput(&dax_dev->inode);
588}
589EXPORT_SYMBOL_GPL(put_dax);
590
591/**
592 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
593 * @host: alternate name for the device registered by a dax driver
594 */
595struct dax_device *dax_get_by_host(const char *host)
596{
597	struct dax_device *dax_dev, *found = NULL;
598	int hash, id;
599
600	if (!host)
601		return NULL;
602
603	hash = dax_host_hash(host);
604
605	id = dax_read_lock();
606	spin_lock(&dax_host_lock);
607	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
608		if (!dax_alive(dax_dev)
609				|| strcmp(host, dax_dev->host) != 0)
610			continue;
611
612		if (igrab(&dax_dev->inode))
613			found = dax_dev;
614		break;
615	}
616	spin_unlock(&dax_host_lock);
617	dax_read_unlock(id);
618
619	return found;
620}
621EXPORT_SYMBOL_GPL(dax_get_by_host);
622
623/**
624 * inode_dax: convert a public inode into its dax_dev
625 * @inode: An inode with i_cdev pointing to a dax_dev
626 *
627 * Note this is not equivalent to to_dax_dev() which is for private
628 * internal use where we know the inode filesystem type == dax_fs_type.
629 */
630struct dax_device *inode_dax(struct inode *inode)
631{
632	struct cdev *cdev = inode->i_cdev;
633
634	return container_of(cdev, struct dax_device, cdev);
635}
636EXPORT_SYMBOL_GPL(inode_dax);
637
638struct inode *dax_inode(struct dax_device *dax_dev)
639{
640	return &dax_dev->inode;
641}
642EXPORT_SYMBOL_GPL(dax_inode);
643
644void *dax_get_private(struct dax_device *dax_dev)
645{
646	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
647		return NULL;
648	return dax_dev->private;
649}
650EXPORT_SYMBOL_GPL(dax_get_private);
651
652static void init_once(void *_dax_dev)
653{
654	struct dax_device *dax_dev = _dax_dev;
655	struct inode *inode = &dax_dev->inode;
656
657	memset(dax_dev, 0, sizeof(*dax_dev));
658	inode_init_once(inode);
659}
660
661static int dax_fs_init(void)
662{
663	int rc;
664
665	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
666			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
667			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
668			init_once);
669	if (!dax_cache)
670		return -ENOMEM;
671
672	dax_mnt = kern_mount(&dax_fs_type);
673	if (IS_ERR(dax_mnt)) {
674		rc = PTR_ERR(dax_mnt);
675		goto err_mount;
676	}
677	dax_superblock = dax_mnt->mnt_sb;
678
679	return 0;
680
681 err_mount:
682	kmem_cache_destroy(dax_cache);
683
684	return rc;
685}
686
687static void dax_fs_exit(void)
688{
689	kern_unmount(dax_mnt);
690	kmem_cache_destroy(dax_cache);
691}
692
693static int __init dax_core_init(void)
694{
695	int rc;
696
697	rc = dax_fs_init();
698	if (rc)
699		return rc;
700
701	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
702	if (rc)
703		goto err_chrdev;
704
705	rc = dax_bus_init();
706	if (rc)
707		goto err_bus;
708	return 0;
709
710err_bus:
711	unregister_chrdev_region(dax_devt, MINORMASK+1);
712err_chrdev:
713	dax_fs_exit();
714	return 0;
715}
716
717static void __exit dax_core_exit(void)
718{
719	unregister_chrdev_region(dax_devt, MINORMASK+1);
720	ida_destroy(&dax_minor_ida);
721	dax_fs_exit();
722}
723
724MODULE_AUTHOR("Intel Corporation");
725MODULE_LICENSE("GPL v2");
726subsys_initcall(dax_core_init);
727module_exit(dax_core_exit);