/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mount.h>
#include <linux/pfn_t.h>
#include <linux/hash.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax.h"

static dev_t dax_devt;
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
module_param(nr_dax, int, S_IRUGO);
static struct vfsmount *dax_mnt;
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");

/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region id for a memory range
 * @ida: instance id allocator for child dax devices
 * @base: linear address corresponding to @res
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @res: physical address range of the region
 * @pfn_flags: identify whether the pfns are page-backed or not
 */
struct dax_region {
	int id;
	struct ida ida;
	void *base;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct resource res;
	unsigned long pfn_flags;
};

/**
 * struct dax_dev - subdivision of a dax region
 * @region: parent region
 * @inode: backing inode shared by all mappings of the device
 * @dev: device backing the character device
 * @cdev: core chardev data
 * @alive: !alive + rcu grace period == no new mappings can be established
 * @id: child id in the region
 * @num_resources: number of physical address extents in this device
 * @res: array of physical address ranges
 */
struct dax_dev {
	struct dax_region *region;
	struct inode *inode;
	struct device dev;
	struct cdev cdev;
	bool alive;
	int id;
	int num_resources;
	struct resource res[0];
};

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	dax_region = dev_get_drvdata(dev);
	if (dax_region)
		rc = sprintf(buf, "%d\n", dax_region->id);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	dax_region = dev_get_drvdata(dev);
	if (dax_region)
		rc = sprintf(buf, "%llu\n", (unsigned long long)
				resource_size(&dax_region->res));
	device_unlock(dev);

	return rc;
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	dax_region = dev_get_drvdata(dev);
	if (dax_region)
		rc = sprintf(buf, "%u\n", dax_region->align);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(align);

static struct attribute *dax_region_attributes[] = {
	&dev_attr_region_size.attr,
	&dev_attr_align.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

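/*
 * Inodes for the internal "dax" pseudo-filesystem are carved from a
 * dedicated slab cache and, as usual for inodes, freed only after an RCU
 * grace period so that lock-free lookups can complete safely.
 */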
static struct inode *dax_alloc_inode(struct super_block *sb)
{
	return kmem_cache_alloc(dax_cache, GFP_KERNEL);
}

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(dax_cache, inode);
}

static void dax_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

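/*
 * dax_test() and dax_set() are the iget5_locked() match / init callbacks;
 * inodes are hashed by their struct cdev pointer so that each device-dax
 * instance maps to exactly one inode in the internal mount.
 */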
static int dax_test(struct inode *inode, void *data)
{
	return inode->i_cdev == data;
}

static int dax_set(struct inode *inode, void *data)
{
	inode->i_cdev = data;
	return 0;
}

static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
{
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, cdev);

	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		inode->i_rdev = devt;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}
	return inode;
}

static void init_once(void *inode)
{
	inode_init_once(inode);
}

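/*
 * Set up the inode slab cache and the anonymous "dax" filesystem that
 * backs the per-device inodes; torn down again by dax_inode_exit().
 */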
static int dax_inode_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_inode_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_type);
	kmem_cache_destroy(dax_cache);
}

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}

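/**
 * alloc_dax_region - allocate and register a new dax region
 * @parent: device to which the region's sysfs attributes are attached
 * @region_id: kernel-wide unique id for this region
 * @res: physical address range covered by the region
 * @align: allocation and mapping alignment for child dax devices
 * @addr: linear mapping of @res, stashed as @base for later use
 * @pfn_flags: pfn flags (PFN_DEV / PFN_MAP) for pfns handed out from @res
 *
 * On success the region's reference count is elevated so that both the
 * caller and the devm-managed dax_region_unregister() action hold a
 * reference; returns NULL on failure.
 */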
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long pfn_flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(res->start, align)
			|| !IS_ALIGNED(resource_size(res), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	memcpy(&dax_region->res, res, sizeof(*res));
	dax_region->pfn_flags = pfn_flags;
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	ida_init(&dax_region->ida);
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->base = addr;
	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	kref_get(&dax_region->kref);
	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static struct dax_dev *to_dax_dev(struct device *dev)
{
	return container_of(dev, struct dax_dev, dev);
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dax_dev->num_resources; i++)
		size += resource_size(&dax_dev->res[i]);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dax_device_attributes[] = {
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group dax_device_attribute_group = {
	.attrs = dax_device_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dax_device_attribute_group,
	NULL,
};

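/*
 * Validate that a vma may be backed by this device: the device must still
 * be alive, the mapping must be shared, start and end must be aligned to
 * the region alignment, and pfns without struct page backing additionally
 * require VM_DONTCOPY (MADV_DONTFORK).
 */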
static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dax_dev->region;
	struct device *dev = &dax_dev->dev;
	unsigned long mask;

	if (!dax_dev->alive)
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info(dev, "%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

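/*
 * Translate a page offset into the device to a physical address by walking
 * the device's resource array; returns -1 if the requested range of @size
 * bytes does not fit entirely within one resource.
 */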
static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res;
	phys_addr_t phys;
	int i;

	for (i = 0; i < dax_dev->num_resources; i++) {
		res = &dax_dev->res[i];
		phys = pgoff * PAGE_SIZE + res->start;
		if (phys >= res->start && phys <= res->end)
			break;
		pgoff -= PHYS_PFN(resource_size(res));
	}

	if (i < dax_dev->num_resources) {
		res = &dax_dev->res[i];
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

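/*
 * Handle a PTE-sized (PAGE_SIZE) fault: look up the physical address for
 * the faulting page offset and insert a single pfn with vm_insert_mixed().
 * Regions with an alignment larger than PAGE_SIZE refuse PTE faults.
 */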
static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	int rc = VM_FAULT_SIGBUS;
	phys_addr_t phys;
	pfn_t pfn;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dax_dev, vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	rc = vm_insert_mixed(vma, vmf->address, pfn);

	if (rc == -ENOMEM)
		return VM_FAULT_OOM;
	if (rc < 0 && rc != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int rc;
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
			? "write" : "read", vma->vm_start, vma->vm_end);
	rcu_read_lock();
	rc = __dax_dev_fault(dax_dev, vma, vmf);
	rcu_read_unlock();

	return rc;
}

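/*
 * Handle a PMD-sized fault: the region must hand out devmap (page-backed)
 * pfns, the aligned fault address must lie entirely inside the vma, and a
 * region alignment smaller than PMD_SIZE falls back to PTE mappings.
 */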
static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
		struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd,
		unsigned int flags)
{
	unsigned long pmd_addr = addr & PMD_MASK;
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dax_dev, vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "%s: region lacks devmap pfn flags\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vma, pmd_addr);
	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
			flags & FAULT_FLAG_WRITE);
}

static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned int flags)
{
	int rc;
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (flags & FAULT_FLAG_WRITE)
			? "write" : "read", vma->vm_start, vma->vm_end);

	rcu_read_lock();
	rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
	rcu_read_unlock();

	return rc;
}

static const struct vm_operations_struct dax_dev_vm_ops = {
	.fault = dax_dev_fault,
	.pmd_fault = dax_dev_pmd_fault,
};

static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dax_dev *dax_dev = filp->private_data;
	int rc;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);

	rc = check_vma(dax_dev, vma, __func__);
	if (rc)
		return rc;

	vma->vm_ops = &dax_dev_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dax_dev || addr)
		goto out;

	dax_region = dax_dev->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

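/*
 * On open, point the chardev inode and the file at the device's internal
 * address_space so that every mapping of this device shares one mapping
 * tree; unregister_dax_dev() can then invalidate them all with a single
 * unmap_mapping_range() call.
 */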
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev;

	dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	inode->i_mapping = dax_dev->inode->i_mapping;
	inode->i_mapping->host = dax_dev->inode;
	filp->f_mapping = inode->i_mapping;
	filp->private_data = dax_dev;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
};

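/*
 * Final teardown once the last reference to the device is dropped: return
 * the instance and minor ids, release the region reference taken at
 * creation time, and put the backing inode.
 */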
static void dax_dev_release(struct device *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct dax_region *dax_region = dax_dev->region;

	ida_simple_remove(&dax_region->ida, dax_dev->id);
	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
	dax_region_put(dax_region);
	iput(dax_dev->inode);
	kfree(dax_dev);
}

static void unregister_dax_dev(void *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct cdev *cdev = &dax_dev->cdev;

	dev_dbg(dev, "%s\n", __func__);

	/*
	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
	 * ensuring that any fault handlers that might have seen
	 * dax_dev->alive == true, have completed.  Any fault handlers
	 * that start after synchronize_rcu() has started will abort
	 * upon seeing dax_dev->alive == false.
	 */
	dax_dev->alive = false;
	synchronize_rcu();
	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
	cdev_del(cdev);
	device_unregister(dev);
}

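/**
 * devm_create_dax_dev - create a child device-dax instance in a region
 * @dax_region: parent region, pinned for the lifetime of the new device
 * @res: array of physical address ranges backing the device
 * @count: number of entries in @res
 *
 * Every range in @res must honor the region alignment.  The device is
 * unregistered automatically, via devm, when the region's parent device
 * is unbound.
 */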
struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
		struct resource *res, int count)
{
	struct device *parent = dax_region->dev;
	struct dax_dev *dax_dev;
	int rc = 0, minor, i;
	struct device *dev;
	struct cdev *cdev;
	dev_t dev_t;

	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
	if (!dax_dev)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		if (!IS_ALIGNED(res[i].start, dax_region->align)
				|| !IS_ALIGNED(resource_size(&res[i]),
					dax_region->align)) {
			rc = -EINVAL;
			break;
		}
		dax_dev->res[i].start = res[i].start;
		dax_dev->res[i].end = res[i].end;
	}

	if (i < count)
		goto err_id;

	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
	if (dax_dev->id < 0) {
		rc = dax_dev->id;
		goto err_id;
	}

	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_minor;
	}

	dev_t = MKDEV(MAJOR(dax_devt), minor);
	dev = &dax_dev->dev;
	dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
	if (!dax_dev->inode) {
		rc = -ENOMEM;
		goto err_inode;
	}

	/* device_initialize() so cdev can reference kobj parent */
	device_initialize(dev);

	cdev = &dax_dev->cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;
	cdev->kobj.parent = &dev->kobj;
	rc = cdev_add(&dax_dev->cdev, dev_t, 1);
	if (rc)
		goto err_cdev;

	/* from here on we're committed to teardown via dax_dev_release() */
	dax_dev->num_resources = count;
	dax_dev->alive = true;
	dax_dev->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = dev_t;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dax_dev_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
	if (rc)
		return ERR_PTR(rc);

	return dax_dev;

 err_cdev:
	iput(dax_dev->inode);
 err_inode:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
	kfree(dax_dev);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);

static int __init dax_init(void)
{
	int rc;

	rc = dax_inode_init();
	if (rc)
		return rc;

	nr_dax = max(nr_dax, 256);
	rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
	if (rc)
		goto err_chrdev;

	dax_class = class_create(THIS_MODULE, "dax");
	if (IS_ERR(dax_class)) {
		rc = PTR_ERR(dax_class);
		goto err_class;
	}

	return 0;

 err_class:
	unregister_chrdev_region(dax_devt, nr_dax);
 err_chrdev:
	dax_inode_exit();
	return rc;
}

static void __exit dax_exit(void)
{
	class_destroy(dax_class);
	unregister_chrdev_region(dax_devt, nr_dax);
	ida_destroy(&dax_minor_ida);
	dax_inode_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);