/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
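
/*
 * Usage sketch (illustrative, not a complete caller): every dax_device
 * operation is bracketed by dax_read_lock()/dax_read_unlock() so that
 * kill_dax() can use synchronize_srcu() to wait out in-flight users:
 *
 *	int id = dax_read_lock();
 *
 *	if (dax_alive(dax_dev))
 *		len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	dax_read_unlock(id);
 */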

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);
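
/*
 * Worked example (assuming 4K pages): for a partition that starts at
 * sector 2048 (1MiB), sector 0 maps to phys_off = 2048 * 512 = 1MiB,
 * so *pgoff = 256 and the offset is page aligned, i.e. this returns 0.
 * Had the partition started at sector 2055, phys_off would be 1052160
 * and phys_off % PAGE_SIZE == 3584, so the lookup would fail -EINVAL.
 */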

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_queue))
		return NULL;
	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif

/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @sb: The superblock of the device
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: negative errno if unsupported, 0 if supported.
 */
int __bdev_dax_supported(struct super_block *sb, int blocksize)
{
	struct block_device *bdev = sb->s_bdev;
	struct dax_device *dax_dev;
	pgoff_t pgoff;
	int err, id;
	void *kaddr;
	pfn_t pfn;
	long len;

	if (blocksize != PAGE_SIZE) {
		pr_debug("VFS (%s): error: unsupported blocksize for dax\n",
				sb->s_id);
		return -EINVAL;
	}

	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
	if (err) {
		pr_debug("VFS (%s): error: unaligned partition for dax\n",
				sb->s_id);
		return err;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("VFS (%s): error: device does not support dax\n",
				sb->s_id);
		return -EOPNOTSUPP;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);

	put_dax(dax_dev);

	if (len < 1) {
		pr_debug("VFS (%s): error: dax access failed (%ld)\n",
				sb->s_id, len);
		return len < 0 ? len : -EIO;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
	} else if (pfn_t_devmap(pfn)) {
		/* pass */;
	} else {
		pr_debug("VFS (%s): error: dax support not enabled\n",
				sb->s_id);
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
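
/*
 * Mount-time usage sketch (illustrative; the test_opt() policy below is
 * the filesystem's, not this helper's): callers reach this through the
 * bdev_dax_supported() wrapper, assumed here to live in linux/dax.h:
 *
 *	if (test_opt(sb, DAX)) {
 *		err = bdev_dax_supported(sb, sb->s_blocksize);
 *		if (err)
 *			return err;	// or clear the dax mount option
 *	}
 */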
#endif

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
};

/**
 * struct dax_device - anchor object for dax services
 * @list: entry in the global list used for dax_get_by_host() lookups
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations vector supplied by the registering driver
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!test_bit(DAXDEV_WRITE_CACHE,
				&dax_dev->flags));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else if (write_cache)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	umode_t mode = a->mode;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		mode = 0;
#endif
	/* drop the reference taken by dax_get_by_host() */
	put_dax(dax_dev);
	return mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);
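
/*
 * Export sketch (illustrative; the "foo" array name is not from this
 * file): a block driver that registered a dax_device under its disk
 * name can surface .../dax/write_cache by attaching this group to the
 * gendisk's device:
 *
 *	static const struct attribute_group *foo_disk_groups[] = {
 *		&dax_attribute_group,
 *		NULL,
 *	};
 *	...
 *	disk_to_dev(disk)->groups = foo_disk_groups;
 */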

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
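
/*
 * Semantics sketch: the return value is clamped to @nr_pages but may be
 * smaller than requested, so a caller that needs a longer contiguous
 * run iterates (illustrative loop, error handling elided):
 *
 *	while (nr_pages > 0) {
 *		long rc = dax_direct_access(dax_dev, pgoff, nr_pages,
 *				&kaddr, &pfn);
 *		if (rc < 0)
 *			break;
 *		pgoff += rc;
 *		nr_pages -= rc;
 *	}
 */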

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);

	dax_dev->private = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);

	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);
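
/*
 * Driver lifecycle sketch (illustrative; the "foo" names are not real):
 *
 *	dax_dev = alloc_dax(foo, foo_disk->disk_name, &foo_dax_ops);
 *	if (!dax_dev)
 *		return -ENOMEM;
 *	...
 *	// teardown: fence off new users, wait for old ones, drop the ref
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 */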

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);

/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int __dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_fs_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_fs_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void __dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_fs_type);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_fs_init(void)
{
	int rc;

	rc = __dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		__dax_fs_exit();
	return rc;
}

static void __exit dax_fs_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	__dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_fs_init);
module_exit(dax_fs_exit);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_queue))
		return NULL;
	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif

bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	bool dax_enabled = false;
	pgoff_t pgoff, pgoff_end;
	char buf[BDEVNAME_SIZE];
	void *kaddr, *end_kaddr;
	pfn_t pfn, end_pfn;
	sector_t last_page;
	long len, len2;
	int err, id;

	if (blocksize != PAGE_SIZE) {
		pr_debug("%s: error: unsupported blocksize for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
	if (err) {
		pr_debug("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
	if (err) {
		pr_debug("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
	dax_read_unlock(id);

	if (len < 1 || len2 < 1) {
		pr_debug("%s: error: dax access failed (%ld)\n",
				bdevname(bdev, buf), len < 1 ? len : len2);
		return false;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
		dax_enabled = true;
	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
		struct dev_pagemap *pgmap, *end_pgmap;

		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
				&& pfn_t_to_page(pfn)->pgmap == pgmap
				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
			dax_enabled = true;
		put_dev_pagemap(pgmap);
		put_dev_pagemap(end_pgmap);
	}

	if (!dax_enabled) {
		pr_debug("%s: error: dax support not enabled\n",
				bdevname(bdev, buf));
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
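
/*
 * Wiring sketch (illustrative; the "foo" names are not real): a driver
 * whose device is one linear dax range typically points its
 * ->dax_supported() op at the generic_fsdax_supported() wrapper from
 * linux/dax.h, which lands here:
 *
 *	static const struct dax_operations foo_dax_ops = {
 *		.direct_access = foo_dax_direct_access,
 *		.dax_supported = generic_fsdax_supported,
 *		.copy_from_iter = foo_dax_copy_from_iter,
 *		.copy_to_iter = foo_dax_copy_to_iter,
 *	};
 */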

/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	struct dax_device *dax_dev;
	struct request_queue *q;
	char buf[BDEVNAME_SIZE];
	bool ret;
	int id;

	q = bdev_get_queue(bdev);
	if (!q || !blk_queue_dax(q)) {
		pr_debug("%s: error: request queue doesn't support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("%s: error: device does not support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	ret = dax_supported(dax_dev, bdev, blocksize, 0,
			i_size_read(bdev->bd_inode) / 512);
	dax_read_unlock(id);

	put_dax(dax_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
	/* flag to check if device supports synchronous flush */
	DAXDEV_SYNC,
};

/**
 * struct dax_device - anchor object for dax services
 * @list: entry in the global list used for dax_get_by_host() lookups
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations vector supplied by the registering driver
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else
		dax_write_cache(dax_dev, write_cache);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	umode_t mode = a->mode;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		mode = 0;
#endif
	/* drop the reference taken by dax_get_by_host() */
	put_dax(dax_dev);
	return mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);

bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	if (!dax_alive(dax_dev))
		return false;

	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
}

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);
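
/*
 * I/O path sketch (modeled on the fs/dax.c actor loop; locking and
 * error handling elided): the filesystem first maps the range, then
 * funnels the user buffer through the driver's copy routines so that
 * cache maintenance and media-error handling stay with the driver:
 *
 *	map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
 *			&kaddr, NULL);
 *	if (map_len < 0)
 *		return map_len;
 *	if (iov_iter_rw(iter) == WRITE)
 *		xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, size, iter);
 *	else
 *		xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, size, iter);
 */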

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool __dax_synchronous(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);

void __set_dax_synchronous(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);
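
/*
 * Usage sketch (illustrative, loosely modeled on the pmem driver): a
 * device where CPU stores are durable without a driver round trip
 * (unlike, say, virtio-pmem which needs a host flush) registers as
 * synchronous at allocation time, a precondition for MAP_SYNC mappings:
 *
 *	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops,
 *			DAXDEV_F_SYNC);
 *
 * Filesystems then consult this via the dax_synchronous() wrapper in
 * linux/dax.h, which calls __dax_synchronous() above.
 */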

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);

void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_free_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.free_inode = dax_free_inode,
	.drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->ops = &dax_sops;
	return 0;
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.init_fs_context = dax_init_fs_context,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);

	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops, unsigned long flags)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	if (flags & DAXDEV_F_SYNC)
		set_dax_synchronous(dax_dev);

	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);

/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
	dax_fs_exit();
	return rc;
}

static void __exit dax_core_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);