// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/kstrtox.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "btt.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
{
	return pmem->phys_addr + offset;
}

static sector_t to_sect(struct pmem_device *pmem, phys_addr_t offset)
{
	return (offset - pmem->data_offset) >> SECTOR_SHIFT;
}

static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
{
	return (sector << SECTOR_SHIFT) + pmem->data_offset;
}

static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	phys_addr_t phys = pmem_to_phys(pmem, offset);
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

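/* Clear badblocks records for the given range and notify sysfs watchers. */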
static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
{
	if (blks == 0)
		return;
	badblocks_clear(&pmem->bb, sector, blks);
	if (pmem->bb_state)
		sysfs_notify_dirent(pmem->bb_state);
}

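/*
 * Ask the nvdimm bus to clear poison in the given range, then restore the
 * affected linear-map pages and invalidate the CPU cache over that range.
 * Returns the number of bytes cleared, or a negative error code.
 */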
static long __pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	phys_addr_t phys = pmem_to_phys(pmem, offset);
	long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);

	if (cleared > 0) {
		pmem_mkpage_present(pmem, offset, cleared);
		arch_invalidate_pmem(pmem->virt_addr + offset, len);
	}
	return cleared;
}

static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	long cleared = __pmem_clear_poison(pmem, offset, len);

	if (cleared < 0)
		return BLK_STS_IOERR;

	pmem_clear_bb(pmem, to_sect(pmem, offset), cleared >> SECTOR_SHIFT);
	if (cleared < len)
		return BLK_STS_IOERR;
	return BLK_STS_OK;
}

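/*
 * Copy from a (possibly highmem) page into pmem, one kmap_atomic() mapping at
 * a time, using cache-flushing (non-temporal) stores.
 */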
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

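/*
 * Copy from pmem into a page with a machine-check-safe copy so that consumed
 * poison is reported as an I/O error instead of taking down the kernel.
 */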
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = to_offset(pmem, sector);
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	phys_addr_t pmem_off = to_offset(pmem, sector);
	void *pmem_addr = pmem->virt_addr + pmem_off;

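	/*
	 * Known-bad ranges are cleared through the nvdimm bus before the
	 * write below; if the poison cannot be fully cleared the write is
	 * failed rather than silently leaving poison behind.
	 */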
	if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) {
		blk_status_t rc = pmem_clear_poison(pmem, pmem_off, len);

		if (rc != BLK_STS_OK)
			return rc;
	}

	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);

	return BLK_STS_OK;
}

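/*
 * Handle a bio synchronously: honor REQ_PREFLUSH up front, copy each segment
 * directly to or from pmem, and flush again for REQ_FUA before completion.
 */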
static void pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
}

242/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
243__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
244 long nr_pages, enum dax_access_mode mode, void **kaddr,
245 pfn_t *pfn)
246{
247 resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
248 sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT;
249 unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT;
250 struct badblocks *bb = &pmem->bb;
251 sector_t first_bad;
252 int num_bad;
253
254 if (kaddr)
255 *kaddr = pmem->virt_addr + offset;
256 if (pfn)
257 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
258
259 if (bb->count &&
260 badblocks_check(bb, sector, num, &first_bad, &num_bad)) {
261 long actual_nr;
262
263 if (mode != DAX_RECOVERY_WRITE)
264 return -EHWPOISON;
265
		/*
		 * The recovery stride is set to the kernel page size because
		 * the underlying driver and firmware clear-poison functions
		 * don't appear to handle large chunks (such as 2MiB) reliably.
		 */
		actual_nr = PHYS_PFN(
			PAGE_ALIGN((first_bad - sector) << SECTOR_SHIFT));
		dev_dbg(pmem->bb.dev, "start sector(%llu), nr_pages(%ld), first_bad(%llu), actual_nr(%ld)\n",
				sector, nr_pages, first_bad, actual_nr);
		if (actual_nr)
			return actual_nr;
		return 1;
	}

	/*
	 * If badblocks are present but not in the range, limit known good range
	 * to the requested range.
	 */
	if (bb->count)
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner = THIS_MODULE,
	.submit_bio = pmem_submit_bio,
};

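/*
 * Zero a page of the namespace by writing ZERO_PAGE(0) through the normal
 * write path, which also clears any known poison in that range.
 */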
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
		void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
}

/*
 * The recovery write thread starts out as a normal pwrite thread. When the
 * filesystem is told about a potential media error in the range, it turns
 * the normal pwrite into a dax_recovery_write.
 *
 * The recovery write consists of clearing the media poison, clearing the
 * page HWPoison bit, re-enabling page-wide read-write permission, flushing
 * the caches, and finally writing.  A competing pread thread needs to be
 * held off during the recovery process since the data read back might not
 * be valid; this is achieved by clearing the badblock records only after
 * the recovery write completes.  Competing recovery write threads are
 * already serialized by the writer lock held by dax_iomap_rw().
 */
static size_t pmem_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);
	size_t olen, len, off;
	phys_addr_t pmem_off;
	struct device *dev = pmem->bb.dev;
	long cleared;

	off = offset_in_page(addr);
	len = PFN_PHYS(PFN_UP(off + bytes));
	if (!is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) >> SECTOR_SHIFT, len))
		return _copy_from_iter_flushcache(addr, bytes, i);

	/*
	 * A range that is not page-aligned cannot be recovered. This should
	 * not happen unless something else went wrong.
	 */
	if (off || !PAGE_ALIGNED(bytes)) {
		dev_dbg(dev, "Found poison, but addr(%p) or bytes(%#zx) not page aligned\n",
				addr, bytes);
		return 0;
	}

	pmem_off = PFN_PHYS(pgoff) + pmem->data_offset;
	cleared = __pmem_clear_poison(pmem, pmem_off, len);
	if (cleared > 0 && cleared < len) {
		dev_dbg(dev, "poison cleared only %ld out of %zu bytes\n",
				cleared, len);
		return 0;
	}
	if (cleared < 0) {
		dev_dbg(dev, "poison clear failed: %ld\n", cleared);
		return 0;
	}

	olen = _copy_from_iter_flushcache(addr, bytes, i);
	pmem_clear_bb(pmem, to_sect(pmem, pmem_off), cleared >> SECTOR_SHIFT);

	return olen;
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.zero_page_range = pmem_dax_zero_page_range,
	.recovery_write = pmem_recovery_write,
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;

	return sprintf(buf, "%d\n", !!dax_write_cache_enabled(pmem->dax_dev));
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;
	bool write_cache;
	int rc;

	rc = kstrtobool(buf, &write_cache);
	if (rc)
		return rc;
	dax_write_cache(pmem->dax_dev, write_cache);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

static const struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	dax_remove_host(pmem->disk);
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);

	put_disk(pmem->disk);
}

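/*
 * dev_pagemap ->memory_failure() hook: translate the failing pfn range into a
 * namespace-relative offset and forward it to the dax holder (typically the
 * filesystem) so affected owners can be notified.
 */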
static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
		unsigned long pfn, unsigned long nr_pages, int mf_flags)
{
	struct pmem_device *pmem =
			container_of(pgmap, struct pmem_device, pgmap);
	u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
	u64 len = nr_pages << PAGE_SHIFT;

	return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.memory_failure = pmem_pagemap_memory_failure,
};

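/*
 * Set up the block device and dax device for a pmem namespace: map the
 * namespace (with or without struct pages), populate badblocks, and register
 * the gendisk and dax host.
 */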
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct queue_limits lim = {
		.logical_block_size = pmem_sector_size(ndns),
		.physical_block_size = PAGE_SIZE,
		.max_hw_sectors = UINT_MAX,
		.features = BLK_FEAT_WRITE_CACHE |
			    BLK_FEAT_SYNCHRONOUS,
	};
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	if (fua)
		lim.features |= BLK_FEAT_FUA;
	if (is_nd_pfn(dev) || pmem_should_map_pages(dev))
		lim.features |= BLK_FEAT_DAX;

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	disk = blk_alloc_disk(&lim, nid);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	pmem->disk = disk;
	pmem->pgmap.owner = pmem;
	pmem->pfn_flags = PFN_DEV;
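	/*
	 * Three mapping modes: a pfn namespace gets struct pages via
	 * devm_memremap_pages() using the layout described by the pfn
	 * superblock, a raw namespace that should map pages gets struct
	 * pages over the whole resource, and otherwise the range is simply
	 * memremap()ed without struct pages (and without DAX).
	 */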
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr)) {
		rc = PTR_ERR(addr);
		goto out;
	}
	pmem->virt_addr = addr;

	disk->fops = &pmem_fops;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, &pmem_dax_ops);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		if (rc != -EOPNOTSUPP)
			goto out;
	} else {
		set_dax_nocache(dax_dev);
		set_dax_nomc(dax_dev);
		if (is_nvdimm_sync(nd_region))
			set_dax_synchronous(dax_dev);
		pmem->dax_dev = dax_dev;
		rc = dax_add_host(dax_dev, disk);
		if (rc)
			goto out_cleanup_dax;
		dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	}
	rc = device_add_disk(dev, disk, pmem_attribute_groups);
	if (rc)
		goto out_remove_host;
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");
	return 0;

out_remove_host:
	dax_remove_host(pmem->disk);
out_cleanup_dax:
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
out:
	put_disk(pmem->disk);
	return rc;
}

static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: either there is no
	 * info reserve block, or we found a valid info reserve block
	 * but failed to initialize the pfn superblock.
	 *
	 * For the first case, consider the namespace as a raw pmem
	 * namespace and attach a disk.
	 *
	 * For the latter, consider this a success and advance the
	 * namespace seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static void nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

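/*
 * Re-read the poison list from the nvdimm bus for the namespace backing this
 * device (accounting for btt and pfn layouts) and notify any sysfs
 * 'badblocks' watchers.
 */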
static void pmem_revalidate_poison(struct device *dev)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

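/* Re-evaluate the read-only state of the attached btt or pmem disk. */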
static void pmem_revalidate_region(struct device *dev)
{
	struct pmem_device *pmem;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);
		struct btt *btt = nd_btt->btt;

		nvdimm_check_and_set_ro(btt->btt_disk);
		return;
	}

	pmem = dev_get_drvdata(dev);
	nvdimm_check_and_set_ro(pmem->disk);
}

static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	switch (event) {
	case NVDIMM_REVALIDATE_POISON:
		pmem_revalidate_poison(dev);
		break;
	case NVDIMM_REVALIDATE_REGION:
		pmem_revalidate_region(dev);
		break;
	default:
		dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
		break;
	}
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_DESCRIPTION("NVDIMM Persistent Memory Driver");
MODULE_LICENSE("GPL v2");