v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"

static void nd_pfn_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	dev_dbg(dev, "trace\n");
	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
	kfree(nd_pfn->uuid);
	kfree(nd_pfn);
}

struct nd_pfn *to_nd_pfn(struct device *dev)
{
	struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

	WARN_ON(!is_nd_pfn(dev));
	return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	switch (nd_pfn->mode) {
	case PFN_MODE_RAM:
		return sprintf(buf, "ram\n");
	case PFN_MODE_PMEM:
		return sprintf(buf, "pmem\n");
	default:
		return sprintf(buf, "none\n");
	}
}

static ssize_t mode_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc = 0;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	if (dev->driver)
		rc = -EBUSY;
	else {
		size_t n = len - 1;

		if (strncmp(buf, "pmem\n", n) == 0
				|| strncmp(buf, "pmem", n) == 0) {
			nd_pfn->mode = PFN_MODE_PMEM;
		} else if (strncmp(buf, "ram\n", n) == 0
				|| strncmp(buf, "ram", n) == 0)
			nd_pfn->mode = PFN_MODE_RAM;
		else if (strncmp(buf, "none\n", n) == 0
				|| strncmp(buf, "none", n) == 0)
			nd_pfn->mode = PFN_MODE_NONE;
		else
			rc = -EINVAL;
	}
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);
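
/*
 * Usage note (editor's sketch, not in the original source): mode is a
 * sysfs attribute, so an unbound pfn device can be switched with e.g.
 *
 *	echo pmem > /sys/bus/nd/devices/pfn0.0/mode
 *
 * The device name pfn0.0 is an assumption for illustration; once a
 * driver has attached, writes fail with -EBUSY per the check above.
 */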

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	return sprintf(buf, "%ld\n", nd_pfn->align);
}

static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
{
	alignments[0] = PAGE_SIZE;

	if (has_transparent_hugepage()) {
		alignments[1] = HPAGE_PMD_SIZE;
		if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
			alignments[2] = HPAGE_PUD_SIZE;
	}

	return alignments;
}
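
/*
 * For reference (editor's note): on a typical x86_64 configuration this
 * yields { 4K, 2M, 1G } when PMD- and PUD-sized transparent hugepages
 * are available; the values are illustrative, the authoritative list is
 * whatever the helper above computes at runtime.
 */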

/*
 * Use pmd mapping if supported as default alignment
 */
static unsigned long nd_pfn_default_alignment(void)
{
	if (has_transparent_hugepage())
		return HPAGE_PMD_SIZE;
	return PAGE_SIZE;
}

static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
	ssize_t rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_size_select_store(dev, buf, &nd_pfn->align,
			nd_pfn_supported_alignments(aligns));
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	if (nd_pfn->uuid)
		return sprintf(buf, "%pUb\n", nd_pfn->uuid);
	return sprintf(buf, "\n");
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	nd_device_lock(dev);
	rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nd_device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t namespace_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	rc = sprintf(buf, "%s\n", nd_pfn->ndns
			? dev_name(&nd_pfn->ndns->dev) : "");
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(namespace);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
				+ start_pad + offset);
	} else {
		/* no address to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%llu\n", (unsigned long long)
				resource_size(&nsio->res) - start_pad
				- end_trunc - offset);
	} else {
		/* no size to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(size);

static ssize_t supported_alignments_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };

	return nd_size_select_show(0,
			nd_pfn_supported_alignments(aligns), buf);
}
static DEVICE_ATTR_RO(supported_alignments);

static struct attribute *nd_pfn_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_namespace.attr,
	&dev_attr_uuid.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_supported_alignments.attr,
	NULL,
};

static struct attribute_group nd_pfn_attribute_group = {
	.attrs = nd_pfn_attributes,
};

const struct attribute_group *nd_pfn_attribute_groups[] = {
	&nd_pfn_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct device_type nd_pfn_device_type = {
	.name = "nd_pfn",
	.release = nd_pfn_release,
	.groups = nd_pfn_attribute_groups,
};

bool is_nd_pfn(struct device *dev)
{
	return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);

struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns)
{
	struct device *dev;

	if (!nd_pfn)
		return NULL;

	nd_pfn->mode = PFN_MODE_NONE;
	nd_pfn->align = nd_pfn_default_alignment();
	dev = &nd_pfn->dev;
	device_initialize(&nd_pfn->dev);
	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
		dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
				dev_name(ndns->claim));
		put_device(dev);
		return NULL;
	}
	return dev;
}
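
/*
 * Editor's note: on the failure path above, put_device() drops the
 * initial reference taken by device_initialize(), which funnels through
 * nd_pfn_release() to return the ida id and free the allocation, so
 * callers never have to clean up a failed devinit manually.
 */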

static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
	if (!nd_pfn)
		return NULL;

	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
	if (nd_pfn->id < 0) {
		kfree(nd_pfn);
		return NULL;
	}

	dev = &nd_pfn->dev;
	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
	dev->type = &nd_pfn_device_type;
	dev->parent = &nd_region->dev;

	return nd_pfn;
}

struct device *nd_pfn_create(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nd_pfn = nd_pfn_alloc(nd_region);
	dev = nd_pfn_devinit(nd_pfn, NULL);

	__nd_device_register(dev);
	return dev;
}

/*
 * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
 * space associated with the namespace. If the memmap is set to DRAM, then
 * this is a no-op. Since the memmap area is freshly initialized during
 * probe, we have an opportunity to clear any badblocks in this area.
 */
static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
{
	struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	void *zero_page = page_address(ZERO_PAGE(0));
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	int num_bad, meta_num, rc, bb_present;
	sector_t first_bad, meta_start;
	struct nd_namespace_io *nsio;

	if (nd_pfn->mode != PFN_MODE_PMEM)
		return 0;

	nsio = to_nd_namespace_io(&ndns->dev);
	meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
	meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;

	/*
	 * re-enable the namespace with correct size so that we can access
	 * the device memmap area.
	 */
	devm_namespace_disable(&nd_pfn->dev, ndns);
	rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
	if (rc)
		return rc;

	do {
		unsigned long zero_len;
		u64 nsoff;

		bb_present = badblocks_check(&nd_region->bb, meta_start,
				meta_num, &first_bad, &num_bad);
		if (bb_present) {
			dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
					num_bad, first_bad);
			nsoff = ALIGN_DOWN((nd_region->ndr_start
					+ (first_bad << 9)) - nsio->res.start,
					PAGE_SIZE);
			zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
			while (zero_len) {
				unsigned long chunk = min(zero_len, PAGE_SIZE);

				rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
							chunk, 0);
				if (rc)
					break;

				zero_len -= chunk;
				nsoff += chunk;
			}
			if (rc) {
				dev_err(&nd_pfn->dev,
					"error clearing %x badblocks at %llx\n",
					num_bad, first_bad);
				return rc;
			}
		}
	} while (bb_present);

	return 0;
}

static bool nd_supported_alignment(unsigned long align)
{
	int i;
	unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };

	if (align == 0)
		return false;

	nd_pfn_supported_alignments(supported);
	for (i = 0; supported[i]; i++)
		if (align == supported[i])
			return true;
	return false;
}

/**
 * nd_pfn_validate - read and validate info-block
 * @nd_pfn: fsdax namespace runtime state / properties
 * @sig: 'devdax' or 'fsdax' signature
 *
 * Upon return the info-block buffer contents (->pfn_sb) are
 * indeterminate when validation fails, and a coherent info-block
 * otherwise.
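 *
 * Return (editor's summary of the checks below): 0 when a coherent
 * info-block is found; -ENODEV when it is absent, fails checksum, or
 * belongs to a different namespace; -ENXIO on read failure or an
 * unrecognized mode; -EOPNOTSUPP when the recorded settings are
 * incompatible with the running kernel; -ENOMEM on allocation failure.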
 */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	u64 checksum, offset;
	struct resource *res;
	enum nd_pfn_mode mode;
	struct nd_namespace_io *nsio;
	unsigned long align, start_pad;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);

	if (!pfn_sb || !ndns)
		return -ENODEV;

	if (!is_memory(nd_pfn->dev.parent))
		return -ENODEV;

	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
		return -ENXIO;

	if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
		return -ENODEV;

	checksum = le64_to_cpu(pfn_sb->checksum);
	pfn_sb->checksum = 0;
	if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
		return -ENODEV;
	pfn_sb->checksum = cpu_to_le64(checksum);

	if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
		return -ENODEV;

	if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
		pfn_sb->start_pad = 0;
		pfn_sb->end_trunc = 0;
	}

	if (__le16_to_cpu(pfn_sb->version_minor) < 2)
		pfn_sb->align = 0;

	if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
		pfn_sb->page_struct_size = cpu_to_le16(64);
		pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
	}

	switch (le32_to_cpu(pfn_sb->mode)) {
	case PFN_MODE_RAM:
	case PFN_MODE_PMEM:
		break;
	default:
		return -ENXIO;
	}

	align = le32_to_cpu(pfn_sb->align);
	offset = le64_to_cpu(pfn_sb->dataoff);
	start_pad = le32_to_cpu(pfn_sb->start_pad);
	if (align == 0)
		align = 1UL << ilog2(offset);
	mode = le32_to_cpu(pfn_sb->mode);

	if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
			(mode == PFN_MODE_PMEM)) {
		dev_err(&nd_pfn->dev,
				"init failed, page size mismatch %d\n",
				le32_to_cpu(pfn_sb->page_size));
		return -EOPNOTSUPP;
	}

	if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
			(mode == PFN_MODE_PMEM)) {
		dev_err(&nd_pfn->dev,
				"init failed, struct page size mismatch %d\n",
				le16_to_cpu(pfn_sb->page_struct_size));
		return -EOPNOTSUPP;
	}

	/*
	 * Check whether we support the alignment. For DAX, if the
	 * superblock alignment does not match, we won't initialize
	 * the device.
	 */
	if (!nd_supported_alignment(align) &&
			!memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
		dev_err(&nd_pfn->dev, "init failed, alignment mismatch: %ld:%ld\n",
				nd_pfn->align, align);
		return -EOPNOTSUPP;
	}

	if (!nd_pfn->uuid) {
		/*
		 * When probing a namespace via nd_pfn_probe() the uuid
		 * is NULL (see: nd_pfn_devinit()), so we init settings
		 * from pfn_sb.
		 */
		nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
		if (!nd_pfn->uuid)
			return -ENOMEM;
		nd_pfn->align = align;
		nd_pfn->mode = mode;
	} else {
		/*
		 * When probing a pfn / dax instance we validate the
		 * live settings against the pfn_sb
		 */
		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
			return -ENODEV;

		/*
		 * If the uuid validates, but other settings mismatch
		 * return EINVAL because userspace has managed to change
		 * the configuration without specifying new
		 * identification.
		 */
		if (nd_pfn->align != align || nd_pfn->mode != mode) {
			dev_err(&nd_pfn->dev,
					"init failed, settings mismatch\n");
			dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
					nd_pfn->align, align, nd_pfn->mode,
					mode);
			return -EOPNOTSUPP;
		}
	}

	if (align > nvdimm_namespace_capacity(ndns)) {
		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
				align, nvdimm_namespace_capacity(ndns));
		return -EOPNOTSUPP;
	}

	/*
	 * These warnings are verbose because they can only trigger in
	 * the case where the physical address alignment of the
	 * namespace has changed since the pfn superblock was
	 * established.
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	res = &nsio->res;
	if (offset >= resource_size(res)) {
		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
				dev_name(&ndns->dev));
		return -EOPNOTSUPP;
	}

	if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
		dev_err(&nd_pfn->dev,
				"bad offset: %#llx dax disabled align: %#lx\n",
				offset, align);
		return -EOPNOTSUPP;
	}

	if (!IS_ALIGNED(res->start + le32_to_cpu(pfn_sb->start_pad),
				memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "resource start misaligned\n");
		return -EOPNOTSUPP;
	}

	if (!IS_ALIGNED(res->end + 1 - le32_to_cpu(pfn_sb->end_trunc),
				memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "resource end misaligned\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
	int rc;
	struct nd_pfn *nd_pfn;
	struct device *pfn_dev;
	struct nd_pfn_sb *pfn_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
	case NVDIMM_CCLASS_PFN:
		break;
	default:
		return -ENODEV;
	}

	nvdimm_bus_lock(&ndns->dev);
	nd_pfn = nd_pfn_alloc(nd_region);
	pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!pfn_dev)
		return -ENOMEM;
	pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
	nd_pfn = to_nd_pfn(pfn_dev);
	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn, PFN_SIG);
	dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
	if (rc < 0) {
		nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
		put_device(pfn_dev);
	} else
		__nd_device_register(pfn_dev);

	return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);

/*
 * We hotplug memory at sub-section granularity, pad the reserved area
 * from the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return SUBSECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}
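
/*
 * Worked example (editor's illustration, not from the source): with 4K
 * pages and 2M sub-sections, a namespace base of 0x1e0200000 maps to
 * base_pfn 0x1e0200, which is already sub-section aligned, so the
 * reserve is just the info-block pages; an unaligned base additionally
 * reserves the pfns between the sub-section boundary and the base.
 */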

static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	u64 offset = le64_to_cpu(pfn_sb->dataoff);
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	u32 reserve = nd_info_block_reserve();
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	resource_size_t end = nsio->res.end - end_trunc;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
		.end_pfn = PHYS_PFN(end),
	};

	memcpy(res, &nsio->res, sizeof(*res));
	res->start += start_pad;
	res->end -= end_trunc;

	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < reserve)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		memcpy(altmap, &__altmap, sizeof(*altmap));
		altmap->free = PHYS_PFN(offset - reserve);
		altmap->alloc = 0;
		pgmap->flags |= PGMAP_ALTMAP_VALID;
	} else
		return -ENXIO;

	return 0;
}

static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t start, size;
	struct nd_region *nd_region;
	unsigned long npfns, align;
	u32 end_trunc;
	struct nd_pfn_sb *pfn_sb;
	phys_addr_t offset;
	const char *sig;
	u64 checksum;
	int rc;

	pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	if (is_nd_dax(&nd_pfn->dev))
		sig = DAX_SIG;
	else
		sig = PFN_SIG;

	rc = nd_pfn_validate(nd_pfn, sig);
	if (rc == 0)
		return nd_pfn_clear_memmap_errors(nd_pfn);
	if (rc != -ENODEV)
		return rc;

	/* no info block, do init */
	memset(pfn_sb, 0, sizeof(*pfn_sb));

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		return -ENXIO;
	}

	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	start = nsio->res.start;
	size = resource_size(&nsio->res);
	npfns = PHYS_PFN(size - SZ_8K);
	align = max(nd_pfn->align, memremap_compat_align());

	/*
	 * When @start is misaligned fail namespace creation. See
	 * the 'struct nd_pfn_sb' commentary on why ->start_pad is not
	 * an option.
	 */
	if (!IS_ALIGNED(start, memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
				dev_name(&ndns->dev), &start,
				memremap_compat_align());
		return -EINVAL;
	}
	end_trunc = start + size - ALIGN_DOWN(start + size, align);
	if (nd_pfn->mode == PFN_MODE_PMEM) {
		/*
		 * The altmap should be padded out to the block size used
		 * when populating the vmemmap. This *should* be equal to
		 * PMD_SIZE for most architectures.
		 *
		 * Also make sure size of struct page is less than 64. We
		 * want to make sure we use large enough size here so that
		 * we don't have a dynamic reserve space depending on
		 * struct page size. But we also want to make sure we notice
		 * when we end up adding new elements to struct page.
		 */
		BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
		offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
			- start;
	} else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, align) - start;
	else
		return -ENXIO;

	if (offset >= size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}

	npfns = PHYS_PFN(size - offset - end_trunc);
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(4);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	pfn_sb->align = cpu_to_le32(nd_pfn->align);
	pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
	pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nd_pfn_clear_memmap_errors(nd_pfn);
	if (rc)
		return rc;

	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}

/*
 * Determine the effective resource range and vmem_altmap from an nd_pfn
 * instance.
 */
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	/* we need a valid pfn_sb before we can init a dev_pagemap */
	return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
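
The exported entry point above is consumed by the pmem and dax drivers during probe. A minimal sketch of that call pattern follows; the function name example_map_namespace and the simplified error handling are illustrative assumptions, not code from this file:

	/* Illustrative only: mirrors the is_nd_pfn()/nvdimm_setup_pfn() pattern. */
	static void *example_map_namespace(struct device *dev,
			struct dev_pagemap *pgmap)
	{
		/* pfn personality devices carry a validated info-block */
		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			int rc = nvdimm_setup_pfn(nd_pfn, pgmap);

			if (rc)
				return ERR_PTR(rc);
		}
		/* pgmap now reflects start_pad/end_trunc and the altmap */
		return devm_memremap_pages(dev, pgmap);
	}
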
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"

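/*
 * Editor's note: CONFIG_NVDIMM_KMSAN opts in to using pmem for the page
 * map even when memory-debug options grow struct page beyond
 * MAX_STRUCT_PAGE_SIZE; see the sizing policy in nd_pfn_init() below.
 */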
static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);

static void nd_pfn_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	dev_dbg(dev, "trace\n");
	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
	ida_free(&nd_region->pfn_ida, nd_pfn->id);
	kfree(nd_pfn->uuid);
	kfree(nd_pfn);
}

struct nd_pfn *to_nd_pfn(struct device *dev)
{
	struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

	WARN_ON(!is_nd_pfn(dev));
	return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	switch (nd_pfn->mode) {
	case PFN_MODE_RAM:
		return sprintf(buf, "ram\n");
	case PFN_MODE_PMEM:
		return sprintf(buf, "pmem\n");
	default:
		return sprintf(buf, "none\n");
	}
}

static ssize_t mode_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc = 0;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (dev->driver)
		rc = -EBUSY;
	else {
		size_t n = len - 1;

		if (strncmp(buf, "pmem\n", n) == 0
				|| strncmp(buf, "pmem", n) == 0) {
			nd_pfn->mode = PFN_MODE_PMEM;
		} else if (strncmp(buf, "ram\n", n) == 0
				|| strncmp(buf, "ram", n) == 0)
			nd_pfn->mode = PFN_MODE_RAM;
		else if (strncmp(buf, "none\n", n) == 0
				|| strncmp(buf, "none", n) == 0)
			nd_pfn->mode = PFN_MODE_NONE;
		else
			rc = -EINVAL;
	}
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	return sprintf(buf, "%ld\n", nd_pfn->align);
}

static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
{
	alignments[0] = PAGE_SIZE;

	if (has_transparent_hugepage()) {
		alignments[1] = HPAGE_PMD_SIZE;
		if (has_transparent_pud_hugepage())
			alignments[2] = HPAGE_PUD_SIZE;
	}

	return alignments;
}

/*
 * Use pmd mapping if supported as default alignment
 */
static unsigned long nd_pfn_default_alignment(void)
{
	if (has_transparent_hugepage())
		return HPAGE_PMD_SIZE;
	return PAGE_SIZE;
}

static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_size_select_store(dev, buf, &nd_pfn->align,
			nd_pfn_supported_alignments(aligns));
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	if (nd_pfn->uuid)
		return sprintf(buf, "%pUb\n", nd_pfn->uuid);
	return sprintf(buf, "\n");
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t namespace_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	rc = sprintf(buf, "%s\n", nd_pfn->ndns
			? dev_name(&nd_pfn->ndns->dev) : "");
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(namespace);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
				+ start_pad + offset);
	} else {
		/* no address to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%llu\n", (unsigned long long)
				resource_size(&nsio->res) - start_pad
				- end_trunc - offset);
	} else {
		/* no size to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(size);

static ssize_t supported_alignments_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };

	return nd_size_select_show(0,
			nd_pfn_supported_alignments(aligns), buf);
}
static DEVICE_ATTR_RO(supported_alignments);

static struct attribute *nd_pfn_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_namespace.attr,
	&dev_attr_uuid.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_supported_alignments.attr,
	NULL,
};

static struct attribute_group nd_pfn_attribute_group = {
	.attrs = nd_pfn_attributes,
};

const struct attribute_group *nd_pfn_attribute_groups[] = {
	&nd_pfn_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct device_type nd_pfn_device_type = {
	.name = "nd_pfn",
	.release = nd_pfn_release,
	.groups = nd_pfn_attribute_groups,
};

bool is_nd_pfn(struct device *dev)
{
	return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);

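/*
 * Editor's note: a dedicated lockdep class for pfn device mutexes keeps
 * the nested device_lock() patterns used across parent regions,
 * namespaces, and claiming devices from raising false-positive lockdep
 * reports.
 */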
static struct lock_class_key nvdimm_pfn_key;

struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns)
{
	struct device *dev;

	if (!nd_pfn)
		return NULL;

	nd_pfn->mode = PFN_MODE_NONE;
	nd_pfn->align = nd_pfn_default_alignment();
	dev = &nd_pfn->dev;
	device_initialize(&nd_pfn->dev);
	lockdep_set_class(&nd_pfn->dev.mutex, &nvdimm_pfn_key);
	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
		dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
				dev_name(ndns->claim));
		put_device(dev);
		return NULL;
	}
	return dev;
}

static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
	if (!nd_pfn)
		return NULL;

	nd_pfn->id = ida_alloc(&nd_region->pfn_ida, GFP_KERNEL);
	if (nd_pfn->id < 0) {
		kfree(nd_pfn);
		return NULL;
	}

	dev = &nd_pfn->dev;
	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
	dev->type = &nd_pfn_device_type;
	dev->parent = &nd_region->dev;

	return nd_pfn;
}

struct device *nd_pfn_create(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nd_pfn = nd_pfn_alloc(nd_region);
	dev = nd_pfn_devinit(nd_pfn, NULL);

	nd_device_register(dev);
	return dev;
}

/*
 * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
 * space associated with the namespace. If the memmap is set to DRAM, then
 * this is a no-op. Since the memmap area is freshly initialized during
 * probe, we have an opportunity to clear any badblocks in this area.
 */
static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
{
	struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	void *zero_page = page_address(ZERO_PAGE(0));
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	int num_bad, meta_num, rc, bb_present;
	sector_t first_bad, meta_start;
	struct nd_namespace_io *nsio;

	if (nd_pfn->mode != PFN_MODE_PMEM)
		return 0;

	nsio = to_nd_namespace_io(&ndns->dev);
	meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
	meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;

	/*
	 * re-enable the namespace with correct size so that we can access
	 * the device memmap area.
	 */
	devm_namespace_disable(&nd_pfn->dev, ndns);
	rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
	if (rc)
		return rc;

	do {
		unsigned long zero_len;
		u64 nsoff;

		bb_present = badblocks_check(&nd_region->bb, meta_start,
				meta_num, &first_bad, &num_bad);
		if (bb_present) {
			dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
					num_bad, first_bad);
			nsoff = ALIGN_DOWN((nd_region->ndr_start
					+ (first_bad << 9)) - nsio->res.start,
					PAGE_SIZE);
			zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
			while (zero_len) {
				unsigned long chunk = min(zero_len, PAGE_SIZE);

				rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
							chunk, 0);
				if (rc)
					break;

				zero_len -= chunk;
				nsoff += chunk;
			}
			if (rc) {
				dev_err(&nd_pfn->dev,
					"error clearing %x badblocks at %llx\n",
					num_bad, first_bad);
				return rc;
			}
		}
	} while (bb_present);

	return 0;
}

static bool nd_supported_alignment(unsigned long align)
{
	int i;
	unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };

	if (align == 0)
		return false;

	nd_pfn_supported_alignments(supported);
	for (i = 0; supported[i]; i++)
		if (align == supported[i])
			return true;
	return false;
}

/**
 * nd_pfn_validate - read and validate info-block
 * @nd_pfn: fsdax namespace runtime state / properties
 * @sig: 'devdax' or 'fsdax' signature
 *
 * Upon return the info-block buffer contents (->pfn_sb) are
 * indeterminate when validation fails, and a coherent info-block
 * otherwise.
 */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	u64 checksum, offset;
	struct resource *res;
	enum nd_pfn_mode mode;
	resource_size_t res_size;
	struct nd_namespace_io *nsio;
	unsigned long align, start_pad, end_trunc;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&ndns->dev);

	if (!pfn_sb || !ndns)
		return -ENODEV;

	if (!is_memory(nd_pfn->dev.parent))
		return -ENODEV;

	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
		return -ENXIO;

	if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
		return -ENODEV;

	checksum = le64_to_cpu(pfn_sb->checksum);
	pfn_sb->checksum = 0;
	if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
		return -ENODEV;
	pfn_sb->checksum = cpu_to_le64(checksum);

	if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
		return -ENODEV;

	if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
		pfn_sb->start_pad = 0;
		pfn_sb->end_trunc = 0;
	}

	if (__le16_to_cpu(pfn_sb->version_minor) < 2)
		pfn_sb->align = 0;

	if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
		pfn_sb->page_struct_size = cpu_to_le16(64);
		pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
	}

	switch (le32_to_cpu(pfn_sb->mode)) {
	case PFN_MODE_RAM:
	case PFN_MODE_PMEM:
		break;
	default:
		return -ENXIO;
	}

	align = le32_to_cpu(pfn_sb->align);
	offset = le64_to_cpu(pfn_sb->dataoff);
	start_pad = le32_to_cpu(pfn_sb->start_pad);
	end_trunc = le32_to_cpu(pfn_sb->end_trunc);
	if (align == 0)
		align = 1UL << ilog2(offset);
	mode = le32_to_cpu(pfn_sb->mode);

	if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
			(mode == PFN_MODE_PMEM)) {
		dev_err(&nd_pfn->dev,
				"init failed, page size mismatch %d\n",
				le32_to_cpu(pfn_sb->page_size));
		return -EOPNOTSUPP;
	}

	if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
			(mode == PFN_MODE_PMEM)) {
		dev_err(&nd_pfn->dev,
				"init failed, struct page size mismatch %d\n",
				le16_to_cpu(pfn_sb->page_struct_size));
		return -EOPNOTSUPP;
	}

	/*
	 * Check whether we support the alignment. For DAX, if the
	 * superblock alignment does not match, we won't initialize
	 * the device.
	 */
	if (!nd_supported_alignment(align) &&
			!memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
		dev_err(&nd_pfn->dev, "init failed, alignment mismatch: %ld:%ld\n",
				nd_pfn->align, align);
		return -EOPNOTSUPP;
	}

	if (!nd_pfn->uuid) {
		/*
		 * When probing a namespace via nd_pfn_probe() the uuid
		 * is NULL (see: nd_pfn_devinit()), so we init settings
		 * from pfn_sb.
		 */
		nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
		if (!nd_pfn->uuid)
			return -ENOMEM;
		nd_pfn->align = align;
		nd_pfn->mode = mode;
	} else {
		/*
		 * When probing a pfn / dax instance we validate the
		 * live settings against the pfn_sb
		 */
		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
			return -ENODEV;

		/*
		 * If the uuid validates, but other settings mismatch
		 * return EINVAL because userspace has managed to change
		 * the configuration without specifying new
		 * identification.
		 */
		if (nd_pfn->align != align || nd_pfn->mode != mode) {
			dev_err(&nd_pfn->dev,
					"init failed, settings mismatch\n");
			dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
					nd_pfn->align, align, nd_pfn->mode,
					mode);
			return -EOPNOTSUPP;
		}
	}

	if (align > nvdimm_namespace_capacity(ndns)) {
		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
				align, nvdimm_namespace_capacity(ndns));
		return -EOPNOTSUPP;
	}

	/*
	 * These warnings are verbose because they can only trigger in
	 * the case where the physical address alignment of the
	 * namespace has changed since the pfn superblock was
	 * established.
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	res = &nsio->res;
	res_size = resource_size(res);
	if (offset >= res_size) {
		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
				dev_name(&ndns->dev));
		return -EOPNOTSUPP;
	}

	if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
		dev_err(&nd_pfn->dev,
				"bad offset: %#llx dax disabled align: %#lx\n",
				offset, align);
		return -EOPNOTSUPP;
	}

	if (!IS_ALIGNED(res->start + start_pad, memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "resource start misaligned\n");
		return -EOPNOTSUPP;
	}

	if (!IS_ALIGNED(res->end + 1 - end_trunc, memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "resource end misaligned\n");
		return -EOPNOTSUPP;
	}

	if (offset >= (res_size - start_pad - end_trunc)) {
		dev_err(&nd_pfn->dev, "bad offset with small namespace\n");
		return -EOPNOTSUPP;
	}
	return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
	int rc;
	struct nd_pfn *nd_pfn;
	struct device *pfn_dev;
	struct nd_pfn_sb *pfn_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
	case NVDIMM_CCLASS_PFN:
		break;
	default:
		return -ENODEV;
	}

	nvdimm_bus_lock(&ndns->dev);
	nd_pfn = nd_pfn_alloc(nd_region);
	pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!pfn_dev)
		return -ENOMEM;
	pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
	nd_pfn = to_nd_pfn(pfn_dev);
	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn, PFN_SIG);
	dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
	if (rc < 0) {
		nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
		put_device(pfn_dev);
	} else
		nd_device_register(pfn_dev);

	return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);

/*
 * We hotplug memory at sub-section granularity, pad the reserved area
 * from the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return SUBSECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}

static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	struct range *range = &pgmap->range;
	struct vmem_altmap *altmap = &pgmap->altmap;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	u64 offset = le64_to_cpu(pfn_sb->dataoff);
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	u32 reserve = nd_info_block_reserve();
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	resource_size_t end = nsio->res.end - end_trunc;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
		.end_pfn = PHYS_PFN(end),
	};

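	/*
	 * Editor's note: newer kernels describe the mapped span with
	 * pgmap->range plus nr_range instead of the struct resource used
	 * in v5.9; the bounds below are the same padded namespace bounds.
	 */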
	*range = (struct range) {
		.start = nsio->res.start + start_pad,
		.end = nsio->res.end - end_trunc,
	};
	pgmap->nr_range = 1;
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < reserve)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		memcpy(altmap, &__altmap, sizeof(*altmap));
		altmap->free = PHYS_PFN(offset - reserve);
		altmap->alloc = 0;
		pgmap->flags |= PGMAP_ALTMAP_VALID;
	} else
		return -ENXIO;

	return 0;
}

static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t start, size;
	struct nd_region *nd_region;
	unsigned long npfns, align;
	u32 end_trunc;
	struct nd_pfn_sb *pfn_sb;
	phys_addr_t offset;
	const char *sig;
	u64 checksum;
	int rc;

	pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	if (is_nd_dax(&nd_pfn->dev))
		sig = DAX_SIG;
	else
		sig = PFN_SIG;

	rc = nd_pfn_validate(nd_pfn, sig);
	if (rc == 0)
		return nd_pfn_clear_memmap_errors(nd_pfn);
	if (rc != -ENODEV)
		return rc;

	/* no info block, do init */
	memset(pfn_sb, 0, sizeof(*pfn_sb));

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		return -ENXIO;
	}

	start = nsio->res.start;
	size = resource_size(&nsio->res);
	npfns = PHYS_PFN(size - SZ_8K);
	align = max(nd_pfn->align, memremap_compat_align());

	/*
	 * When @start is misaligned fail namespace creation. See
	 * the 'struct nd_pfn_sb' commentary on why ->start_pad is not
	 * an option.
	 */
	if (!IS_ALIGNED(start, memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
				dev_name(&ndns->dev), &start,
				memremap_compat_align());
		return -EINVAL;
	}
	end_trunc = start + size - ALIGN_DOWN(start + size, align);
	if (nd_pfn->mode == PFN_MODE_PMEM) {
		unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;

		/*
		 * The altmap should be padded out to the block size used
		 * when populating the vmemmap. This *should* be equal to
		 * PMD_SIZE for most architectures.
		 *
		 * Also make sure size of struct page is less than
		 * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
		 * face of production kernel configurations that reduce the
		 * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
		 * kernel configurations that increase the 'struct page' size
		 * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
		 * for continuing with the capacity that will be wasted when
		 * reverting to a production kernel configuration. Otherwise,
		 * those configurations are blocked by default.
		 */
		if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
			if (page_struct_override)
				page_map_size = sizeof(struct page) * npfns;
			else {
				dev_err(&nd_pfn->dev,
					"Memory debug options prevent using pmem for the page map\n");
				return -EINVAL;
			}
		}
		offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
	} else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, align) - start;
	else
		return -ENXIO;

	if (offset >= (size - end_trunc)) {
		/* This results in zero size devices */
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}

	npfns = PHYS_PFN(size - offset - end_trunc);
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(4);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	pfn_sb->align = cpu_to_le32(nd_pfn->align);
	if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
		pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
	else
		pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
	pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nd_pfn_clear_memmap_errors(nd_pfn);
	if (rc)
		return rc;

	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}

/*
 * Determine the effective resource range and vmem_altmap from an nd_pfn
 * instance.
 */
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	/* we need a valid pfn_sb before we can init a dev_pagemap */
	return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);