// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nspm->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nspm->id);
	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_io(const struct device *dev);

static int is_uuid_busy(struct device *dev, void *data)
{
	uuid_t *uuid1 = data, *uuid2 = NULL;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid2 = nspm->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		uuid2 = nd_pfn->uuid;
	}

	if (uuid2 && uuid_equal(uuid1, uuid2))
		return -EBUSY;

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_region(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

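/*
 * pmem_should_map_pages - determine whether a namespace warrants a
 * struct-page (ZONE_DEVICE) memmap. Requires CONFIG_ZONE_DEVICE, a
 * region that advertises ND_REGION_PAGEMAP, a raw (unclaimed)
 * namespace that does not alias System RAM, and a write-back pmem
 * mapping type on this architecture.
 */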
bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_namespace_io *nsio;

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

	if (ndns->force_raw)
		return false;

	nsio = to_nd_namespace_io(dev);
	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
				IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED)
		return false;

	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);

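/*
 * pmem_sector_size - report the logical sector size for a namespace,
 * honoring a 4096-byte lbasize from the namespace label and falling
 * back to 512 bytes for label-less (is_namespace_io()) namespaces or
 * labels that carry the default size.
 */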
unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (nspm->lbasize == 0 || nspm->lbasize == 512)
			/* default */;
		else if (nspm->lbasize == 4096)
			return 4096;
		else
			dev_WARN(&ndns->dev, "unsupported sector size: %lu\n",
					nspm->lbasize);
	}

	/*
	 * There is no namespace label (is_namespace_io()), or the label
	 * indicates the default sector size.
	 */
	return 512;
}
EXPORT_SYMBOL(pmem_sector_size);

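/*
 * nvdimm_namespace_disk_name - format the block device name for a
 * namespace into @name, i.e. "pmem<region-id>[.<ns-id>]", with an "s"
 * suffix appended when the namespace is claimed by a BTT. Returns
 * NULL for unrecognized namespace types.
 */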
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		int nsidx = 0;

		if (is_namespace_pmem(&ndns->dev)) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(&ndns->dev);
			nsidx = nspm->id;
		}

		if (nsidx)
			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
					suffix ? suffix : "");
		else
			sprintf(name, "pmem%d%s", nd_region->id,
					suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

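/*
 * nd_dev_to_uuid - return the uuid recorded for a pmem namespace, or
 * the nil uuid for any other (or NULL) device.
 */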
const uuid_t *nd_dev_to_uuid(struct device *dev)
{
	if (dev && is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	}
	return &uuid_null;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

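/*
 * __alt_name_store - update the friendly name of an idle namespace.
 * The input is trimmed and must fit in NSLABEL_NAME_LEN; -EBUSY is
 * returned if the namespace is attached to a driver or claimed.
 */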
static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kstrndup(buf, len, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}

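/*
 * nd_namespace_label_update - sync the current namespace configuration
 * out to the label area. Only label writes that yield a valid
 * namespace, or that delete an existing allocation (size == 0 with a
 * uuid set), are forwarded to nd_pmem_namespace_label_update().
 */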
static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

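/*
 * scan_free - free @n bytes of @label_id dpa on one dimm, walking the
 * label set from the highest allocation downward: whole resources are
 * deleted while more than @n bytes remain, and the final resource is
 * shrunk in place via adjust_resource().
 */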
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		rc = adjust_resource(res, res->start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered. Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	int rc = 0;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}


/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one
 * exists). If reserving PMEM any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	unsigned long align;

	align = nd_region->align / nd_region->ndr_mappings;
	valid->start = ALIGN(valid->start, align);
	valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;

	if (valid->start >= valid->end)
		goto invalid;

	if (is_reserve)
		return;

	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;

	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;

	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;

 invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}


enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

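/*
 * scan_allocate - satisfy an @n byte allocation request for @label_id
 * within one dimm mapping. Free space before, between, and after
 * existing dpa resources is considered in turn; where possible an
 * adjacent resource with the same label id is grown, otherwise a new
 * resource is allocated. Returns the number of bytes that could not
 * be allocated.
 */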
static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;

	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;

	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	if (n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

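/*
 * merge_dpa - coalesce physically adjacent dpa resources that carry
 * the same label id into a single resource; a no-op for "pmem"
 * prefixed label ids.
 */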
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}

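/*
 * __reserve_free_pmem - claim all remaining pmem capacity on the
 * nvdimm passed in @data under the "pmem-reserve" label id so that it
 * is temporarily excluded from other allocations.
 */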
int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered. For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA. For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc;

		rem = scan_allocate(nd_region, nd_mapping, label_id, rem);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}

static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		WARN_ON_ONCE(1);
		size = 0;
	}

 out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}

static bool uuid_not_set(const uuid_t *uuid, struct device *dev,
		const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

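/*
 * __size_store - resize a namespace allocation. @val must be a
 * multiple of the region alignment and is divided evenly across the
 * interleave-set mappings; the per-dimm allocation is then grown or
 * shrunk to match. A size of zero deletes the allocation and, when
 * the namespace is not the seed or 0th device and is unclaimed,
 * unregisters the device.
 */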
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	uuid_t *uuid = NULL;

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		id = nspm->id;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "not associated with dimm(s)\n");
		return -ENXIO;
	}

	div_u64_rem(val, nd_region->align, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %luK aligned\n", val,
				nd_region->align / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set need to be enabled
		 * for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_allocatable_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	}

	/*
	 * Try to delete the namespace if we deleted all of its
	 * allocation, this is not the seed or 0th device for the
	 * region, and it is not actively claimed by a btt, pfn, or dax
	 * instance.
	 */
	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);

	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	/* setting size zero == 'delete namespace' */
	if (rc == 0 && val == 0 && is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		kfree(nspm->uuid);
		nspm->uuid = NULL;
	}

	dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
	int i;
	bool locked = false;
	struct device *dev = &ndns->dev;
	struct nd_region *nd_region = to_nd_region(dev->parent);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
			dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
			locked = true;
		}
	}
	return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);

static uuid_t *namespace_to_uuid(struct device *dev)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	}
	return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	uuid_t *uuid = namespace_to_uuid(dev);

	if (IS_ERR(uuid))
		return PTR_ERR(uuid);
	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
				 struct device *dev, uuid_t *new_uuid,
				 uuid_t **old_uuid)
{
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace. Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, 0);
	nd_label_gen_id(&new_label_id, new_uuid, 0);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);

		mutex_lock(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			struct nd_label_id label_id;
			uuid_t uuid;

			if (!nd_label)
				continue;
			nsl_get_uuid(ndd, nd_label, &uuid);
			nd_label_gen_id(&label_id, &uuid,
					nsl_get_flags(ndd, nd_label));
			if (strcmp(old_label_id.id, label_id.id) == 0)
				set_bit(ND_LABEL_REAP, &label_ent->flags);
		}
		mutex_unlock(&nd_mapping->lock);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}


static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	uuid_t *uuid = NULL;
	uuid_t **ns_uuid;
	ssize_t rc = 0;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nd_size_select_show(nspm->lbasize,
				pmem_lbasize_supported, buf);
	}
	return -ENXIO;
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const unsigned long *supported;
	unsigned long *lbasize;
	ssize_t rc = 0;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		lbasize = &nspm->lbasize;
		supported = pmem_lbasize_supported;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_size_select_store(dev, buf, lbasize, supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
			buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	uuid_t *uuid = NULL;
	int count = 0, i;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

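/*
 * btt_claim_class - pick the BTT format version based on the label
 * versions found across the interleave set: v1.1 labels (or no label
 * support at all) select BTT1.1, while v1.2 labels or uninitialized
 * label index blocks select BTT2.0. Mixed results return -ENXIO.
 */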
static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;

		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}

		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
	 * block is found, a v1.1 label for any mapping will set bit 1, and a
	 * v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, nsindex of NULL
	 * implies labels haven't been initialized yet, and when they are,
	 * they will be of the 1.2 format, so we can assume BTT2.0.
	 *
	 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
	 * found, we enforce BTT2.0.
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces).
	 */
	switch (loop_bitmask) {
	case 0:
	case 2:
		return NVDIMM_CCLASS_BTT;
	case 1:
	case 4:
		return NVDIMM_CCLASS_BTT2;
	default:
		return -ENXIO;
	}
}


static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static int __holder_class_store(struct device *dev, const char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (sysfs_streq(buf, "btt")) {
		int rc = btt_claim_class(dev);

		if (rc < NVDIMM_CCLASS_NONE)
			return rc;
		ndns->claim_class = rc;
	} else if (sysfs_streq(buf, "pfn"))
		ndns->claim_class = NVDIMM_CCLASS_PFN;
	else if (sysfs_streq(buf, "dax"))
		ndns->claim_class = NVDIMM_CCLASS_DAX;
	else if (sysfs_streq(buf, ""))
		ndns->claim_class = NVDIMM_CCLASS_NONE;
	else
		return -EINVAL;

	return 0;
}

static ssize_t holder_class_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __holder_class_store(dev, buf);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t holder_class_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	if (ndns->claim_class == NVDIMM_CCLASS_NONE)
		rc = sprintf(buf, "\n");
	else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
			(ndns->claim_class == NVDIMM_CCLASS_BTT2))
		rc = sprintf(buf, "btt\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
		rc = sprintf(buf, "pfn\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
		rc = sprintf(buf, "dax\n");
	else
		rc = sprintf(buf, "<unknown>\n");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(holder_class);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;

	device_lock(dev);
	claim = ndns->claim;
	if (claim && is_nd_btt(claim))
		mode = "safe";
	else if (claim && is_nd_pfn(claim))
		mode = "memory";
	else if (claim && is_nd_dax(claim))
		mode = "dax";
	else if (!claim && pmem_should_map_pages(dev))
		mode = "memory";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	&dev_attr_holder_class.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (is_namespace_pmem(dev)) {
		if (a == &dev_attr_size.attr)
			return 0644;

		return a->mode;
	}

	/* base is_namespace_io() attributes */
	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
	    a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
	    a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
	    a == &dev_attr_resource.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
	.groups = nd_namespace_attribute_groups,
};

static const struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
	.groups = nd_namespace_attribute_groups,
};

static bool is_namespace_pmem(const struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_io(const struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}

struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
				dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	if (nvdimm_namespace_locked(ndns))
		return ERR_PTR(-EACCES);

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Note, alignment validation for fsdax and devdax mode
	 * namespaces happens in nd_pfn_validate() where infoblock
	 * padding parameters can be applied.
	 */
	if (pmem_should_map_pages(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
		struct resource *res = &nsio->res;

		if (!IS_ALIGNED(res->start | (res->end + 1),
				memremap_compat_align())) {
			dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
		resource_size_t size)
{
	return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
}
EXPORT_SYMBOL_GPL(devm_namespace_enable);

void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
{
	devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
}
EXPORT_SYMBOL_GPL(devm_namespace_disable);

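/*
 * create_namespace_io - publish a single io-namespace covering the
 * entire region for label-less dimms. The returned device array is
 * NULL-terminated so callers can iterate to the end.
 */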
static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;

	devs[0] = dev;
	return devs;
}

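/*
 * has_uuid_at_pos - check that some dimm in the region carries a label
 * with @uuid, a matching interleave-set @cookie, and interleave
 * position @pos; a dimm with duplicate entries for @uuid fails the
 * scan.
 */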
static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid,
			    u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_interleave_set *nd_set = nd_region->nd_set;
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		bool found_uuid = false;

		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			u16 position;

			if (!nd_label)
				continue;
			position = nsl_get_position(ndd, nd_label);

			if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
				continue;

			if (!nsl_uuid_equal(ndd, nd_label, uuid))
				continue;

			if (!nsl_validate_type_guid(ndd, nd_label,
						&nd_set->type_guid))
				continue;

			if (found_uuid) {
				dev_dbg(ndd->dev, "duplicate entry for uuid\n");
				return false;
			}
			found_uuid = true;
			if (!nsl_validate_nlabel(nd_region, ndd, nd_label))
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found)
			break;
	}
	return found != NULL;
}

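/*
 * select_pmem_id - for each mapping, find the label matching @pmem_id,
 * verify that its dpa range falls within the range published in the
 * NFIT, and move it to the front of the mapping's label list.
 */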
static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
{
	int i;

	if (!pmem_id)
		return -ENODEV;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_label *nd_label = NULL;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		struct nd_label_ent *label_ent;

		lockdep_assert_held(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			nd_label = label_ent->label;
			if (!nd_label)
				continue;
			if (nsl_uuid_equal(ndd, nd_label, pmem_id))
				break;
			nd_label = NULL;
		}

		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}

		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = nsl_get_dpa(ndd, nd_label);
		pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
		if (pmem_start >= hw_start && pmem_start < hw_end
				&& pmem_end <= hw_end && pmem_end > hw_start)
			/* pass */;
		else {
			dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
					dev_name(ndd->dev),
					nsl_uuid_raw(ndd, nd_label));
			return -EINVAL;
		}

		/* move recently validated label to the front of the list */
		list_move(&label_ent->list, &nd_mapping->labels);
	}
	return 0;
}

/**
 * create_namespace_pmem - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_label: target pmem namespace label to evaluate
 */
static struct device *create_namespace_pmem(struct nd_region *nd_region,
					    struct nd_mapping *nd_mapping,
					    struct nd_namespace_label *nd_label)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_index *nsindex =
		to_namespace_index(ndd, ndd->ns_current);
	u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
	struct nd_label_ent *label_ent;
	struct nd_namespace_pmem *nspm;
	resource_size_t size = 0;
	struct resource *res;
	struct device *dev;
	uuid_t uuid;
	int rc = 0;
	u16 i;

	if (cookie == 0) {
		dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
		return ERR_PTR(-ENXIO);
	}

	if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
				nsl_uuid_raw(ndd, nd_label));
		if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
			return ERR_PTR(-EAGAIN);

		dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
				nsl_uuid_raw(ndd, nd_label));
	}

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return ERR_PTR(-ENOMEM);

	nspm->id = -1;
	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nsl_get_uuid(ndd, nd_label, &uuid);
		if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
			continue;
		if (has_uuid_at_pos(nd_region, &uuid, altcookie, i))
			continue;
		break;
	}

	if (i < nd_region->ndr_mappings) {
		struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;

		/*
		 * Give up if we don't find an instance of a uuid at each
		 * position (from 0 to nd_region->ndr_mappings - 1), or if we
		 * find a dimm with two instances of the same uuid.
		 */
		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
				nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label));
		rc = -EINVAL;
		goto err;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1]. In the process,
	 * check that the namespace aligns with interleave-set.
	 */
	nsl_get_uuid(ndd, nd_label, &uuid);
	rc = select_pmem_id(nd_region, &uuid);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_namespace_label *label0;
		struct nvdimm_drvdata *ndd;

		nd_mapping = &nd_region->mapping[i];
		label_ent = list_first_entry_or_null(&nd_mapping->labels,
				typeof(*label_ent), list);
		label0 = label_ent ? label_ent->label : NULL;

		if (!label0) {
			WARN_ON(1);
			continue;
		}

		ndd = to_ndd(nd_mapping);
		size += nsl_get_rawsize(ndd, label0);
		if (nsl_get_position(ndd, label0) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
					 NSLABEL_NAME_LEN, GFP_KERNEL);
		nsl_get_uuid(ndd, label0, &uuid);
		nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		nspm->lbasize = nsl_get_lbasize(ndd, label0);
		nspm->nsio.common.claim_class =
			nsl_get_claim_class(ndd, label0);
	}

	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}

	nd_namespace_pmem_set_resource(nd_region, nspm, size);

	return dev;
 err:
	namespace_pmem_release(dev);
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "invalid label(s)\n");
		break;
	case -ENODEV:
		dev_dbg(&nd_region->dev, "label not found\n");
		break;
	default:
		dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
		break;
	}
	return ERR_PTR(rc);
}

static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
{
	struct nd_namespace_pmem *nspm;
	struct resource *res;
	struct device *dev;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return NULL;

	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nspm->id < 0) {
		kfree(nspm);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
	nd_namespace_pmem_set_resource(nd_region, nspm, 0);

	return dev;
}

static struct lock_class_key nvdimm_namespace_key;

void nd_region_create_ns_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
		return;

	nd_region->ns_seed = nd_namespace_pmem_create(nd_region);

	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create namespace\n");
	else {
		device_initialize(nd_region->ns_seed);
		lockdep_set_class(&nd_region->ns_seed->mutex,
				  &nvdimm_namespace_key);
		nd_device_register(nd_region->ns_seed);
	}
}

void nd_region_create_dax_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->dax_seed = nd_dax_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->dax_seed)
		dev_err(&nd_region->dev, "failed to create dax namespace\n");
}

void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->pfn_seed)
		dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->btt_seed = nd_btt_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->btt_seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
}

static int add_namespace_resource(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, struct device **devs,
		int count)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int i;

	for (i = 0; i < count; i++) {
		uuid_t *uuid = namespace_to_uuid(devs[i]);

		if (IS_ERR(uuid)) {
			WARN_ON(1);
			continue;
		}

		if (!nsl_uuid_equal(ndd, nd_label, uuid))
			continue;
		dev_err(&nd_region->dev,
			"error: conflicting extents for uuid: %pUb\n", uuid);
		return -ENXIO;
	}

	return i;
}

static int cmp_dpa(const void *a, const void *b)
{
	const struct device *dev_a = *(const struct device **) a;
	const struct device *dev_b = *(const struct device **) b;
	struct nd_namespace_pmem *nspm_a, *nspm_b;

	if (is_namespace_io(dev_a))
		return 0;

	nspm_a = to_nd_namespace_pmem(dev_a);
	nspm_b = to_nd_namespace_pmem(dev_b);

	return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
			sizeof(resource_size_t));
}

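/*
 * scan_labels - walk the label set of mapping 0 and instantiate a
 * namespace device for each validated pmem label, sorted by dpa. If
 * no labels validate, a zero-sized seed namespace is published so
 * userspace can start provisioning.
 */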
static struct device **scan_labels(struct nd_region *nd_region)
{
	int i, count = 0;
	struct device *dev, **devs = NULL;
	struct nd_label_ent *label_ent, *e;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;

	/* "safe" because create_namespace_pmem() might list_move() label_ent */
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;
		struct device **__devs;

		if (!nd_label)
			continue;

		/* skip labels that describe extents outside of the region */
		if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
		    nsl_get_dpa(ndd, nd_label) > map_end)
			continue;

		i = add_namespace_resource(nd_region, nd_label, devs, count);
		if (i < 0)
			goto err;
		if (i < count)
			continue;
		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
		if (IS_ERR(dev)) {
			switch (PTR_ERR(dev)) {
			case -EAGAIN:
				/* skip invalid labels */
				continue;
			case -ENODEV:
				/* fallthrough to seed creation */
				break;
			default:
				goto err;
			}
		} else
			devs[count++] = dev;
	}

	dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count,
		count == 1 ? "" : "s");

	if (count == 0) {
		struct nd_namespace_pmem *nspm;

		/* Publish a zero-sized namespace for userspace to configure. */
		nd_mapping_free_labels(nd_mapping);

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;

		nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
		if (!nspm)
			goto err;
		dev = &nspm->nsio.common.dev;
		dev->type = &namespace_pmem_device_type;
		nd_namespace_pmem_set_resource(nd_region, nspm, 0);
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	} else if (is_memory(&nd_region->dev)) {
		/* clean unselected labels */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct list_head *l, *e;
			LIST_HEAD(list);
			int j;

			nd_mapping = &nd_region->mapping[i];
			if (list_empty(&nd_mapping->labels)) {
				WARN_ON(1);
				continue;
			}

			j = count;
			list_for_each_safe(l, e, &nd_mapping->labels) {
				if (!j--)
					break;
				list_move_tail(l, &list);
			}
			nd_mapping_free_labels(nd_mapping);
			list_splice_init(&list, &nd_mapping->labels);
		}
	}

	if (count > 1)
		sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);

	return devs;

 err:
	if (devs) {
		for (i = 0; devs[i]; i++)
			namespace_pmem_release(devs[i]);
		kfree(devs);
	}
	return NULL;
}

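/*
 * Hold every mapping lock (taken in order, dropped in reverse) so the
 * label lists are stable while scan_labels() runs against mapping[0].
 */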
static struct device **create_namespaces(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping;
	struct device **devs;
	int i;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	/* lock down all mappings while we scan labels */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		mutex_lock_nested(&nd_mapping->lock, i);
	}

	devs = scan_labels(nd_region);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		int reverse = nd_region->ndr_mappings - 1 - i;

		nd_mapping = &nd_region->mapping[reverse];
		mutex_unlock(&nd_mapping->lock);
	}

	return devs;
}

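/*
 * Undo init_active_labels(): free the cached label lists, drop the
 * nvdimm_drvdata references, and release the dimm 'busy' counts.
 * Registered as a devm action so it also runs at region teardown.
 */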
static void deactivate_labels(void *region)
{
	struct nd_region *nd_region = region;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = nd_mapping->ndd;
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		mutex_lock(&nd_mapping->lock);
		nd_mapping_free_labels(nd_mapping);
		mutex_unlock(&nd_mapping->lock);

		put_ndd(ndd);
		nd_mapping->ndd = NULL;
		if (ndd)
			atomic_dec(&nvdimm->busy);
	}
}

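/*
 * Pin each enabled dimm in the region and cache its active labels on
 * the corresponding nd_mapping. A locked dimm, or a disabled dimm that
 * requires labels to disambiguate DPA, fails the region probe.
 */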
static int init_active_labels(struct nd_region *nd_region)
{
	int i, rc = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nd_label_ent *label_ent;
		int count, j;

		/*
		 * If the dimm is disabled then we may need to prevent
		 * the region from being activated.
		 */
		if (!ndd) {
			if (test_bit(NDD_LOCKED, &nvdimm->flags))
				/* fail, label data may be unreadable */;
			else if (test_bit(NDD_LABELING, &nvdimm->flags))
				/* fail, labels needed to disambiguate dpa */;
			else
				continue;

			dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev),
					test_bit(NDD_LOCKED, &nvdimm->flags)
					? "locked" : "disabled");
			rc = -ENXIO;
			goto out;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "count: %d\n", count);
		if (!count)
			continue;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
			if (!label_ent)
				break;
			label = nd_label_active(ndd, j);
			label_ent->label = label;

			mutex_lock(&nd_mapping->lock);
			list_add_tail(&label_ent->list, &nd_mapping->labels);
			mutex_unlock(&nd_mapping->lock);
		}

		if (j < count)
			break;
	}

	if (i < nd_region->ndr_mappings)
		rc = -ENOMEM;

out:
	if (rc) {
		deactivate_labels(nd_region);
		return rc;
	}

	return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
					nd_region);
}

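/**
 * nd_region_register_namespaces - scan labels and register namespace devices
 * @nd_region: region to scan for namespaces
 * @err: filled in with the number of namespaces that failed to register
 *
 * Returns the number of namespaces registered, or a negative error code
 * when label initialization fails or no namespace could be registered.
 */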
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
		devs = create_namespaces(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_PMEM) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nspm->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		device_initialize(dev);
		lockdep_set_class(&dev->mutex, &nvdimm_namespace_key);
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (i == 0)
			rc = -ENODEV;
	}
	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}
1/*
2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#include <linux/module.h>
14#include <linux/device.h>
15#include <linux/slab.h>
16#include <linux/pmem.h>
17#include <linux/nd.h>
18#include "nd-core.h"
19#include "nd.h"
20
21static void namespace_io_release(struct device *dev)
22{
23 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
24
25 kfree(nsio);
26}
27
28static void namespace_pmem_release(struct device *dev)
29{
30 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
31
32 kfree(nspm->alt_name);
33 kfree(nspm->uuid);
34 kfree(nspm);
35}
36
37static void namespace_blk_release(struct device *dev)
38{
39 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
40 struct nd_region *nd_region = to_nd_region(dev->parent);
41
42 if (nsblk->id >= 0)
43 ida_simple_remove(&nd_region->ns_ida, nsblk->id);
44 kfree(nsblk->alt_name);
45 kfree(nsblk->uuid);
46 kfree(nsblk->res);
47 kfree(nsblk);
48}
49
50static struct device_type namespace_io_device_type = {
51 .name = "nd_namespace_io",
52 .release = namespace_io_release,
53};
54
55static struct device_type namespace_pmem_device_type = {
56 .name = "nd_namespace_pmem",
57 .release = namespace_pmem_release,
58};
59
60static struct device_type namespace_blk_device_type = {
61 .name = "nd_namespace_blk",
62 .release = namespace_blk_release,
63};
64
65static bool is_namespace_pmem(struct device *dev)
66{
67 return dev ? dev->type == &namespace_pmem_device_type : false;
68}
69
70static bool is_namespace_blk(struct device *dev)
71{
72 return dev ? dev->type == &namespace_blk_device_type : false;
73}
74
75static bool is_namespace_io(struct device *dev)
76{
77 return dev ? dev->type == &namespace_io_device_type : false;
78}
79
80static int is_uuid_busy(struct device *dev, void *data)
81{
82 u8 *uuid1 = data, *uuid2 = NULL;
83
84 if (is_namespace_pmem(dev)) {
85 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
86
87 uuid2 = nspm->uuid;
88 } else if (is_namespace_blk(dev)) {
89 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
90
91 uuid2 = nsblk->uuid;
92 } else if (is_nd_btt(dev)) {
93 struct nd_btt *nd_btt = to_nd_btt(dev);
94
95 uuid2 = nd_btt->uuid;
96 } else if (is_nd_pfn(dev)) {
97 struct nd_pfn *nd_pfn = to_nd_pfn(dev);
98
99 uuid2 = nd_pfn->uuid;
100 }
101
102 if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
103 return -EBUSY;
104
105 return 0;
106}
107
108static int is_namespace_uuid_busy(struct device *dev, void *data)
109{
110 if (is_nd_pmem(dev) || is_nd_blk(dev))
111 return device_for_each_child(dev, data, is_uuid_busy);
112 return 0;
113}
114
115/**
116 * nd_is_uuid_unique - verify that no other namespace has @uuid
117 * @dev: any device on a nvdimm_bus
118 * @uuid: uuid to check
119 */
120bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
121{
122 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
123
124 if (!nvdimm_bus)
125 return false;
126 WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
127 if (device_for_each_child(&nvdimm_bus->dev, uuid,
128 is_namespace_uuid_busy) != 0)
129 return false;
130 return true;
131}
132
133bool pmem_should_map_pages(struct device *dev)
134{
135 struct nd_region *nd_region = to_nd_region(dev->parent);
136 struct nd_namespace_io *nsio;
137
138 if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
139 return false;
140
141 if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
142 return false;
143
144 if (is_nd_pfn(dev) || is_nd_btt(dev))
145 return false;
146
147 nsio = to_nd_namespace_io(dev);
148 if (region_intersects(nsio->res.start, resource_size(&nsio->res),
149 IORESOURCE_SYSTEM_RAM,
150 IORES_DESC_NONE) == REGION_MIXED)
151 return false;
152
153#ifdef ARCH_MEMREMAP_PMEM
154 return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
155#else
156 return false;
157#endif
158}
159EXPORT_SYMBOL(pmem_should_map_pages);
160
161const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
162 char *name)
163{
164 struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
165 const char *suffix = NULL;
166
167 if (ndns->claim && is_nd_btt(ndns->claim))
168 suffix = "s";
169
170 if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
171 sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
172 } else if (is_namespace_blk(&ndns->dev)) {
173 struct nd_namespace_blk *nsblk;
174
175 nsblk = to_nd_namespace_blk(&ndns->dev);
176 sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
177 suffix ? suffix : "");
178 } else {
179 return NULL;
180 }
181
182 return name;
183}
184EXPORT_SYMBOL(nvdimm_namespace_disk_name);
185
186const u8 *nd_dev_to_uuid(struct device *dev)
187{
188 static const u8 null_uuid[16];
189
190 if (!dev)
191 return null_uuid;
192
193 if (is_namespace_pmem(dev)) {
194 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
195
196 return nspm->uuid;
197 } else if (is_namespace_blk(dev)) {
198 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
199
200 return nsblk->uuid;
201 } else
202 return null_uuid;
203}
204EXPORT_SYMBOL(nd_dev_to_uuid);
205
206static ssize_t nstype_show(struct device *dev,
207 struct device_attribute *attr, char *buf)
208{
209 struct nd_region *nd_region = to_nd_region(dev->parent);
210
211 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
212}
213static DEVICE_ATTR_RO(nstype);
214
215static ssize_t __alt_name_store(struct device *dev, const char *buf,
216 const size_t len)
217{
218 char *input, *pos, *alt_name, **ns_altname;
219 ssize_t rc;
220
221 if (is_namespace_pmem(dev)) {
222 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
223
224 ns_altname = &nspm->alt_name;
225 } else if (is_namespace_blk(dev)) {
226 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
227
228 ns_altname = &nsblk->alt_name;
229 } else
230 return -ENXIO;
231
232 if (dev->driver || to_ndns(dev)->claim)
233 return -EBUSY;
234
235 input = kmemdup(buf, len + 1, GFP_KERNEL);
236 if (!input)
237 return -ENOMEM;
238
239 input[len] = '\0';
240 pos = strim(input);
241 if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
242 rc = -EINVAL;
243 goto out;
244 }
245
246 alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
247 if (!alt_name) {
248 rc = -ENOMEM;
249 goto out;
250 }
251 kfree(*ns_altname);
252 *ns_altname = alt_name;
253 sprintf(*ns_altname, "%s", pos);
254 rc = len;
255
256out:
257 kfree(input);
258 return rc;
259}
260
261static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
262{
263 struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
264 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
265 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
266 struct nd_label_id label_id;
267 resource_size_t size = 0;
268 struct resource *res;
269
270 if (!nsblk->uuid)
271 return 0;
272 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
273 for_each_dpa_resource(ndd, res)
274 if (strcmp(res->name, label_id.id) == 0)
275 size += resource_size(res);
276 return size;
277}
278
279static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
280{
281 struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
282 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
283 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
284 struct nd_label_id label_id;
285 struct resource *res;
286 int count, i;
287
288 if (!nsblk->uuid || !nsblk->lbasize || !ndd)
289 return false;
290
291 count = 0;
292 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
293 for_each_dpa_resource(ndd, res) {
294 if (strcmp(res->name, label_id.id) != 0)
295 continue;
296 /*
297 * Resources with unacknoweldged adjustments indicate a
298 * failure to update labels
299 */
300 if (res->flags & DPA_RESOURCE_ADJUSTED)
301 return false;
302 count++;
303 }
304
305 /* These values match after a successful label update */
306 if (count != nsblk->num_resources)
307 return false;
308
309 for (i = 0; i < nsblk->num_resources; i++) {
310 struct resource *found = NULL;
311
312 for_each_dpa_resource(ndd, res)
313 if (res == nsblk->res[i]) {
314 found = res;
315 break;
316 }
317 /* stale resource */
318 if (!found)
319 return false;
320 }
321
322 return true;
323}
324
325resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
326{
327 resource_size_t size;
328
329 nvdimm_bus_lock(&nsblk->common.dev);
330 size = __nd_namespace_blk_validate(nsblk);
331 nvdimm_bus_unlock(&nsblk->common.dev);
332
333 return size;
334}
335EXPORT_SYMBOL(nd_namespace_blk_validate);
336
337
338static int nd_namespace_label_update(struct nd_region *nd_region,
339 struct device *dev)
340{
341 dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
342 "namespace must be idle during label update\n");
343 if (dev->driver || to_ndns(dev)->claim)
344 return 0;
345
346 /*
347 * Only allow label writes that will result in a valid namespace
348 * or deletion of an existing namespace.
349 */
350 if (is_namespace_pmem(dev)) {
351 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
352 resource_size_t size = resource_size(&nspm->nsio.res);
353
354 if (size == 0 && nspm->uuid)
355 /* delete allocation */;
356 else if (!nspm->uuid)
357 return 0;
358
359 return nd_pmem_namespace_label_update(nd_region, nspm, size);
360 } else if (is_namespace_blk(dev)) {
361 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
362 resource_size_t size = nd_namespace_blk_size(nsblk);
363
364 if (size == 0 && nsblk->uuid)
365 /* delete allocation */;
366 else if (!nsblk->uuid || !nsblk->lbasize)
367 return 0;
368
369 return nd_blk_namespace_label_update(nd_region, nsblk, size);
370 } else
371 return -ENXIO;
372}
373
374static ssize_t alt_name_store(struct device *dev,
375 struct device_attribute *attr, const char *buf, size_t len)
376{
377 struct nd_region *nd_region = to_nd_region(dev->parent);
378 ssize_t rc;
379
380 device_lock(dev);
381 nvdimm_bus_lock(dev);
382 wait_nvdimm_bus_probe_idle(dev);
383 rc = __alt_name_store(dev, buf, len);
384 if (rc >= 0)
385 rc = nd_namespace_label_update(nd_region, dev);
386 dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
387 nvdimm_bus_unlock(dev);
388 device_unlock(dev);
389
390 return rc < 0 ? rc : len;
391}
392
393static ssize_t alt_name_show(struct device *dev,
394 struct device_attribute *attr, char *buf)
395{
396 char *ns_altname;
397
398 if (is_namespace_pmem(dev)) {
399 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
400
401 ns_altname = nspm->alt_name;
402 } else if (is_namespace_blk(dev)) {
403 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
404
405 ns_altname = nsblk->alt_name;
406 } else
407 return -ENXIO;
408
409 return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
410}
411static DEVICE_ATTR_RW(alt_name);
412
413static int scan_free(struct nd_region *nd_region,
414 struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
415 resource_size_t n)
416{
417 bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
418 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
419 int rc = 0;
420
421 while (n) {
422 struct resource *res, *last;
423 resource_size_t new_start;
424
425 last = NULL;
426 for_each_dpa_resource(ndd, res)
427 if (strcmp(res->name, label_id->id) == 0)
428 last = res;
429 res = last;
430 if (!res)
431 return 0;
432
433 if (n >= resource_size(res)) {
434 n -= resource_size(res);
435 nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
436 nvdimm_free_dpa(ndd, res);
437 /* retry with last resource deleted */
438 continue;
439 }
440
441 /*
442 * Keep BLK allocations relegated to high DPA as much as
443 * possible
444 */
445 if (is_blk)
446 new_start = res->start + n;
447 else
448 new_start = res->start;
449
450 rc = adjust_resource(res, new_start, resource_size(res) - n);
451 if (rc == 0)
452 res->flags |= DPA_RESOURCE_ADJUSTED;
453 nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
454 break;
455 }
456
457 return rc;
458}
459
460/**
461 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
462 * @nd_region: the set of dimms to reclaim @n bytes from
463 * @label_id: unique identifier for the namespace consuming this dpa range
464 * @n: number of bytes per-dimm to release
465 *
466 * Assumes resources are ordered. Starting from the end try to
467 * adjust_resource() the allocation to @n, but if @n is larger than the
468 * allocation delete it and find the 'new' last allocation in the label
469 * set.
470 */
471static int shrink_dpa_allocation(struct nd_region *nd_region,
472 struct nd_label_id *label_id, resource_size_t n)
473{
474 int i;
475
476 for (i = 0; i < nd_region->ndr_mappings; i++) {
477 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
478 int rc;
479
480 rc = scan_free(nd_region, nd_mapping, label_id, n);
481 if (rc)
482 return rc;
483 }
484
485 return 0;
486}
487
488static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
489 struct nd_region *nd_region, struct nd_mapping *nd_mapping,
490 resource_size_t n)
491{
492 bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
493 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
494 resource_size_t first_dpa;
495 struct resource *res;
496 int rc = 0;
497
498 /* allocate blk from highest dpa first */
499 if (is_blk)
500 first_dpa = nd_mapping->start + nd_mapping->size - n;
501 else
502 first_dpa = nd_mapping->start;
503
504 /* first resource allocation for this label-id or dimm */
505 res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
506 if (!res)
507 rc = -EBUSY;
508
509 nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
510 return rc ? n : 0;
511}
512
513static bool space_valid(bool is_pmem, bool is_reserve,
514 struct nd_label_id *label_id, struct resource *res)
515{
516 /*
517 * For BLK-space any space is valid, for PMEM-space, it must be
518 * contiguous with an existing allocation unless we are
519 * reserving pmem.
520 */
521 if (is_reserve || !is_pmem)
522 return true;
523 if (!res || strcmp(res->name, label_id->id) == 0)
524 return true;
525 return false;
526}
527
528enum alloc_loc {
529 ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
530};
531
532static resource_size_t scan_allocate(struct nd_region *nd_region,
533 struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
534 resource_size_t n)
535{
536 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
537 bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
538 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
539 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
540 const resource_size_t to_allocate = n;
541 struct resource *res;
542 int first;
543
544 retry:
545 first = 0;
546 for_each_dpa_resource(ndd, res) {
547 resource_size_t allocate, available = 0, free_start, free_end;
548 struct resource *next = res->sibling, *new_res = NULL;
549 enum alloc_loc loc = ALLOC_ERR;
550 const char *action;
551 int rc = 0;
552
553 /* ignore resources outside this nd_mapping */
554 if (res->start > mapping_end)
555 continue;
556 if (res->end < nd_mapping->start)
557 continue;
558
559 /* space at the beginning of the mapping */
560 if (!first++ && res->start > nd_mapping->start) {
561 free_start = nd_mapping->start;
562 available = res->start - free_start;
563 if (space_valid(is_pmem, is_reserve, label_id, NULL))
564 loc = ALLOC_BEFORE;
565 }
566
567 /* space between allocations */
568 if (!loc && next) {
569 free_start = res->start + resource_size(res);
570 free_end = min(mapping_end, next->start - 1);
571 if (space_valid(is_pmem, is_reserve, label_id, res)
572 && free_start < free_end) {
573 available = free_end + 1 - free_start;
574 loc = ALLOC_MID;
575 }
576 }
577
578 /* space at the end of the mapping */
579 if (!loc && !next) {
580 free_start = res->start + resource_size(res);
581 free_end = mapping_end;
582 if (space_valid(is_pmem, is_reserve, label_id, res)
583 && free_start < free_end) {
584 available = free_end + 1 - free_start;
585 loc = ALLOC_AFTER;
586 }
587 }
588
589 if (!loc || !available)
590 continue;
591 allocate = min(available, n);
592 switch (loc) {
593 case ALLOC_BEFORE:
594 if (strcmp(res->name, label_id->id) == 0) {
595 /* adjust current resource up */
596 if (is_pmem && !is_reserve)
597 return n;
598 rc = adjust_resource(res, res->start - allocate,
599 resource_size(res) + allocate);
600 action = "cur grow up";
601 } else
602 action = "allocate";
603 break;
604 case ALLOC_MID:
605 if (strcmp(next->name, label_id->id) == 0) {
606 /* adjust next resource up */
607 if (is_pmem && !is_reserve)
608 return n;
609 rc = adjust_resource(next, next->start
610 - allocate, resource_size(next)
611 + allocate);
612 new_res = next;
613 action = "next grow up";
614 } else if (strcmp(res->name, label_id->id) == 0) {
615 action = "grow down";
616 } else
617 action = "allocate";
618 break;
619 case ALLOC_AFTER:
620 if (strcmp(res->name, label_id->id) == 0)
621 action = "grow down";
622 else
623 action = "allocate";
624 break;
625 default:
626 return n;
627 }
628
629 if (strcmp(action, "allocate") == 0) {
630 /* BLK allocate bottom up */
631 if (!is_pmem)
632 free_start += available - allocate;
633 else if (!is_reserve && free_start != nd_mapping->start)
634 return n;
635
636 new_res = nvdimm_allocate_dpa(ndd, label_id,
637 free_start, allocate);
638 if (!new_res)
639 rc = -EBUSY;
640 } else if (strcmp(action, "grow down") == 0) {
641 /* adjust current resource down */
642 rc = adjust_resource(res, res->start, resource_size(res)
643 + allocate);
644 if (rc == 0)
645 res->flags |= DPA_RESOURCE_ADJUSTED;
646 }
647
648 if (!new_res)
649 new_res = res;
650
651 nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
652 action, loc, rc);
653
654 if (rc)
655 return n;
656
657 n -= allocate;
658 if (n) {
659 /*
660 * Retry scan with newly inserted resources.
661 * For example, if we did an ALLOC_BEFORE
662 * insertion there may also have been space
663 * available for an ALLOC_AFTER insertion, so we
664 * need to check this same resource again
665 */
666 goto retry;
667 } else
668 return 0;
669 }
670
671 /*
672 * If we allocated nothing in the BLK case it may be because we are in
673 * an initial "pmem-reserve pass". Only do an initial BLK allocation
674 * when none of the DPA space is reserved.
675 */
676 if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
677 return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
678 return n;
679}
680
681static int merge_dpa(struct nd_region *nd_region,
682 struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
683{
684 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
685 struct resource *res;
686
687 if (strncmp("pmem", label_id->id, 4) == 0)
688 return 0;
689 retry:
690 for_each_dpa_resource(ndd, res) {
691 int rc;
692 struct resource *next = res->sibling;
693 resource_size_t end = res->start + resource_size(res);
694
695 if (!next || strcmp(res->name, label_id->id) != 0
696 || strcmp(next->name, label_id->id) != 0
697 || end != next->start)
698 continue;
699 end += resource_size(next);
700 nvdimm_free_dpa(ndd, next);
701 rc = adjust_resource(res, res->start, end - res->start);
702 nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
703 if (rc)
704 return rc;
705 res->flags |= DPA_RESOURCE_ADJUSTED;
706 goto retry;
707 }
708
709 return 0;
710}
711
712static int __reserve_free_pmem(struct device *dev, void *data)
713{
714 struct nvdimm *nvdimm = data;
715 struct nd_region *nd_region;
716 struct nd_label_id label_id;
717 int i;
718
719 if (!is_nd_pmem(dev))
720 return 0;
721
722 nd_region = to_nd_region(dev);
723 if (nd_region->ndr_mappings == 0)
724 return 0;
725
726 memset(&label_id, 0, sizeof(label_id));
727 strcat(label_id.id, "pmem-reserve");
728 for (i = 0; i < nd_region->ndr_mappings; i++) {
729 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
730 resource_size_t n, rem = 0;
731
732 if (nd_mapping->nvdimm != nvdimm)
733 continue;
734
735 n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
736 if (n == 0)
737 return 0;
738 rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
739 dev_WARN_ONCE(&nd_region->dev, rem,
740 "pmem reserve underrun: %#llx of %#llx bytes\n",
741 (unsigned long long) n - rem,
742 (unsigned long long) n);
743 return rem ? -ENXIO : 0;
744 }
745
746 return 0;
747}
748
749static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
750 struct nd_mapping *nd_mapping)
751{
752 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
753 struct resource *res, *_res;
754
755 for_each_dpa_resource_safe(ndd, res, _res)
756 if (strcmp(res->name, "pmem-reserve") == 0)
757 nvdimm_free_dpa(ndd, res);
758}
759
760static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
761 struct nd_mapping *nd_mapping)
762{
763 struct nvdimm *nvdimm = nd_mapping->nvdimm;
764 int rc;
765
766 rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
767 __reserve_free_pmem);
768 if (rc)
769 release_free_pmem(nvdimm_bus, nd_mapping);
770 return rc;
771}
772
773/**
774 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
775 * @nd_region: the set of dimms to allocate @n more bytes from
776 * @label_id: unique identifier for the namespace consuming this dpa range
777 * @n: number of bytes per-dimm to add to the existing allocation
778 *
779 * Assumes resources are ordered. For BLK regions, first consume
780 * BLK-only available DPA free space, then consume PMEM-aliased DPA
781 * space starting at the highest DPA. For PMEM regions start
782 * allocations from the start of an interleave set and end at the first
783 * BLK allocation or the end of the interleave set, whichever comes
784 * first.
785 */
786static int grow_dpa_allocation(struct nd_region *nd_region,
787 struct nd_label_id *label_id, resource_size_t n)
788{
789 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
790 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
791 int i;
792
793 for (i = 0; i < nd_region->ndr_mappings; i++) {
794 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
795 resource_size_t rem = n;
796 int rc, j;
797
798 /*
799 * In the BLK case try once with all unallocated PMEM
800 * reserved, and once without
801 */
802 for (j = is_pmem; j < 2; j++) {
803 bool blk_only = j == 0;
804
805 if (blk_only) {
806 rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
807 if (rc)
808 return rc;
809 }
810 rem = scan_allocate(nd_region, nd_mapping,
811 label_id, rem);
812 if (blk_only)
813 release_free_pmem(nvdimm_bus, nd_mapping);
814
815 /* try again and allow encroachments into PMEM */
816 if (rem == 0)
817 break;
818 }
819
820 dev_WARN_ONCE(&nd_region->dev, rem,
821 "allocation underrun: %#llx of %#llx bytes\n",
822 (unsigned long long) n - rem,
823 (unsigned long long) n);
824 if (rem)
825 return -ENXIO;
826
827 rc = merge_dpa(nd_region, nd_mapping, label_id);
828 if (rc)
829 return rc;
830 }
831
832 return 0;
833}
834
835static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
836 struct nd_namespace_pmem *nspm, resource_size_t size)
837{
838 struct resource *res = &nspm->nsio.res;
839
840 res->start = nd_region->ndr_start;
841 res->end = nd_region->ndr_start + size - 1;
842}
843
844static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
845{
846 if (!uuid) {
847 dev_dbg(dev, "%s: uuid not set\n", where);
848 return true;
849 }
850 return false;
851}
852
853static ssize_t __size_store(struct device *dev, unsigned long long val)
854{
855 resource_size_t allocated = 0, available = 0;
856 struct nd_region *nd_region = to_nd_region(dev->parent);
857 struct nd_mapping *nd_mapping;
858 struct nvdimm_drvdata *ndd;
859 struct nd_label_id label_id;
860 u32 flags = 0, remainder;
861 u8 *uuid = NULL;
862 int rc, i;
863
864 if (dev->driver || to_ndns(dev)->claim)
865 return -EBUSY;
866
867 if (is_namespace_pmem(dev)) {
868 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
869
870 uuid = nspm->uuid;
871 } else if (is_namespace_blk(dev)) {
872 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
873
874 uuid = nsblk->uuid;
875 flags = NSLABEL_FLAG_LOCAL;
876 }
877
878 /*
879 * We need a uuid for the allocation-label and dimm(s) on which
880 * to store the label.
881 */
882 if (uuid_not_set(uuid, dev, __func__))
883 return -ENXIO;
884 if (nd_region->ndr_mappings == 0) {
885 dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);
886 return -ENXIO;
887 }
888
889 div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
890 if (remainder) {
891 dev_dbg(dev, "%llu is not %dK aligned\n", val,
892 (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
893 return -EINVAL;
894 }
895
896 nd_label_gen_id(&label_id, uuid, flags);
897 for (i = 0; i < nd_region->ndr_mappings; i++) {
898 nd_mapping = &nd_region->mapping[i];
899 ndd = to_ndd(nd_mapping);
900
901 /*
902 * All dimms in an interleave set, or the base dimm for a blk
903 * region, need to be enabled for the size to be changed.
904 */
905 if (!ndd)
906 return -ENXIO;
907
908 allocated += nvdimm_allocated_dpa(ndd, &label_id);
909 }
910 available = nd_region_available_dpa(nd_region);
911
912 if (val > available + allocated)
913 return -ENOSPC;
914
915 if (val == allocated)
916 return 0;
917
918 val = div_u64(val, nd_region->ndr_mappings);
919 allocated = div_u64(allocated, nd_region->ndr_mappings);
920 if (val < allocated)
921 rc = shrink_dpa_allocation(nd_region, &label_id,
922 allocated - val);
923 else
924 rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
925
926 if (rc)
927 return rc;
928
929 if (is_namespace_pmem(dev)) {
930 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
931
932 nd_namespace_pmem_set_size(nd_region, nspm,
933 val * nd_region->ndr_mappings);
934 } else if (is_namespace_blk(dev)) {
935 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
936
937 /*
938 * Try to delete the namespace if we deleted all of its
939 * allocation, this is not the seed device for the
940 * region, and it is not actively claimed by a btt
941 * instance.
942 */
943 if (val == 0 && nd_region->ns_seed != dev
944 && !nsblk->common.claim)
945 nd_device_unregister(dev, ND_ASYNC);
946 }
947
948 return rc;
949}
950
951static ssize_t size_store(struct device *dev,
952 struct device_attribute *attr, const char *buf, size_t len)
953{
954 struct nd_region *nd_region = to_nd_region(dev->parent);
955 unsigned long long val;
956 u8 **uuid = NULL;
957 int rc;
958
959 rc = kstrtoull(buf, 0, &val);
960 if (rc)
961 return rc;
962
963 device_lock(dev);
964 nvdimm_bus_lock(dev);
965 wait_nvdimm_bus_probe_idle(dev);
966 rc = __size_store(dev, val);
967 if (rc >= 0)
968 rc = nd_namespace_label_update(nd_region, dev);
969
970 if (is_namespace_pmem(dev)) {
971 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
972
973 uuid = &nspm->uuid;
974 } else if (is_namespace_blk(dev)) {
975 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
976
977 uuid = &nsblk->uuid;
978 }
979
980 if (rc == 0 && val == 0 && uuid) {
981 /* setting size zero == 'delete namespace' */
982 kfree(*uuid);
983 *uuid = NULL;
984 }
985
986 dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
987 ? "fail" : "success", rc);
988
989 nvdimm_bus_unlock(dev);
990 device_unlock(dev);
991
992 return rc < 0 ? rc : len;
993}
994
995resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
996{
997 struct device *dev = &ndns->dev;
998
999 if (is_namespace_pmem(dev)) {
1000 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1001
1002 return resource_size(&nspm->nsio.res);
1003 } else if (is_namespace_blk(dev)) {
1004 return nd_namespace_blk_size(to_nd_namespace_blk(dev));
1005 } else if (is_namespace_io(dev)) {
1006 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
1007
1008 return resource_size(&nsio->res);
1009 } else
1010 WARN_ONCE(1, "unknown namespace type\n");
1011 return 0;
1012}
1013
1014resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
1015{
1016 resource_size_t size;
1017
1018 nvdimm_bus_lock(&ndns->dev);
1019 size = __nvdimm_namespace_capacity(ndns);
1020 nvdimm_bus_unlock(&ndns->dev);
1021
1022 return size;
1023}
1024EXPORT_SYMBOL(nvdimm_namespace_capacity);
1025
1026static ssize_t size_show(struct device *dev,
1027 struct device_attribute *attr, char *buf)
1028{
1029 return sprintf(buf, "%llu\n", (unsigned long long)
1030 nvdimm_namespace_capacity(to_ndns(dev)));
1031}
1032static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
1033
1034static ssize_t uuid_show(struct device *dev,
1035 struct device_attribute *attr, char *buf)
1036{
1037 u8 *uuid;
1038
1039 if (is_namespace_pmem(dev)) {
1040 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1041
1042 uuid = nspm->uuid;
1043 } else if (is_namespace_blk(dev)) {
1044 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1045
1046 uuid = nsblk->uuid;
1047 } else
1048 return -ENXIO;
1049
1050 if (uuid)
1051 return sprintf(buf, "%pUb\n", uuid);
1052 return sprintf(buf, "\n");
1053}
1054
1055/**
1056 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
1057 * @nd_region: parent region so we can updates all dimms in the set
1058 * @dev: namespace type for generating label_id
1059 * @new_uuid: incoming uuid
1060 * @old_uuid: reference to the uuid storage location in the namespace object
1061 */
1062static int namespace_update_uuid(struct nd_region *nd_region,
1063 struct device *dev, u8 *new_uuid, u8 **old_uuid)
1064{
1065 u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
1066 struct nd_label_id old_label_id;
1067 struct nd_label_id new_label_id;
1068 int i;
1069
1070 if (!nd_is_uuid_unique(dev, new_uuid))
1071 return -EINVAL;
1072
1073 if (*old_uuid == NULL)
1074 goto out;
1075
1076 /*
1077 * If we've already written a label with this uuid, then it's
1078 * too late to rename because we can't reliably update the uuid
1079 * without losing the old namespace. Userspace must delete this
1080 * namespace to abandon the old uuid.
1081 */
1082 for (i = 0; i < nd_region->ndr_mappings; i++) {
1083 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1084
1085 /*
1086 * This check by itself is sufficient because old_uuid
1087 * would be NULL above if this uuid did not exist in the
1088 * currently written set.
1089 *
1090 * FIXME: can we delete uuid with zero dpa allocated?
1091 */
1092 if (nd_mapping->labels)
1093 return -EBUSY;
1094 }
1095
1096 nd_label_gen_id(&old_label_id, *old_uuid, flags);
1097 nd_label_gen_id(&new_label_id, new_uuid, flags);
1098 for (i = 0; i < nd_region->ndr_mappings; i++) {
1099 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1100 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1101 struct resource *res;
1102
1103 for_each_dpa_resource(ndd, res)
1104 if (strcmp(res->name, old_label_id.id) == 0)
1105 sprintf((void *) res->name, "%s",
1106 new_label_id.id);
1107 }
1108 kfree(*old_uuid);
1109 out:
1110 *old_uuid = new_uuid;
1111 return 0;
1112}
1113
1114static ssize_t uuid_store(struct device *dev,
1115 struct device_attribute *attr, const char *buf, size_t len)
1116{
1117 struct nd_region *nd_region = to_nd_region(dev->parent);
1118 u8 *uuid = NULL;
1119 ssize_t rc = 0;
1120 u8 **ns_uuid;
1121
1122 if (is_namespace_pmem(dev)) {
1123 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1124
1125 ns_uuid = &nspm->uuid;
1126 } else if (is_namespace_blk(dev)) {
1127 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1128
1129 ns_uuid = &nsblk->uuid;
1130 } else
1131 return -ENXIO;
1132
1133 device_lock(dev);
1134 nvdimm_bus_lock(dev);
1135 wait_nvdimm_bus_probe_idle(dev);
1136 if (to_ndns(dev)->claim)
1137 rc = -EBUSY;
1138 if (rc >= 0)
1139 rc = nd_uuid_store(dev, &uuid, buf, len);
1140 if (rc >= 0)
1141 rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
1142 if (rc >= 0)
1143 rc = nd_namespace_label_update(nd_region, dev);
1144 else
1145 kfree(uuid);
1146 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
1147 rc, buf, buf[len - 1] == '\n' ? "" : "\n");
1148 nvdimm_bus_unlock(dev);
1149 device_unlock(dev);
1150
1151 return rc < 0 ? rc : len;
1152}
1153static DEVICE_ATTR_RW(uuid);
1154
1155static ssize_t resource_show(struct device *dev,
1156 struct device_attribute *attr, char *buf)
1157{
1158 struct resource *res;
1159
1160 if (is_namespace_pmem(dev)) {
1161 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1162
1163 res = &nspm->nsio.res;
1164 } else if (is_namespace_io(dev)) {
1165 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
1166
1167 res = &nsio->res;
1168 } else
1169 return -ENXIO;
1170
1171 /* no address to convey if the namespace has no allocation */
1172 if (resource_size(res) == 0)
1173 return -ENXIO;
1174 return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
1175}
1176static DEVICE_ATTR_RO(resource);
1177
1178static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
1179 4096, 4104, 4160, 4224, 0 };
1180
1181static ssize_t sector_size_show(struct device *dev,
1182 struct device_attribute *attr, char *buf)
1183{
1184 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1185
1186 if (!is_namespace_blk(dev))
1187 return -ENXIO;
1188
1189 return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
1190}
1191
1192static ssize_t sector_size_store(struct device *dev,
1193 struct device_attribute *attr, const char *buf, size_t len)
1194{
1195 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1196 struct nd_region *nd_region = to_nd_region(dev->parent);
1197 ssize_t rc = 0;
1198
1199 if (!is_namespace_blk(dev))
1200 return -ENXIO;
1201
1202 device_lock(dev);
1203 nvdimm_bus_lock(dev);
1204 if (to_ndns(dev)->claim)
1205 rc = -EBUSY;
1206 if (rc >= 0)
1207 rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
1208 ns_lbasize_supported);
1209 if (rc >= 0)
1210 rc = nd_namespace_label_update(nd_region, dev);
1211 dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
1212 rc, rc < 0 ? "tried" : "wrote", buf,
1213 buf[len - 1] == '\n' ? "" : "\n");
1214 nvdimm_bus_unlock(dev);
1215 device_unlock(dev);
1216
1217 return rc ? rc : len;
1218}
1219static DEVICE_ATTR_RW(sector_size);
1220
1221static ssize_t dpa_extents_show(struct device *dev,
1222 struct device_attribute *attr, char *buf)
1223{
1224 struct nd_region *nd_region = to_nd_region(dev->parent);
1225 struct nd_label_id label_id;
1226 int count = 0, i;
1227 u8 *uuid = NULL;
1228 u32 flags = 0;
1229
1230 nvdimm_bus_lock(dev);
1231 if (is_namespace_pmem(dev)) {
1232 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1233
1234 uuid = nspm->uuid;
1235 flags = 0;
1236 } else if (is_namespace_blk(dev)) {
1237 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1238
1239 uuid = nsblk->uuid;
1240 flags = NSLABEL_FLAG_LOCAL;
1241 }
1242
1243 if (!uuid)
1244 goto out;
1245
1246 nd_label_gen_id(&label_id, uuid, flags);
1247 for (i = 0; i < nd_region->ndr_mappings; i++) {
1248 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1249 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1250 struct resource *res;
1251
1252 for_each_dpa_resource(ndd, res)
1253 if (strcmp(res->name, label_id.id) == 0)
1254 count++;
1255 }
1256 out:
1257 nvdimm_bus_unlock(dev);
1258
1259 return sprintf(buf, "%d\n", count);
1260}
1261static DEVICE_ATTR_RO(dpa_extents);
1262
1263static ssize_t holder_show(struct device *dev,
1264 struct device_attribute *attr, char *buf)
1265{
1266 struct nd_namespace_common *ndns = to_ndns(dev);
1267 ssize_t rc;
1268
1269 device_lock(dev);
1270 rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
1271 device_unlock(dev);
1272
1273 return rc;
1274}
1275static DEVICE_ATTR_RO(holder);
1276
1277static ssize_t mode_show(struct device *dev,
1278 struct device_attribute *attr, char *buf)
1279{
1280 struct nd_namespace_common *ndns = to_ndns(dev);
1281 struct device *claim;
1282 char *mode;
1283 ssize_t rc;
1284
1285 device_lock(dev);
1286 claim = ndns->claim;
1287 if (claim && is_nd_btt(claim))
1288 mode = "safe";
1289 else if (claim && is_nd_pfn(claim))
1290 mode = "memory";
1291 else if (!claim && pmem_should_map_pages(dev))
1292 mode = "memory";
1293 else
1294 mode = "raw";
1295 rc = sprintf(buf, "%s\n", mode);
1296 device_unlock(dev);
1297
1298 return rc;
1299}
1300static DEVICE_ATTR_RO(mode);
1301
1302static ssize_t force_raw_store(struct device *dev,
1303 struct device_attribute *attr, const char *buf, size_t len)
1304{
1305 bool force_raw;
1306 int rc = strtobool(buf, &force_raw);
1307
1308 if (rc)
1309 return rc;
1310
1311 to_ndns(dev)->force_raw = force_raw;
1312 return len;
1313}
1314
1315static ssize_t force_raw_show(struct device *dev,
1316 struct device_attribute *attr, char *buf)
1317{
1318 return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
1319}
1320static DEVICE_ATTR_RW(force_raw);
1321
1322static struct attribute *nd_namespace_attributes[] = {
1323 &dev_attr_nstype.attr,
1324 &dev_attr_size.attr,
1325 &dev_attr_mode.attr,
1326 &dev_attr_uuid.attr,
1327 &dev_attr_holder.attr,
1328 &dev_attr_resource.attr,
1329 &dev_attr_alt_name.attr,
1330 &dev_attr_force_raw.attr,
1331 &dev_attr_sector_size.attr,
1332 &dev_attr_dpa_extents.attr,
1333 NULL,
1334};
1335
1336static umode_t namespace_visible(struct kobject *kobj,
1337 struct attribute *a, int n)
1338{
1339 struct device *dev = container_of(kobj, struct device, kobj);
1340
1341 if (a == &dev_attr_resource.attr) {
1342 if (is_namespace_blk(dev))
1343 return 0;
1344 return a->mode;
1345 }
1346
1347 if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
1348 if (a == &dev_attr_size.attr)
1349 return S_IWUSR | S_IRUGO;
1350
1351 if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
1352 return 0;
1353
1354 return a->mode;
1355 }
1356
1357 if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
1358 || a == &dev_attr_holder.attr
1359 || a == &dev_attr_force_raw.attr
1360 || a == &dev_attr_mode.attr)
1361 return a->mode;
1362
1363 return 0;
1364}
1365
1366static struct attribute_group nd_namespace_attribute_group = {
1367 .attrs = nd_namespace_attributes,
1368 .is_visible = namespace_visible,
1369};
1370
1371static const struct attribute_group *nd_namespace_attribute_groups[] = {
1372 &nd_device_attribute_group,
1373 &nd_namespace_attribute_group,
1374 &nd_numa_attribute_group,
1375 NULL,
1376};
1377
1378struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1379{
1380 struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
1381 struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
1382 struct nd_namespace_common *ndns;
1383 resource_size_t size;
1384
1385 if (nd_btt || nd_pfn) {
1386 struct device *host = NULL;
1387
1388 if (nd_btt) {
1389 host = &nd_btt->dev;
1390 ndns = nd_btt->ndns;
1391 } else if (nd_pfn) {
1392 host = &nd_pfn->dev;
1393 ndns = nd_pfn->ndns;
1394 }
1395
1396 if (!ndns || !host)
1397 return ERR_PTR(-ENODEV);
1398
1399 /*
1400 * Flush any in-progess probes / removals in the driver
1401 * for the raw personality of this namespace.
1402 */
1403 device_lock(&ndns->dev);
1404 device_unlock(&ndns->dev);
1405 if (ndns->dev.driver) {
1406 dev_dbg(&ndns->dev, "is active, can't bind %s\n",
1407 dev_name(host));
1408 return ERR_PTR(-EBUSY);
1409 }
1410 if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
1411 "host (%s) vs claim (%s) mismatch\n",
1412 dev_name(host),
1413 dev_name(ndns->claim)))
1414 return ERR_PTR(-ENXIO);
1415 } else {
1416 ndns = to_ndns(dev);
1417 if (ndns->claim) {
1418 dev_dbg(dev, "claimed by %s, failing probe\n",
1419 dev_name(ndns->claim));
1420
1421 return ERR_PTR(-ENXIO);
1422 }
1423 }
1424
1425 size = nvdimm_namespace_capacity(ndns);
1426 if (size < ND_MIN_NAMESPACE_SIZE) {
1427 dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
1428 &size, ND_MIN_NAMESPACE_SIZE);
1429 return ERR_PTR(-ENODEV);
1430 }
1431
1432 if (is_namespace_pmem(&ndns->dev)) {
1433 struct nd_namespace_pmem *nspm;
1434
1435 nspm = to_nd_namespace_pmem(&ndns->dev);
1436 if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
1437 return ERR_PTR(-ENODEV);
1438 } else if (is_namespace_blk(&ndns->dev)) {
1439 struct nd_namespace_blk *nsblk;
1440
1441 nsblk = to_nd_namespace_blk(&ndns->dev);
1442 if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
1443 return ERR_PTR(-ENODEV);
1444 if (!nsblk->lbasize) {
1445 dev_dbg(&ndns->dev, "%s: sector size not set\n",
1446 __func__);
1447 return ERR_PTR(-ENODEV);
1448 }
1449 if (!nd_namespace_blk_validate(nsblk))
1450 return ERR_PTR(-ENODEV);
1451 }
1452
1453 return ndns;
1454}
1455EXPORT_SYMBOL(nvdimm_namespace_common_probe);
1456
1457static struct device **create_namespace_io(struct nd_region *nd_region)
1458{
1459 struct nd_namespace_io *nsio;
1460 struct device *dev, **devs;
1461 struct resource *res;
1462
1463 nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
1464 if (!nsio)
1465 return NULL;
1466
1467 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1468 if (!devs) {
1469 kfree(nsio);
1470 return NULL;
1471 }
1472
1473 dev = &nsio->common.dev;
1474 dev->type = &namespace_io_device_type;
1475 dev->parent = &nd_region->dev;
1476 res = &nsio->res;
1477 res->name = dev_name(&nd_region->dev);
1478 res->flags = IORESOURCE_MEM;
1479 res->start = nd_region->ndr_start;
1480 res->end = res->start + nd_region->ndr_size - 1;
1481
1482 devs[0] = dev;
1483 return devs;
1484}
1485
1486static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1487 u64 cookie, u16 pos)
1488{
1489 struct nd_namespace_label *found = NULL;
1490 int i;
1491
1492 for (i = 0; i < nd_region->ndr_mappings; i++) {
1493 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1494 struct nd_namespace_label *nd_label;
1495 bool found_uuid = false;
1496 int l;
1497
1498 for_each_label(l, nd_label, nd_mapping->labels) {
1499 u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1500 u16 position = __le16_to_cpu(nd_label->position);
1501 u16 nlabel = __le16_to_cpu(nd_label->nlabel);
1502
1503 if (isetcookie != cookie)
1504 continue;
1505
1506 if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
1507 continue;
1508
1509 if (found_uuid) {
1510 dev_dbg(to_ndd(nd_mapping)->dev,
1511 "%s duplicate entry for uuid\n",
1512 __func__);
1513 return false;
1514 }
1515 found_uuid = true;
1516 if (nlabel != nd_region->ndr_mappings)
1517 continue;
1518 if (position != pos)
1519 continue;
1520 found = nd_label;
1521 break;
1522 }
1523 if (found)
1524 break;
1525 }
1526 return found != NULL;
1527}
1528
1529static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1530{
1531 struct nd_namespace_label *select = NULL;
1532 int i;
1533
1534 if (!pmem_id)
1535 return -ENODEV;
1536
1537 for (i = 0; i < nd_region->ndr_mappings; i++) {
1538 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1539 struct nd_namespace_label *nd_label;
1540 u64 hw_start, hw_end, pmem_start, pmem_end;
1541 int l;
1542
1543 for_each_label(l, nd_label, nd_mapping->labels)
1544 if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1545 break;
1546
1547 if (!nd_label) {
1548 WARN_ON(1);
1549 return -EINVAL;
1550 }
1551
1552 select = nd_label;
1553 /*
1554 * Check that this label is compliant with the dpa
1555 * range published in NFIT
1556 */
1557 hw_start = nd_mapping->start;
1558 hw_end = hw_start + nd_mapping->size;
1559 pmem_start = __le64_to_cpu(select->dpa);
1560 pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
1561 if (pmem_start == hw_start && pmem_end <= hw_end)
1562 /* pass */;
1563 else
1564 return -EINVAL;
1565
1566 nd_mapping->labels[0] = select;
1567 nd_mapping->labels[1] = NULL;
1568 }
1569 return 0;
1570}
1571
1572/**
1573 * find_pmem_label_set - validate interleave set labelling, retrieve label0
1574 * @nd_region: region with mappings to validate
1575 */
1576static int find_pmem_label_set(struct nd_region *nd_region,
1577 struct nd_namespace_pmem *nspm)
1578{
1579 u64 cookie = nd_region_interleave_set_cookie(nd_region);
1580 struct nd_namespace_label *nd_label;
1581 u8 select_id[NSLABEL_UUID_LEN];
1582 resource_size_t size = 0;
1583 u8 *pmem_id = NULL;
1584 int rc = -ENODEV, l;
1585 u16 i;
1586
1587 if (cookie == 0)
1588 return -ENXIO;
1589
1590 /*
1591 * Find a complete set of labels by uuid. By definition we can start
1592 * with any mapping as the reference label
1593 */
1594 for_each_label(l, nd_label, nd_region->mapping[0].labels) {
1595 u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1596
1597 if (isetcookie != cookie)
1598 continue;
1599
1600 for (i = 0; nd_region->ndr_mappings; i++)
1601 if (!has_uuid_at_pos(nd_region, nd_label->uuid,
1602 cookie, i))
1603 break;
1604 if (i < nd_region->ndr_mappings) {
1605 /*
1606 * Give up if we don't find an instance of a
1607 * uuid at each position (from 0 to
1608 * nd_region->ndr_mappings - 1), or if we find a
1609 * dimm with two instances of the same uuid.
1610 */
1611 rc = -EINVAL;
1612 goto err;
1613 } else if (pmem_id) {
1614 /*
1615 * If there is more than one valid uuid set, we
1616 * need userspace to clean this up.
1617 */
1618 rc = -EBUSY;
1619 goto err;
1620 }
1621 memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
1622 pmem_id = select_id;
1623 }
1624
1625 /*
1626 * Fix up each mapping's 'labels' to have the validated pmem label for
1627 * that position at labels[0], and NULL at labels[1]. In the process,
1628 * check that the namespace aligns with interleave-set. We know
1629 * that it does not overlap with any blk namespaces by virtue of
1630 * the dimm being enabled (i.e. nd_label_reserve_dpa()
1631 * succeeded).
1632 */
1633 rc = select_pmem_id(nd_region, pmem_id);
1634 if (rc)
1635 goto err;
1636
1637 /* Calculate total size and populate namespace properties from label0 */
1638 for (i = 0; i < nd_region->ndr_mappings; i++) {
1639 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1640 struct nd_namespace_label *label0 = nd_mapping->labels[0];
1641
1642 size += __le64_to_cpu(label0->rawsize);
1643 if (__le16_to_cpu(label0->position) != 0)
1644 continue;
1645 WARN_ON(nspm->alt_name || nspm->uuid);
1646 nspm->alt_name = kmemdup((void __force *) label0->name,
1647 NSLABEL_NAME_LEN, GFP_KERNEL);
1648 nspm->uuid = kmemdup((void __force *) label0->uuid,
1649 NSLABEL_UUID_LEN, GFP_KERNEL);
1650 }
1651
1652 if (!nspm->alt_name || !nspm->uuid) {
1653 rc = -ENOMEM;
1654 goto err;
1655 }
1656
1657 nd_namespace_pmem_set_size(nd_region, nspm, size);
1658
1659 return 0;
1660 err:
1661 switch (rc) {
1662 case -EINVAL:
1663 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
1664 break;
1665 case -ENODEV:
1666 dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
1667 break;
1668 default:
1669 dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
1670 __func__, rc);
1671 break;
1672 }
1673 return rc;
1674}
1675
1676static struct device **create_namespace_pmem(struct nd_region *nd_region)
1677{
1678 struct nd_namespace_pmem *nspm;
1679 struct device *dev, **devs;
1680 struct resource *res;
1681 int rc;
1682
1683 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1684 if (!nspm)
1685 return NULL;
1686
1687 dev = &nspm->nsio.common.dev;
1688 dev->type = &namespace_pmem_device_type;
1689 dev->parent = &nd_region->dev;
1690 res = &nspm->nsio.res;
1691 res->name = dev_name(&nd_region->dev);
1692 res->flags = IORESOURCE_MEM;
1693 rc = find_pmem_label_set(nd_region, nspm);
1694 if (rc == -ENODEV) {
1695 int i;
1696
1697 /* Pass, try to permit namespace creation... */
1698 for (i = 0; i < nd_region->ndr_mappings; i++) {
1699 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1700
1701 kfree(nd_mapping->labels);
1702 nd_mapping->labels = NULL;
1703 }
1704
1705 /* Publish a zero-sized namespace for userspace to configure. */
1706 nd_namespace_pmem_set_size(nd_region, nspm, 0);
1707
1708 rc = 0;
1709 } else if (rc)
1710 goto err;
1711
1712 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1713 if (!devs)
1714 goto err;
1715
1716 devs[0] = dev;
1717 return devs;
1718
1719 err:
1720 namespace_pmem_release(&nspm->nsio.common.dev);
1721 return NULL;
1722}
1723
1724struct resource *nsblk_add_resource(struct nd_region *nd_region,
1725 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
1726 resource_size_t start)
1727{
1728 struct nd_label_id label_id;
1729 struct resource *res;
1730
1731 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
1732 res = krealloc(nsblk->res,
1733 sizeof(void *) * (nsblk->num_resources + 1),
1734 GFP_KERNEL);
1735 if (!res)
1736 return NULL;
1737 nsblk->res = (struct resource **) res;
1738 for_each_dpa_resource(ndd, res)
1739 if (strcmp(res->name, label_id.id) == 0
1740 && res->start == start) {
1741 nsblk->res[nsblk->num_resources++] = res;
1742 return res;
1743 }
1744 return NULL;
1745}
1746
1747static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
1748{
1749 struct nd_namespace_blk *nsblk;
1750 struct device *dev;
1751
1752 if (!is_nd_blk(&nd_region->dev))
1753 return NULL;
1754
1755 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1756 if (!nsblk)
1757 return NULL;
1758
1759 dev = &nsblk->common.dev;
1760 dev->type = &namespace_blk_device_type;
1761 nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
1762 if (nsblk->id < 0) {
1763 kfree(nsblk);
1764 return NULL;
1765 }
1766 dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
1767 dev->parent = &nd_region->dev;
1768 dev->groups = nd_namespace_attribute_groups;
1769
1770 return &nsblk->common.dev;
1771}
1772
1773void nd_region_create_blk_seed(struct nd_region *nd_region)
1774{
1775 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1776 nd_region->ns_seed = nd_namespace_blk_create(nd_region);
1777 /*
1778 * Seed creation failures are not fatal, provisioning is simply
1779 * disabled until memory becomes available
1780 */
1781 if (!nd_region->ns_seed)
1782 dev_err(&nd_region->dev, "failed to create blk namespace\n");
1783 else
1784 nd_device_register(nd_region->ns_seed);
1785}
1786
1787void nd_region_create_pfn_seed(struct nd_region *nd_region)
1788{
1789 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1790 nd_region->pfn_seed = nd_pfn_create(nd_region);
1791 /*
1792 * Seed creation failures are not fatal, provisioning is simply
1793 * disabled until memory becomes available
1794 */
1795 if (!nd_region->pfn_seed)
1796 dev_err(&nd_region->dev, "failed to create pfn namespace\n");
1797}
1798
1799void nd_region_create_btt_seed(struct nd_region *nd_region)
1800{
1801 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1802 nd_region->btt_seed = nd_btt_create(nd_region);
1803 /*
1804 * Seed creation failures are not fatal, provisioning is simply
1805 * disabled until memory becomes available
1806 */
1807 if (!nd_region->btt_seed)
1808 dev_err(&nd_region->dev, "failed to create btt namespace\n");
1809}
1810
static struct device **create_namespace_blk(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nd_namespace_label *nd_label;
	struct device *dev, **devs = NULL;
	struct nd_namespace_blk *nsblk;
	struct nvdimm_drvdata *ndd;
	int i, l, count = 0;
	struct resource *res;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	ndd = to_ndd(nd_mapping);
	for_each_label(l, nd_label, nd_mapping->labels) {
		u32 flags = __le32_to_cpu(nd_label->flags);
		char name[NSLABEL_NAME_LEN];
		struct device **__devs;

		/* blk namespaces are described by local labels only */
		if ((flags & NSLABEL_FLAG_LOCAL) == 0)
			continue;

		/* does this label extend an already discovered namespace? */
		for (i = 0; i < count; i++) {
			nsblk = to_nd_namespace_blk(devs[i]);
			if (memcmp(nsblk->uuid, nd_label->uuid,
					NSLABEL_UUID_LEN) == 0) {
				res = nsblk_add_resource(nd_region, ndd, nsblk,
						__le64_to_cpu(nd_label->dpa));
				if (!res)
					goto err;
				nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
					dev_name(&nsblk->common.dev));
				break;
			}
		}
		if (i < count)
			continue;

		/* grow the device list, keeping it NULL-terminated */
		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		if (!nsblk)
			goto err;
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
		devs[count++] = dev;
		nsblk->id = -1;
		nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
		nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
				GFP_KERNEL);
		if (!nsblk->uuid)
			goto err;
		memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
		if (name[0])
			nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
					GFP_KERNEL);
		res = nsblk_add_resource(nd_region, ndd, nsblk,
				__le64_to_cpu(nd_label->dpa));
		if (!res)
			goto err;
		nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
				dev_name(&nsblk->common.dev));
	}

	dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
			__func__, count, count == 1 ? "" : "s");

	if (count == 0) {
		/* Publish a zero-sized namespace for userspace to configure. */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
		}

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;
		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		if (!nsblk)
			goto err;
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	}

	return devs;

 err:
	for (i = 0; i < count; i++) {
		nsblk = to_nd_namespace_blk(devs[i]);
		namespace_blk_release(&nsblk->common.dev);
	}
	kfree(devs);
	return NULL;
}

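/*
 * init_active_labels - cache the active label set for each dimm mapping
 *
 * Takes a reference on each mapping's dimm and its label data.  A
 * disabled dimm only fails the probe when it aliases DPA (NDD_ALIASING);
 * otherwise label scanning stops and the region probes without labels.
 */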
static int init_active_labels(struct nd_region *nd_region)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int count, j;

		/*
		 * If the dimm is disabled then prevent the region from
		 * being activated if it aliases DPA.
		 */
		if (!ndd) {
			if ((nvdimm->flags & NDD_ALIASING) == 0)
				return 0;
			dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev));
			return -ENXIO;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
		if (!count)
			continue;
		/* NULL-terminated array of this mapping's active labels */
		nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
				GFP_KERNEL);
		if (!nd_mapping->labels)
			return -ENOMEM;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label = nd_label_active(ndd, j);
			nd_mapping->labels[j] = label;
		}
	}

	return 0;
}

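/**
 * nd_region_register_namespaces - init full namespaces, or publish a seed
 * @nd_region: region to scan for namespace labels
 * @err: on return, the number of namespace devices that failed to register
 *
 * Returns the number of namespaces registered, or a negative error code
 * if the region could not be activated.
 */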
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
		devs = create_namespace_pmem(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespace_blk(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nsblk->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		dev->groups = nd_namespace_attribute_groups;
		nd_device_register(dev);
	}
	/* the first namespace doubles as the region's provisioning seed */
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		/* free devices that were never registered */
		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (i == 0)
			rc = -ENODEV;
	}
	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}
2040}