// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static uuid_t nvdimm_btt_uuid;
static uuid_t nvdimm_btt2_uuid;
static uuid_t nvdimm_pfn_uuid;
static uuid_t nvdimm_dax_uuid;

static uuid_t cxl_region_uuid;
static uuid_t cxl_namespace_uuid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

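/*
 * Per the UEFI label specification, index-block sequence numbers are 2-bit
 * values that cycle 1 -> 2 -> 3 -> 1 (0 is invalid). Given two in-cycle
 * values, best_seq() returns the more recently written one: for example,
 * best_seq(3, 1) returns 1, because nd_inc_seq(3) == 1 means 1 was written
 * after 3.
 */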
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
				    size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
		sizeof_namespace_label(ndd);
}

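/*
 * Worked example with hypothetical geometry: for a 128KiB label area and
 * 256-byte labels, tmp_nslot = 131072 / 256 = 512. Assuming the 72-byte
 * index-block header of the UEFI layout, __sizeof_namespace_index(512) =
 * ALIGN(72 + 64, 256) = 256, so n = 1 and the result is
 * (131072 - 2 * 256) / 256 = 510 usable slots.
 */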
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels. The minimum index
	 * block size is 256 bytes. The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
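	/*
	 * Continuing the hypothetical example above: nslot = 510,
	 * space = 131072 - 510 * 256 = 512 and size = 2 * 256 = 512, so the
	 * two index blocks exactly fit the space left over by the labels
	 * and each index block is 256 bytes.
	 */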
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * The on-media label format consists of two index blocks followed
	 * by an array of labels. None of these structures are ever
	 * updated in place. A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		   struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0); \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
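/*
 * Note: in the on-media free bitmap a set bit marks a free slot, so walking
 * the clear bits visits every allocated (active) label slot.
 */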

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
			   struct nd_namespace_index **nsindex_out,
			   unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
		      u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "pmem-%pUb", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
			     struct nd_namespace_index **nsindex,
			     unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index **nsindex,
			  unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

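/*
 * The label checksum is defined over the label contents with the checksum
 * field itself zeroed, so validation zeroes the field, recomputes the
 * Fletcher64 sum, and then restores the saved value.
 */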
static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	u64 sum, sum_save;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return true;

	sum_save = nsl_get_checksum(ndd, nd_label);
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum_save);
	return sum == sum_save;
}

static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	u64 sum;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return;
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum);
}

static bool slot_valid(struct nvdimm_drvdata *ndd,
		       struct nd_namespace_label *nd_label, u32 slot)
{
	bool valid;

	/* check that we are written where we expect to be written */
	if (slot != nsl_get_slot(ndd, nd_label))
		return false;
	valid = nsl_validate_checksum(ndd, nd_label);
	if (!valid)
		dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
	return valid;
}

int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		struct nd_label_id label_id;
		struct resource *res;
		uuid_t label_uuid;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		nsl_get_uuid(ndd, nd_label, &label_uuid);
		flags = nsl_get_flags(ndd, nd_label);
		nd_label_gen_id(&label_id, &label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
					  nsl_get_dpa(ndd, nd_label),
					  nsl_get_rawsize(ndd, nd_label));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
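	/*
	 * (Smaller labels mean more slots, and more slots mean a larger free
	 * bitmap, so the 128-byte label size bounds the index size from
	 * above.)
	 */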
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory. To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
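	/*
	 * For example, with hypothetical sizes config_size = 131072 and
	 * max_xfer = 5000: DIV_ROUND_UP(131072, 5000) = 27 reads waste
	 * (5000 - 1) - (131072 - 1) % 5000 = 3928 bytes in the last read,
	 * so max_xfer is trimmed by 3928 / 27 = 145 to 4855, which still
	 * covers the area in 27 reads (27 * 4855 = 131085 >= 131072).
	 */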
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data; if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
			max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}

int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = nsl_get_slot(ndd, nd_label);
			u64 size = nsl_get_rawsize(ndd, nd_label);
			u64 dpa = nsl_get_dpa(ndd, nd_label);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
				slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
				unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

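		/*
		 * Mark every slot free, then clear the pad bits between
		 * nslot and the BITS_PER_LONG-aligned end of the bitmap so
		 * nonexistent slots can never be allocated.
		 */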
		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
				    nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

static enum nvdimm_claim_class guid_to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

/* CXL labels store UUIDs instead of GUIDs for the same data */
static enum nvdimm_claim_class uuid_to_nvdimm_cclass(uuid_t *uuid)
{
	if (uuid_equal(uuid, &nvdimm_btt_uuid))
		return NVDIMM_CCLASS_BTT;
	else if (uuid_equal(uuid, &nvdimm_btt2_uuid))
		return NVDIMM_CCLASS_BTT2;
	else if (uuid_equal(uuid, &nvdimm_pfn_uuid))
		return NVDIMM_CCLASS_PFN;
	else if (uuid_equal(uuid, &nvdimm_dax_uuid))
		return NVDIMM_CCLASS_DAX;
	else if (uuid_equal(uuid, &uuid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
					 guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

/* CXL labels store UUIDs instead of GUIDs for the same data */
static const uuid_t *to_abstraction_uuid(enum nvdimm_claim_class claim_class,
					 uuid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_uuid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_uuid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_uuid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_uuid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing uuid.
		 */
		return target;
	} else
		return &uuid_null;
}

static void reap_victim(struct nd_mapping *nd_mapping,
			struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}

static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
			      struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (efi_namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->efi.type_guid, guid);
}

bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
			    struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (ndd->cxl || !efi_namespace_label_has(ndd, type_guid))
		return true;
	if (!guid_equal(&nd_label->efi.type_guid, guid)) {
		dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
			&nd_label->efi.type_guid);
		return false;
	}
	return true;
}

static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label,
				enum nvdimm_claim_class claim_class)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		export_uuid(nd_label->cxl.abstraction_uuid,
			    to_abstraction_uuid(claim_class, &uuid));
		return;
	}

	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return;
	guid_copy(&nd_label->efi.abstraction_guid,
		  to_abstraction_guid(claim_class,
				      &nd_label->efi.abstraction_guid));
}

enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
					    struct nd_namespace_label *nd_label)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		return uuid_to_nvdimm_cclass(&uuid);
	}
	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return NVDIMM_CCLASS_NONE;
	return guid_to_nvdimm_cclass(&nd_label->efi.abstraction_guid);
}

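/*
 * Label updates follow the crash-consistency scheme described in
 * __nd_label_validate(): the new label lands in a free slot, then the
 * staging (next) index is written with an incremented sequence number.
 * Only once that index write completes does the on-media "current" index
 * flip, so an interrupted update leaves the previous state valid.
 */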
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	nsl_set_uuid(ndd, nd_label, nspm->uuid);
	nsl_set_name(ndd, nd_label, nspm->alt_name);
	nsl_set_flags(ndd, nd_label, flags);
	nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
	nsl_set_nrange(ndd, nd_label, 1);
	nsl_set_position(ndd, nd_label, pos);
	nsl_set_isetcookie(ndd, nd_label, cookie);
	nsl_set_rawsize(ndd, nd_label, resource_size(res));
	nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
	nsl_set_dpa(ndd, nd_label, res->start);
	nsl_set_slot(ndd, nd_label, slot);
	nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
	nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
	nsl_calculate_checksum(ndd, nd_label);
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
				    sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
		    nsl_uuid_equal(ndd, label_ent->label, nspm->uuid))
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
			      "failed to track label: %d\n",
			      to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}

static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
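	/*
	 * Seed the two index blocks with sequence numbers 3 (index0) and 2
	 * (index1) so that index0 validates as the more recent, "current"
	 * index.
	 */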
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

static int del_labels(struct nd_mapping *nd_mapping, uuid_t *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		if (!nsl_uuid_equal(ndd, nd_label, uuid))
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
					 NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	WARN_ON(uuid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_uuid));
	WARN_ON(uuid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_uuid));
	WARN_ON(uuid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_uuid));
	WARN_ON(uuid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_uuid));

	WARN_ON(uuid_parse(CXL_REGION_UUID, &cxl_region_uuid));
	WARN_ON(uuid_parse(CXL_NAMESPACE_UUID, &cxl_namespace_uuid));

	return 0;
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4 */
5#include <linux/device.h>
6#include <linux/ndctl.h>
7#include <linux/uuid.h>
8#include <linux/slab.h>
9#include <linux/io.h>
10#include <linux/nd.h>
11#include "nd-core.h"
12#include "label.h"
13#include "nd.h"
14
15static guid_t nvdimm_btt_guid;
16static guid_t nvdimm_btt2_guid;
17static guid_t nvdimm_pfn_guid;
18static guid_t nvdimm_dax_guid;
19
20static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
21
22static u32 best_seq(u32 a, u32 b)
23{
24 a &= NSINDEX_SEQ_MASK;
25 b &= NSINDEX_SEQ_MASK;
26
27 if (a == 0 || a == b)
28 return b;
29 else if (b == 0)
30 return a;
31 else if (nd_inc_seq(a) == b)
32 return b;
33 else
34 return a;
35}
36
37unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
38{
39 return ndd->nslabel_size;
40}
41
42static size_t __sizeof_namespace_index(u32 nslot)
43{
44 return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
45 NSINDEX_ALIGN);
46}
47
48static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
49 size_t index_size)
50{
51 return (ndd->nsarea.config_size - index_size * 2) /
52 sizeof_namespace_label(ndd);
53}
54
55int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
56{
57 u32 tmp_nslot, n;
58
59 tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
60 n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;
61
62 return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
63}
64
65size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
66{
67 u32 nslot, space, size;
68
69 /*
70 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
71 * enough to hold 2 index blocks and 2 labels. The minimum index
72 * block size is 256 bytes. The label size is 128 for namespaces
73 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
74 */
75 nslot = nvdimm_num_label_slots(ndd);
76 space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
77 size = __sizeof_namespace_index(nslot) * 2;
78 if (size <= space && nslot >= 2)
79 return size / 2;
80
81 dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
82 ndd->nsarea.config_size, sizeof_namespace_label(ndd));
83 return 0;
84}
85
86static int __nd_label_validate(struct nvdimm_drvdata *ndd)
87{
88 /*
89 * On media label format consists of two index blocks followed
90 * by an array of labels. None of these structures are ever
91 * updated in place. A sequence number tracks the current
92 * active index and the next one to write, while labels are
93 * written to free slots.
94 *
95 * +------------+
96 * | |
97 * | nsindex0 |
98 * | |
99 * +------------+
100 * | |
101 * | nsindex1 |
102 * | |
103 * +------------+
104 * | label0 |
105 * +------------+
106 * | label1 |
107 * +------------+
108 * | |
109 * ....nslot...
110 * | |
111 * +------------+
112 * | labelN |
113 * +------------+
114 */
115 struct nd_namespace_index *nsindex[] = {
116 to_namespace_index(ndd, 0),
117 to_namespace_index(ndd, 1),
118 };
119 const int num_index = ARRAY_SIZE(nsindex);
120 struct device *dev = ndd->dev;
121 bool valid[2] = { 0 };
122 int i, num_valid = 0;
123 u32 seq;
124
125 for (i = 0; i < num_index; i++) {
126 u32 nslot;
127 u8 sig[NSINDEX_SIG_LEN];
128 u64 sum_save, sum, size;
129 unsigned int version, labelsize;
130
131 memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
132 if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
133 dev_dbg(dev, "nsindex%d signature invalid\n", i);
134 continue;
135 }
136
137 /* label sizes larger than 128 arrived with v1.2 */
138 version = __le16_to_cpu(nsindex[i]->major) * 100
139 + __le16_to_cpu(nsindex[i]->minor);
140 if (version >= 102)
141 labelsize = 1 << (7 + nsindex[i]->labelsize);
142 else
143 labelsize = 128;
144
145 if (labelsize != sizeof_namespace_label(ndd)) {
146 dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
147 i, nsindex[i]->labelsize);
148 continue;
149 }
150
151 sum_save = __le64_to_cpu(nsindex[i]->checksum);
152 nsindex[i]->checksum = __cpu_to_le64(0);
153 sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
154 nsindex[i]->checksum = __cpu_to_le64(sum_save);
155 if (sum != sum_save) {
156 dev_dbg(dev, "nsindex%d checksum invalid\n", i);
157 continue;
158 }
159
160 seq = __le32_to_cpu(nsindex[i]->seq);
161 if ((seq & NSINDEX_SEQ_MASK) == 0) {
162 dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
163 continue;
164 }
165
166 /* sanity check the index against expected values */
167 if (__le64_to_cpu(nsindex[i]->myoff)
168 != i * sizeof_namespace_index(ndd)) {
169 dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
170 i, (unsigned long long)
171 __le64_to_cpu(nsindex[i]->myoff));
172 continue;
173 }
174 if (__le64_to_cpu(nsindex[i]->otheroff)
175 != (!i) * sizeof_namespace_index(ndd)) {
176 dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
177 i, (unsigned long long)
178 __le64_to_cpu(nsindex[i]->otheroff));
179 continue;
180 }
181 if (__le64_to_cpu(nsindex[i]->labeloff)
182 != 2 * sizeof_namespace_index(ndd)) {
183 dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
184 i, (unsigned long long)
185 __le64_to_cpu(nsindex[i]->labeloff));
186 continue;
187 }
188
189 size = __le64_to_cpu(nsindex[i]->mysize);
190 if (size > sizeof_namespace_index(ndd)
191 || size < sizeof(struct nd_namespace_index)) {
192 dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
193 continue;
194 }
195
196 nslot = __le32_to_cpu(nsindex[i]->nslot);
197 if (nslot * sizeof_namespace_label(ndd)
198 + 2 * sizeof_namespace_index(ndd)
199 > ndd->nsarea.config_size) {
200 dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
201 i, nslot, ndd->nsarea.config_size);
202 continue;
203 }
204 valid[i] = true;
205 num_valid++;
206 }
207
208 switch (num_valid) {
209 case 0:
210 break;
211 case 1:
212 for (i = 0; i < num_index; i++)
213 if (valid[i])
214 return i;
215 /* can't have num_valid > 0 but valid[] = { false, false } */
216 WARN_ON(1);
217 break;
218 default:
219 /* pick the best index... */
220 seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
221 __le32_to_cpu(nsindex[1]->seq));
222 if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
223 return 1;
224 else
225 return 0;
226 break;
227 }
228
229 return -1;
230}
231
232static int nd_label_validate(struct nvdimm_drvdata *ndd)
233{
234 /*
235 * In order to probe for and validate namespace index blocks we
236 * need to know the size of the labels, and we can't trust the
237 * size of the labels until we validate the index blocks.
238 * Resolve this dependency loop by probing for known label
239 * sizes, but default to v1.2 256-byte namespace labels if
240 * discovery fails.
241 */
242 int label_size[] = { 128, 256 };
243 int i, rc;
244
245 for (i = 0; i < ARRAY_SIZE(label_size); i++) {
246 ndd->nslabel_size = label_size[i];
247 rc = __nd_label_validate(ndd);
248 if (rc >= 0)
249 return rc;
250 }
251
252 return -1;
253}
254
255static void nd_label_copy(struct nvdimm_drvdata *ndd,
256 struct nd_namespace_index *dst,
257 struct nd_namespace_index *src)
258{
259 /* just exit if either destination or source is NULL */
260 if (!dst || !src)
261 return;
262
263 memcpy(dst, src, sizeof_namespace_index(ndd));
264}
265
266static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
267{
268 void *base = to_namespace_index(ndd, 0);
269
270 return base + 2 * sizeof_namespace_index(ndd);
271}
272
273static int to_slot(struct nvdimm_drvdata *ndd,
274 struct nd_namespace_label *nd_label)
275{
276 unsigned long label, base;
277
278 label = (unsigned long) nd_label;
279 base = (unsigned long) nd_label_base(ndd);
280
281 return (label - base) / sizeof_namespace_label(ndd);
282}
283
284static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
285{
286 unsigned long label, base;
287
288 base = (unsigned long) nd_label_base(ndd);
289 label = base + sizeof_namespace_label(ndd) * slot;
290
291 return (struct nd_namespace_label *) label;
292}
293
294#define for_each_clear_bit_le(bit, addr, size) \
295 for ((bit) = find_next_zero_bit_le((addr), (size), 0); \
296 (bit) < (size); \
297 (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
298
299/**
300 * preamble_index - common variable initialization for nd_label_* routines
301 * @ndd: dimm container for the relevant label set
302 * @idx: namespace_index index
303 * @nsindex_out: on return set to the currently active namespace index
304 * @free: on return set to the free label bitmap in the index
305 * @nslot: on return set to the number of slots in the label space
306 */
307static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
308 struct nd_namespace_index **nsindex_out,
309 unsigned long **free, u32 *nslot)
310{
311 struct nd_namespace_index *nsindex;
312
313 nsindex = to_namespace_index(ndd, idx);
314 if (nsindex == NULL)
315 return false;
316
317 *free = (unsigned long *) nsindex->free;
318 *nslot = __le32_to_cpu(nsindex->nslot);
319 *nsindex_out = nsindex;
320
321 return true;
322}
323
324char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
325{
326 if (!label_id || !uuid)
327 return NULL;
328 snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
329 flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
330 return label_id->id;
331}
332
333static bool preamble_current(struct nvdimm_drvdata *ndd,
334 struct nd_namespace_index **nsindex,
335 unsigned long **free, u32 *nslot)
336{
337 return preamble_index(ndd, ndd->ns_current, nsindex,
338 free, nslot);
339}
340
341static bool preamble_next(struct nvdimm_drvdata *ndd,
342 struct nd_namespace_index **nsindex,
343 unsigned long **free, u32 *nslot)
344{
345 return preamble_index(ndd, ndd->ns_next, nsindex,
346 free, nslot);
347}
348
349static bool slot_valid(struct nvdimm_drvdata *ndd,
350 struct nd_namespace_label *nd_label, u32 slot)
351{
352 /* check that we are written where we expect to be written */
353 if (slot != __le32_to_cpu(nd_label->slot))
354 return false;
355
356 /* check checksum */
357 if (namespace_label_has(ndd, checksum)) {
358 u64 sum, sum_save;
359
360 sum_save = __le64_to_cpu(nd_label->checksum);
361 nd_label->checksum = __cpu_to_le64(0);
362 sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
363 nd_label->checksum = __cpu_to_le64(sum_save);
364 if (sum != sum_save) {
365 dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
366 slot, sum);
367 return false;
368 }
369 }
370
371 return true;
372}
373
374int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
375{
376 struct nd_namespace_index *nsindex;
377 unsigned long *free;
378 u32 nslot, slot;
379
380 if (!preamble_current(ndd, &nsindex, &free, &nslot))
381 return 0; /* no label, nothing to reserve */
382
383 for_each_clear_bit_le(slot, free, nslot) {
384 struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
385 struct nd_namespace_label *nd_label;
386 struct nd_region *nd_region = NULL;
387 u8 label_uuid[NSLABEL_UUID_LEN];
388 struct nd_label_id label_id;
389 struct resource *res;
390 u32 flags;
391
392 nd_label = to_label(ndd, slot);
393
394 if (!slot_valid(ndd, nd_label, slot))
395 continue;
396
397 memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
398 flags = __le32_to_cpu(nd_label->flags);
399 if (test_bit(NDD_NOBLK, &nvdimm->flags))
400 flags &= ~NSLABEL_FLAG_LOCAL;
401 nd_label_gen_id(&label_id, label_uuid, flags);
402 res = nvdimm_allocate_dpa(ndd, &label_id,
403 __le64_to_cpu(nd_label->dpa),
404 __le64_to_cpu(nd_label->rawsize));
405 nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
406 if (!res)
407 return -EBUSY;
408 }
409
410 return 0;
411}
412
413int nd_label_data_init(struct nvdimm_drvdata *ndd)
414{
415 size_t config_size, read_size, max_xfer, offset;
416 struct nd_namespace_index *nsindex;
417 unsigned int i;
418 int rc = 0;
419 u32 nslot;
420
421 if (ndd->data)
422 return 0;
423
424 if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
425 dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
426 ndd->nsarea.max_xfer, ndd->nsarea.config_size);
427 return -ENXIO;
428 }
429
430 /*
431 * We need to determine the maximum index area as this is the section
432 * we must read and validate before we can start processing labels.
433 *
434 * If the area is too small to contain the two indexes and 2 labels
435 * then we abort.
436 *
437 * Start at a label size of 128 as this should result in the largest
438 * possible namespace index size.
439 */
440 ndd->nslabel_size = 128;
441 read_size = sizeof_namespace_index(ndd) * 2;
442 if (!read_size)
443 return -ENXIO;
444
445 /* Allocate config data */
446 config_size = ndd->nsarea.config_size;
447 ndd->data = kvzalloc(config_size, GFP_KERNEL);
448 if (!ndd->data)
449 return -ENOMEM;
450
451 /*
452 * We want to guarantee as few reads as possible while conserving
453 * memory. To do that we figure out how much unused space will be left
454 * in the last read, divide that by the total number of reads it is
455 * going to take given our maximum transfer size, and then reduce our
456 * maximum transfer size based on that result.
457 */
458 max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
459 if (read_size < max_xfer) {
460 /* trim waste */
461 max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
462 DIV_ROUND_UP(config_size, max_xfer);
463 /* make certain we read indexes in exactly 1 read */
464 if (max_xfer < read_size)
465 max_xfer = read_size;
466 }
467
468 /* Make our initial read size a multiple of max_xfer size */
469 read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
470 config_size);
471
472 /* Read the index data */
473 rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
474 if (rc)
475 goto out_err;
476
477 /* Validate index data, if not valid assume all labels are invalid */
478 ndd->ns_current = nd_label_validate(ndd);
479 if (ndd->ns_current < 0)
480 return 0;
481
482 /* Record our index values */
483 ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
484
485 /* Copy "current" index on top of the "next" index */
486 nsindex = to_current_namespace_index(ndd);
487 nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);
488
489 /* Determine starting offset for label data */
490 offset = __le64_to_cpu(nsindex->labeloff);
491 nslot = __le32_to_cpu(nsindex->nslot);
492
493 /* Loop through the free list pulling in any active labels */
494 for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
495 size_t label_read_size;
496
497 /* zero out the unused labels */
498 if (test_bit_le(i, nsindex->free)) {
499 memset(ndd->data + offset, 0, ndd->nslabel_size);
500 continue;
501 }
502
503 /* if we already read past here then just continue */
504 if (offset + ndd->nslabel_size <= read_size)
505 continue;
506
507 /* if we haven't read in a while reset our read_size offset */
508 if (read_size < offset)
509 read_size = offset;
510
511 /* determine how much more will be read after this next call. */
512 label_read_size = offset + ndd->nslabel_size - read_size;
513 label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
514 max_xfer;
515
516 /* truncate last read if needed */
517 if (read_size + label_read_size > config_size)
518 label_read_size = config_size - read_size;
519
520 /* Read the label data */
521 rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
522 read_size, label_read_size);
523 if (rc)
524 goto out_err;
525
526 /* push read_size to next read offset */
527 read_size += label_read_size;
528 }
529
530 dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
531out_err:
532 return rc;
533}
534
535int nd_label_active_count(struct nvdimm_drvdata *ndd)
536{
537 struct nd_namespace_index *nsindex;
538 unsigned long *free;
539 u32 nslot, slot;
540 int count = 0;
541
542 if (!preamble_current(ndd, &nsindex, &free, &nslot))
543 return 0;
544
545 for_each_clear_bit_le(slot, free, nslot) {
546 struct nd_namespace_label *nd_label;
547
548 nd_label = to_label(ndd, slot);
549
550 if (!slot_valid(ndd, nd_label, slot)) {
551 u32 label_slot = __le32_to_cpu(nd_label->slot);
552 u64 size = __le64_to_cpu(nd_label->rawsize);
553 u64 dpa = __le64_to_cpu(nd_label->dpa);
554
555 dev_dbg(ndd->dev,
556 "slot%d invalid slot: %d dpa: %llx size: %llx\n",
557 slot, label_slot, dpa, size);
558 continue;
559 }
560 count++;
561 }
562 return count;
563}
564
565struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
566{
567 struct nd_namespace_index *nsindex;
568 unsigned long *free;
569 u32 nslot, slot;
570
571 if (!preamble_current(ndd, &nsindex, &free, &nslot))
572 return NULL;
573
574 for_each_clear_bit_le(slot, free, nslot) {
575 struct nd_namespace_label *nd_label;
576
577 nd_label = to_label(ndd, slot);
578 if (!slot_valid(ndd, nd_label, slot))
579 continue;
580
581 if (n-- == 0)
582 return to_label(ndd, slot);
583 }
584
585 return NULL;
586}
587
588u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
589{
590 struct nd_namespace_index *nsindex;
591 unsigned long *free;
592 u32 nslot, slot;
593
594 if (!preamble_next(ndd, &nsindex, &free, &nslot))
595 return UINT_MAX;
596
597 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
598
599 slot = find_next_bit_le(free, nslot, 0);
600 if (slot == nslot)
601 return UINT_MAX;
602
603 clear_bit_le(slot, free);
604
605 return slot;
606}
607
608bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
609{
610 struct nd_namespace_index *nsindex;
611 unsigned long *free;
612 u32 nslot;
613
614 if (!preamble_next(ndd, &nsindex, &free, &nslot))
615 return false;
616
617 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
618
619 if (slot < nslot)
620 return !test_and_set_bit_le(slot, free);
621 return false;
622}
623
624u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
625{
626 struct nd_namespace_index *nsindex;
627 unsigned long *free;
628 u32 nslot;
629
630 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
631
632 if (!preamble_next(ndd, &nsindex, &free, &nslot))
633 return nvdimm_num_label_slots(ndd);
634
635 return bitmap_weight(free, nslot);
636}
637
638static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
639 unsigned long flags)
640{
641 struct nd_namespace_index *nsindex;
642 unsigned long offset;
643 u64 checksum;
644 u32 nslot;
645 int rc;
646
647 nsindex = to_namespace_index(ndd, index);
648 if (flags & ND_NSINDEX_INIT)
649 nslot = nvdimm_num_label_slots(ndd);
650 else
651 nslot = __le32_to_cpu(nsindex->nslot);
652
653 memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
654 memset(&nsindex->flags, 0, 3);
655 nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
656 nsindex->seq = __cpu_to_le32(seq);
657 offset = (unsigned long) nsindex
658 - (unsigned long) to_namespace_index(ndd, 0);
659 nsindex->myoff = __cpu_to_le64(offset);
660 nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
661 offset = (unsigned long) to_namespace_index(ndd,
662 nd_label_next_nsindex(index))
663 - (unsigned long) to_namespace_index(ndd, 0);
664 nsindex->otheroff = __cpu_to_le64(offset);
665 offset = (unsigned long) nd_label_base(ndd)
666 - (unsigned long) to_namespace_index(ndd, 0);
667 nsindex->labeloff = __cpu_to_le64(offset);
668 nsindex->nslot = __cpu_to_le32(nslot);
669 nsindex->major = __cpu_to_le16(1);
670 if (sizeof_namespace_label(ndd) < 256)
671 nsindex->minor = __cpu_to_le16(1);
672 else
673 nsindex->minor = __cpu_to_le16(2);
674 nsindex->checksum = __cpu_to_le64(0);
675 if (flags & ND_NSINDEX_INIT) {
676 unsigned long *free = (unsigned long *) nsindex->free;
677 u32 nfree = ALIGN(nslot, BITS_PER_LONG);
678 int last_bits, i;
679
680 memset(nsindex->free, 0xff, nfree / 8);
681 for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
682 clear_bit_le(nslot + i, free);
683 }
684 checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
685 nsindex->checksum = __cpu_to_le64(checksum);
686 rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
687 nsindex, sizeof_namespace_index(ndd));
688 if (rc < 0)
689 return rc;
690
691 if (flags & ND_NSINDEX_INIT)
692 return 0;
693
694 /* copy the index we just wrote to the new 'next' */
695 WARN_ON(index != ndd->ns_next);
696 nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
697 ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
698 ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
699 WARN_ON(ndd->ns_current == ndd->ns_next);
700
701 return 0;
702}
703
704static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
705 struct nd_namespace_label *nd_label)
706{
707 return (unsigned long) nd_label
708 - (unsigned long) to_namespace_index(ndd, 0);
709}
710
711enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
712{
713 if (guid_equal(guid, &nvdimm_btt_guid))
714 return NVDIMM_CCLASS_BTT;
715 else if (guid_equal(guid, &nvdimm_btt2_guid))
716 return NVDIMM_CCLASS_BTT2;
717 else if (guid_equal(guid, &nvdimm_pfn_guid))
718 return NVDIMM_CCLASS_PFN;
719 else if (guid_equal(guid, &nvdimm_dax_guid))
720 return NVDIMM_CCLASS_DAX;
721 else if (guid_equal(guid, &guid_null))
722 return NVDIMM_CCLASS_NONE;
723
724 return NVDIMM_CCLASS_UNKNOWN;
725}
726
727static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
728 guid_t *target)
729{
730 if (claim_class == NVDIMM_CCLASS_BTT)
731 return &nvdimm_btt_guid;
732 else if (claim_class == NVDIMM_CCLASS_BTT2)
733 return &nvdimm_btt2_guid;
734 else if (claim_class == NVDIMM_CCLASS_PFN)
735 return &nvdimm_pfn_guid;
736 else if (claim_class == NVDIMM_CCLASS_DAX)
737 return &nvdimm_dax_guid;
738 else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
739 /*
740 * If we're modifying a namespace for which we don't
741 * know the claim_class, don't touch the existing guid.
742 */
743 return target;
744 } else
745 return &guid_null;
746}
747
748static void reap_victim(struct nd_mapping *nd_mapping,
749 struct nd_label_ent *victim)
750{
751 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
752 u32 slot = to_slot(ndd, victim->label);
753
754 dev_dbg(ndd->dev, "free: %d\n", slot);
755 nd_label_free_slot(ndd, slot);
756 victim->label = NULL;
757}
758
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(flags);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
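	/*
	 * Per the label format the checksum field is defined to be zero
	 * while the Fletcher-64 sum is computed over the whole label,
	 * which is why it is cleared before nd_fletcher64() above. The
	 * read side mirrors this sequence (hedged sketch of the idea
	 * only, not the literal validation code):
	 *
	 *	sum_save = __le64_to_cpu(nd_label->checksum);
	 *	nd_label->checksum = __cpu_to_le64(0);
	 *	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	 *	nd_label->checksum = __cpu_to_le64(sum_save);
	 *	if (sum != sum_save)
	 *		... treat the label as corrupt ...
	 */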
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
				|| memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) == 0)
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}

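/*
 * Ordering note for __pmem_label_update(): the new label is written into a
 * slot that is free in the currently-active index, and only takes effect
 * once nd_label_write_index() commits the staging index with the next
 * sequence number. nd_inc_seq() walks the non-zero 2-bit values, i.e.
 * (illustration only):
 *
 *	1 -> 2 -> 3 -> 1 -> ...
 *
 * so a power loss after the label write but before the index write leaves
 * the previous index, and therefore the previous label set, authoritative.
 */
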
static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}

/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is to
	 * disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = bitmap_zalloc(nslot, GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
						old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		bitmap_free(victim_map);
		return -ENOSPC;
	}
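	/*
	 * Worked example for the check above (illustration only): with
	 * nfree = 2 free slots, alloc = 2 labels to stage, and victims = 0
	 * reclaimable slots, nfree - alloc + victims == 0 and the update is
	 * refused even though the new labels would fit, because committing
	 * it would leave no free slot to stage a future update.
	 */
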
	/* from here on we need to abort on error */

	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

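	/*
	 * For example (illustration only): with resources at DPA 0x2000 and
	 * DPA 0x1000, min_dpa_idx ends up naming the 0x1000 entry. Per the
	 * v1.2 policy applied below, only that lowest-DPA label carries the
	 * real nlabel count and position 0; the rest are stamped with the
	 * 0xffff "not applicable" values.
	 */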
	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
			goto abort;
		dev_dbg(ndd->dev, "allocated: %d\n", slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		if (namespace_label_has(ndd, checksum)) {
			u64 sum;

			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "free: %d\n", slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

 out:
	kfree(old_res_list);
	bitmap_free(victim_map);
	return rc;

 abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}

static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

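/*
 * How init_labels() seeds a never-initialized label area (illustration
 * only): the i == 0 pass of nd_label_write_index(ndd, i, 3 - i, ...)
 * stamps index block 0 with seq == 3, and the i == 1 pass stamps block 1
 * with seq == 2. Since nd_inc_seq(2) == 3, block 0 is the newer of the
 * pair, which is why ns_current is set to 0 and ns_next, the staging
 * block, to 1.
 */
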
static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

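/*
 * Note that del_labels() never has to scrub the deleted labels themselves:
 * clearing their slots via nd_label_free_slot() and committing the staging
 * index with the next sequence number is sufficient, since a slot marked
 * free in the active index is never parsed regardless of its contents.
 */
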
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}

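/*
 * The two passes in nd_pmem_namespace_label_update() implement the UEFI
 * "updating" handshake: every mapping first receives a label stamped with
 * NSLABEL_FLAG_UPDATING, and only after all mappings have landed is each
 * label rewritten with the flag clear. A crash between the passes thus
 * leaves a detectable marker: a set still carrying NSLABEL_FLAG_UPDATING
 * is known to be potentially inconsistent rather than silently trusted.
 */
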
int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	/* parse the uuid_t mirrors declared above from the same strings;
	 * CXL_REGION_UUID / CXL_NAMESPACE_UUID assumed from label.h */
	WARN_ON(uuid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_uuid));
	WARN_ON(uuid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_uuid));
	WARN_ON(uuid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_uuid));
	WARN_ON(uuid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_uuid));
	WARN_ON(uuid_parse(CXL_REGION_UUID, &cxl_region_uuid));
	WARN_ON(uuid_parse(CXL_NAMESPACE_UUID, &cxl_namespace_uuid));

	return 0;
}